From d8cc05b1dfedfa0283cf4d5330aa6220fad44b3a Mon Sep 17 00:00:00 2001 From: SDKAuto Date: Thu, 20 Jul 2023 10:22:16 +0000 Subject: [PATCH] CodeGen from PR 24910 in Azure/azure-rest-api-specs Merge a8bbf75cdd430944019da1c31c1ffb7ae69be50d into e680f9bf4f154ec427ba7feac432ed8efd9665d0 --- sdk/compute/azure-mgmt-compute/_meta.json | 2 +- .../compute/_compute_management_client.py | 29 +- .../azure/mgmt/compute/_version.py | 2 +- .../compute/aio/_compute_management_client.py | 29 +- .../azure/mgmt/compute/models.py | 2 +- .../mgmt/compute/v2015_06_15/_version.py | 2 +- .../mgmt/compute/v2016_03_30/_version.py | 2 +- .../compute/v2016_04_30_preview/_version.py | 2 +- .../mgmt/compute/v2017_03_30/_version.py | 2 +- .../mgmt/compute/v2017_09_01/_version.py | 2 +- .../mgmt/compute/v2017_12_01/_version.py | 2 +- .../mgmt/compute/v2018_04_01/_version.py | 2 +- .../mgmt/compute/v2018_06_01/_version.py | 2 +- .../mgmt/compute/v2018_09_30/_version.py | 2 +- .../mgmt/compute/v2018_10_01/_version.py | 2 +- .../mgmt/compute/v2019_03_01/_version.py | 2 +- .../mgmt/compute/v2019_04_01/_version.py | 2 +- .../mgmt/compute/v2019_07_01/_version.py | 2 +- .../mgmt/compute/v2019_11_01/_version.py | 2 +- .../mgmt/compute/v2019_12_01/_version.py | 2 +- .../mgmt/compute/v2020_05_01/_version.py | 2 +- .../mgmt/compute/v2020_06_01/_version.py | 2 +- .../mgmt/compute/v2020_06_30/_version.py | 2 +- .../mgmt/compute/v2020_09_30/_version.py | 2 +- .../compute/v2020_10_01_preview/_version.py | 2 +- .../mgmt/compute/v2020_12_01/_version.py | 2 +- .../mgmt/compute/v2021_03_01/_version.py | 2 +- .../mgmt/compute/v2021_04_01/_version.py | 2 +- .../mgmt/compute/v2021_07_01/_version.py | 2 +- .../mgmt/compute/v2021_08_01/_version.py | 2 +- .../mgmt/compute/v2021_10_01/_version.py | 2 +- .../mgmt/compute/v2021_11_01/_version.py | 2 +- .../mgmt/compute/v2021_12_01/_version.py | 2 +- .../mgmt/compute/v2022_01_03/_version.py | 2 +- .../mgmt/compute/v2022_03_01/_version.py | 2 +- .../mgmt/compute/v2022_03_02/_version.py | 2 +- .../mgmt/compute/v2022_03_03/_version.py | 2 +- .../mgmt/compute/v2022_04_04/_version.py | 2 +- .../mgmt/compute/v2022_07_02/_version.py | 2 +- .../mgmt/compute/v2022_08_01/_version.py | 2 +- .../_compute_management_client_enums.py | 84 +- .../compute/v2022_08_01/models/_models_py3.py | 850 +-- .../mgmt/compute/v2022_09_04/_version.py | 2 +- .../mgmt/compute/v2022_11_01/_version.py | 2 +- .../mgmt/compute/v2023_01_02/__init__.py | 26 + .../v2023_01_02/_compute_management_client.py | 120 + .../compute/v2023_01_02/_configuration.py | 67 + .../mgmt/compute/v2023_01_02/_metadata.json | 114 + .../azure/mgmt/compute/v2023_01_02/_patch.py | 20 + .../azure/mgmt/compute/v2023_01_02/_vendor.py | 30 + .../mgmt/compute/v2023_01_02/_version.py | 9 + .../mgmt/compute/v2023_01_02/aio/__init__.py | 23 + .../aio/_compute_management_client.py | 120 + .../compute/v2023_01_02/aio/_configuration.py | 67 + .../mgmt/compute/v2023_01_02/aio/_patch.py | 20 + .../v2023_01_02/aio/operations/__init__.py | 27 + .../v2023_01_02/aio/operations/_operations.py | 5289 ++++++++++++++ .../v2023_01_02/aio/operations/_patch.py | 20 + .../compute/v2023_01_02/models/__init__.py | 161 + .../_compute_management_client_enums.py | 266 + .../compute/v2023_01_02/models/_models_py3.py | 3176 ++++++++ .../mgmt/compute/v2023_01_02/models/_patch.py | 20 + .../v2023_01_02/operations/__init__.py | 27 + .../v2023_01_02/operations/_operations.py | 6419 +++++++++++++++++ .../compute/v2023_01_02/operations/_patch.py | 20 + .../azure/mgmt/compute/v2023_01_02/py.typed 
| 1 + .../mgmt/compute/v2023_03_01/_version.py | 2 +- 67 files changed, 16551 insertions(+), 565 deletions(-) create mode 100644 sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/__init__.py create mode 100644 sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/_compute_management_client.py create mode 100644 sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/_configuration.py create mode 100644 sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/_metadata.json create mode 100644 sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/_patch.py create mode 100644 sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/_vendor.py create mode 100644 sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/_version.py create mode 100644 sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/aio/__init__.py create mode 100644 sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/aio/_compute_management_client.py create mode 100644 sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/aio/_configuration.py create mode 100644 sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/aio/_patch.py create mode 100644 sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/aio/operations/__init__.py create mode 100644 sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/aio/operations/_operations.py create mode 100644 sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/aio/operations/_patch.py create mode 100644 sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/models/__init__.py create mode 100644 sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/models/_compute_management_client_enums.py create mode 100644 sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/models/_models_py3.py create mode 100644 sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/models/_patch.py create mode 100644 sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/operations/__init__.py create mode 100644 sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/operations/_operations.py create mode 100644 sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/operations/_patch.py create mode 100644 sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/py.typed diff --git a/sdk/compute/azure-mgmt-compute/_meta.json b/sdk/compute/azure-mgmt-compute/_meta.json index d1ba456e5fd72..e5f347db0278c 100644 --- a/sdk/compute/azure-mgmt-compute/_meta.json +++ b/sdk/compute/azure-mgmt-compute/_meta.json @@ -1,5 +1,5 @@ { - "commit": "a1b208ed1aa1ef6cc9fa0709a9a29f31ec931e4e", + "commit": "e2c61089d6fa80f9d839d1e37b0bd44a60778bc5", "repository_url": "https://github.com/Azure/azure-rest-api-specs", "autorest": "3.9.2", "use": [ diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/_compute_management_client.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/_compute_management_client.py index a68688fa079a5..5a9c5db1b202e 100644 --- a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/_compute_management_client.py +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/_compute_management_client.py @@ -66,10 +66,10 @@ class ComputeManagementClient(MultiApiClientMixin, _SDKClient): 'community_galleries': '2022-03-03', 'community_gallery_image_versions': '2022-03-03', 'community_gallery_images': '2022-03-03', - 'disk_accesses': '2022-07-02', - 'disk_encryption_sets': '2022-07-02', - 'disk_restore_point': '2022-07-02', - 'disks': '2022-07-02', + 
'disk_accesses': '2023-01-02', + 'disk_encryption_sets': '2023-01-02', + 'disk_restore_point': '2023-01-02', + 'disks': '2023-01-02', 'galleries': '2022-03-03', 'gallery_application_versions': '2022-03-03', 'gallery_applications': '2022-03-03', @@ -80,7 +80,7 @@ class ComputeManagementClient(MultiApiClientMixin, _SDKClient): 'shared_galleries': '2022-03-03', 'shared_gallery_image_versions': '2022-03-03', 'shared_gallery_images': '2022-03-03', - 'snapshots': '2022-07-02', + 'snapshots': '2023-01-02', }}, _PROFILE_TAG + " latest" ) @@ -148,6 +148,7 @@ def models(cls, api_version=DEFAULT_API_VERSION): * 2022-08-01: :mod:`v2022_08_01.models` * 2022-09-04: :mod:`v2022_09_04.models` * 2022-11-01: :mod:`v2022_11_01.models` + * 2023-01-02: :mod:`v2023_01_02.models` * 2023-03-01: :mod:`v2023_03_01.models` """ if api_version == '2015-06-15': @@ -261,6 +262,9 @@ def models(cls, api_version=DEFAULT_API_VERSION): elif api_version == '2022-11-01': from .v2022_11_01 import models return models + elif api_version == '2023-01-02': + from .v2023_01_02 import models + return models elif api_version == '2023-03-01': from .v2023_03_01 import models return models @@ -689,6 +693,7 @@ def disk_accesses(self): * 2021-12-01: :class:`DiskAccessesOperations` * 2022-03-02: :class:`DiskAccessesOperations` * 2022-07-02: :class:`DiskAccessesOperations` + * 2023-01-02: :class:`DiskAccessesOperations` """ api_version = self._get_api_version('disk_accesses') if api_version == '2020-05-01': @@ -709,6 +714,8 @@ def disk_accesses(self): from .v2022_03_02.operations import DiskAccessesOperations as OperationClass elif api_version == '2022-07-02': from .v2022_07_02.operations import DiskAccessesOperations as OperationClass + elif api_version == '2023-01-02': + from .v2023_01_02.operations import DiskAccessesOperations as OperationClass else: raise ValueError("API version {} does not have operation group 'disk_accesses'".format(api_version)) self._config.api_version = api_version @@ -729,6 +736,7 @@ def disk_encryption_sets(self): * 2021-12-01: :class:`DiskEncryptionSetsOperations` * 2022-03-02: :class:`DiskEncryptionSetsOperations` * 2022-07-02: :class:`DiskEncryptionSetsOperations` + * 2023-01-02: :class:`DiskEncryptionSetsOperations` """ api_version = self._get_api_version('disk_encryption_sets') if api_version == '2019-07-01': @@ -753,6 +761,8 @@ def disk_encryption_sets(self): from .v2022_03_02.operations import DiskEncryptionSetsOperations as OperationClass elif api_version == '2022-07-02': from .v2022_07_02.operations import DiskEncryptionSetsOperations as OperationClass + elif api_version == '2023-01-02': + from .v2023_01_02.operations import DiskEncryptionSetsOperations as OperationClass else: raise ValueError("API version {} does not have operation group 'disk_encryption_sets'".format(api_version)) self._config.api_version = api_version @@ -769,6 +779,7 @@ def disk_restore_point(self): * 2021-12-01: :class:`DiskRestorePointOperations` * 2022-03-02: :class:`DiskRestorePointOperations` * 2022-07-02: :class:`DiskRestorePointOperations` + * 2023-01-02: :class:`DiskRestorePointOperations` """ api_version = self._get_api_version('disk_restore_point') if api_version == '2020-09-30': @@ -785,6 +796,8 @@ def disk_restore_point(self): from .v2022_03_02.operations import DiskRestorePointOperations as OperationClass elif api_version == '2022-07-02': from .v2022_07_02.operations import DiskRestorePointOperations as OperationClass + elif api_version == '2023-01-02': + from .v2023_01_02.operations import DiskRestorePointOperations as 
OperationClass else: raise ValueError("API version {} does not have operation group 'disk_restore_point'".format(api_version)) self._config.api_version = api_version @@ -811,6 +824,7 @@ def disks(self): * 2021-12-01: :class:`DisksOperations` * 2022-03-02: :class:`DisksOperations` * 2022-07-02: :class:`DisksOperations` + * 2023-01-02: :class:`DisksOperations` """ api_version = self._get_api_version('disks') if api_version == '2016-04-30-preview': @@ -847,6 +861,8 @@ def disks(self): from .v2022_03_02.operations import DisksOperations as OperationClass elif api_version == '2022-07-02': from .v2022_07_02.operations import DisksOperations as OperationClass + elif api_version == '2023-01-02': + from .v2023_01_02.operations import DisksOperations as OperationClass else: raise ValueError("API version {} does not have operation group 'disks'".format(api_version)) self._config.api_version = api_version @@ -1496,6 +1512,7 @@ def snapshots(self): * 2021-12-01: :class:`SnapshotsOperations` * 2022-03-02: :class:`SnapshotsOperations` * 2022-07-02: :class:`SnapshotsOperations` + * 2023-01-02: :class:`SnapshotsOperations` """ api_version = self._get_api_version('snapshots') if api_version == '2016-04-30-preview': @@ -1532,6 +1549,8 @@ def snapshots(self): from .v2022_03_02.operations import SnapshotsOperations as OperationClass elif api_version == '2022-07-02': from .v2022_07_02.operations import SnapshotsOperations as OperationClass + elif api_version == '2023-01-02': + from .v2023_01_02.operations import SnapshotsOperations as OperationClass else: raise ValueError("API version {} does not have operation group 'snapshots'".format(api_version)) self._config.api_version = api_version diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/_version.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/_version.py index 8789677aedc06..a30a458f8b5b3 100644 --- a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/_version.py +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/_version.py @@ -5,4 +5,4 @@ # license information. 
# -------------------------------------------------------------------------- -VERSION = "30.0.0" \ No newline at end of file +VERSION = "0.1.0" \ No newline at end of file diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/aio/_compute_management_client.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/aio/_compute_management_client.py index 0e85f9b1fd1f6..fc63f69395994 100644 --- a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/aio/_compute_management_client.py +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/aio/_compute_management_client.py @@ -66,10 +66,10 @@ class ComputeManagementClient(MultiApiClientMixin, _SDKClient): 'community_galleries': '2022-03-03', 'community_gallery_image_versions': '2022-03-03', 'community_gallery_images': '2022-03-03', - 'disk_accesses': '2022-07-02', - 'disk_encryption_sets': '2022-07-02', - 'disk_restore_point': '2022-07-02', - 'disks': '2022-07-02', + 'disk_accesses': '2023-01-02', + 'disk_encryption_sets': '2023-01-02', + 'disk_restore_point': '2023-01-02', + 'disks': '2023-01-02', 'galleries': '2022-03-03', 'gallery_application_versions': '2022-03-03', 'gallery_applications': '2022-03-03', @@ -80,7 +80,7 @@ class ComputeManagementClient(MultiApiClientMixin, _SDKClient): 'shared_galleries': '2022-03-03', 'shared_gallery_image_versions': '2022-03-03', 'shared_gallery_images': '2022-03-03', - 'snapshots': '2022-07-02', + 'snapshots': '2023-01-02', }}, _PROFILE_TAG + " latest" ) @@ -148,6 +148,7 @@ def models(cls, api_version=DEFAULT_API_VERSION): * 2022-08-01: :mod:`v2022_08_01.models` * 2022-09-04: :mod:`v2022_09_04.models` * 2022-11-01: :mod:`v2022_11_01.models` + * 2023-01-02: :mod:`v2023_01_02.models` * 2023-03-01: :mod:`v2023_03_01.models` """ if api_version == '2015-06-15': @@ -261,6 +262,9 @@ def models(cls, api_version=DEFAULT_API_VERSION): elif api_version == '2022-11-01': from ..v2022_11_01 import models return models + elif api_version == '2023-01-02': + from ..v2023_01_02 import models + return models elif api_version == '2023-03-01': from ..v2023_03_01 import models return models @@ -689,6 +693,7 @@ def disk_accesses(self): * 2021-12-01: :class:`DiskAccessesOperations` * 2022-03-02: :class:`DiskAccessesOperations` * 2022-07-02: :class:`DiskAccessesOperations` + * 2023-01-02: :class:`DiskAccessesOperations` """ api_version = self._get_api_version('disk_accesses') if api_version == '2020-05-01': @@ -709,6 +714,8 @@ def disk_accesses(self): from ..v2022_03_02.aio.operations import DiskAccessesOperations as OperationClass elif api_version == '2022-07-02': from ..v2022_07_02.aio.operations import DiskAccessesOperations as OperationClass + elif api_version == '2023-01-02': + from ..v2023_01_02.aio.operations import DiskAccessesOperations as OperationClass else: raise ValueError("API version {} does not have operation group 'disk_accesses'".format(api_version)) self._config.api_version = api_version @@ -729,6 +736,7 @@ def disk_encryption_sets(self): * 2021-12-01: :class:`DiskEncryptionSetsOperations` * 2022-03-02: :class:`DiskEncryptionSetsOperations` * 2022-07-02: :class:`DiskEncryptionSetsOperations` + * 2023-01-02: :class:`DiskEncryptionSetsOperations` """ api_version = self._get_api_version('disk_encryption_sets') if api_version == '2019-07-01': @@ -753,6 +761,8 @@ def disk_encryption_sets(self): from ..v2022_03_02.aio.operations import DiskEncryptionSetsOperations as OperationClass elif api_version == '2022-07-02': from ..v2022_07_02.aio.operations import DiskEncryptionSetsOperations as OperationClass + elif 
api_version == '2023-01-02': + from ..v2023_01_02.aio.operations import DiskEncryptionSetsOperations as OperationClass else: raise ValueError("API version {} does not have operation group 'disk_encryption_sets'".format(api_version)) self._config.api_version = api_version @@ -769,6 +779,7 @@ def disk_restore_point(self): * 2021-12-01: :class:`DiskRestorePointOperations` * 2022-03-02: :class:`DiskRestorePointOperations` * 2022-07-02: :class:`DiskRestorePointOperations` + * 2023-01-02: :class:`DiskRestorePointOperations` """ api_version = self._get_api_version('disk_restore_point') if api_version == '2020-09-30': @@ -785,6 +796,8 @@ def disk_restore_point(self): from ..v2022_03_02.aio.operations import DiskRestorePointOperations as OperationClass elif api_version == '2022-07-02': from ..v2022_07_02.aio.operations import DiskRestorePointOperations as OperationClass + elif api_version == '2023-01-02': + from ..v2023_01_02.aio.operations import DiskRestorePointOperations as OperationClass else: raise ValueError("API version {} does not have operation group 'disk_restore_point'".format(api_version)) self._config.api_version = api_version @@ -811,6 +824,7 @@ def disks(self): * 2021-12-01: :class:`DisksOperations` * 2022-03-02: :class:`DisksOperations` * 2022-07-02: :class:`DisksOperations` + * 2023-01-02: :class:`DisksOperations` """ api_version = self._get_api_version('disks') if api_version == '2016-04-30-preview': @@ -847,6 +861,8 @@ def disks(self): from ..v2022_03_02.aio.operations import DisksOperations as OperationClass elif api_version == '2022-07-02': from ..v2022_07_02.aio.operations import DisksOperations as OperationClass + elif api_version == '2023-01-02': + from ..v2023_01_02.aio.operations import DisksOperations as OperationClass else: raise ValueError("API version {} does not have operation group 'disks'".format(api_version)) self._config.api_version = api_version @@ -1496,6 +1512,7 @@ def snapshots(self): * 2021-12-01: :class:`SnapshotsOperations` * 2022-03-02: :class:`SnapshotsOperations` * 2022-07-02: :class:`SnapshotsOperations` + * 2023-01-02: :class:`SnapshotsOperations` """ api_version = self._get_api_version('snapshots') if api_version == '2016-04-30-preview': @@ -1532,6 +1549,8 @@ def snapshots(self): from ..v2022_03_02.aio.operations import SnapshotsOperations as OperationClass elif api_version == '2022-07-02': from ..v2022_07_02.aio.operations import SnapshotsOperations as OperationClass + elif api_version == '2023-01-02': + from ..v2023_01_02.aio.operations import SnapshotsOperations as OperationClass else: raise ValueError("API version {} does not have operation group 'snapshots'".format(api_version)) self._config.api_version = api_version diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/models.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/models.py index aea5bfe640ee4..68233845075cb 100644 --- a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/models.py +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/models.py @@ -6,6 +6,6 @@ # -------------------------------------------------------------------------- from .v2021_07_01.models import * from .v2022_03_03.models import * -from .v2022_07_02.models import * from .v2022_09_04.models import * +from .v2023_01_02.models import * from .v2023_03_01.models import * diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2015_06_15/_version.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2015_06_15/_version.py index 215f19d5391f4..e5754a47ce68f 100644 --- 
a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2015_06_15/_version.py +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2015_06_15/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "30.0.0" +VERSION = "1.0.0b1" diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2016_03_30/_version.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2016_03_30/_version.py index 215f19d5391f4..e5754a47ce68f 100644 --- a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2016_03_30/_version.py +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2016_03_30/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "30.0.0" +VERSION = "1.0.0b1" diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2016_04_30_preview/_version.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2016_04_30_preview/_version.py index 215f19d5391f4..e5754a47ce68f 100644 --- a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2016_04_30_preview/_version.py +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2016_04_30_preview/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "30.0.0" +VERSION = "1.0.0b1" diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2017_03_30/_version.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2017_03_30/_version.py index 215f19d5391f4..e5754a47ce68f 100644 --- a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2017_03_30/_version.py +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2017_03_30/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "30.0.0" +VERSION = "1.0.0b1" diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2017_09_01/_version.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2017_09_01/_version.py index 215f19d5391f4..e5754a47ce68f 100644 --- a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2017_09_01/_version.py +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2017_09_01/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "30.0.0" +VERSION = "1.0.0b1" diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2017_12_01/_version.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2017_12_01/_version.py index 215f19d5391f4..e5754a47ce68f 100644 --- a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2017_12_01/_version.py +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2017_12_01/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -VERSION = "30.0.0" +VERSION = "1.0.0b1" diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_04_01/_version.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_04_01/_version.py index 215f19d5391f4..e5754a47ce68f 100644 --- a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_04_01/_version.py +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_04_01/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "30.0.0" +VERSION = "1.0.0b1" diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_06_01/_version.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_06_01/_version.py index 215f19d5391f4..e5754a47ce68f 100644 --- a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_06_01/_version.py +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_06_01/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "30.0.0" +VERSION = "1.0.0b1" diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_09_30/_version.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_09_30/_version.py index 215f19d5391f4..e5754a47ce68f 100644 --- a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_09_30/_version.py +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_09_30/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "30.0.0" +VERSION = "1.0.0b1" diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_10_01/_version.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_10_01/_version.py index 215f19d5391f4..e5754a47ce68f 100644 --- a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_10_01/_version.py +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_10_01/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "30.0.0" +VERSION = "1.0.0b1" diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_03_01/_version.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_03_01/_version.py index 215f19d5391f4..e5754a47ce68f 100644 --- a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_03_01/_version.py +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_03_01/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "30.0.0" +VERSION = "1.0.0b1" diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_04_01/_version.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_04_01/_version.py index 215f19d5391f4..e5754a47ce68f 100644 --- a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_04_01/_version.py +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_04_01/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -VERSION = "30.0.0" +VERSION = "1.0.0b1" diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_07_01/_version.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_07_01/_version.py index 215f19d5391f4..e5754a47ce68f 100644 --- a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_07_01/_version.py +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_07_01/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "30.0.0" +VERSION = "1.0.0b1" diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_11_01/_version.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_11_01/_version.py index 215f19d5391f4..e5754a47ce68f 100644 --- a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_11_01/_version.py +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_11_01/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "30.0.0" +VERSION = "1.0.0b1" diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_12_01/_version.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_12_01/_version.py index 215f19d5391f4..e5754a47ce68f 100644 --- a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_12_01/_version.py +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_12_01/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "30.0.0" +VERSION = "1.0.0b1" diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_05_01/_version.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_05_01/_version.py index 215f19d5391f4..e5754a47ce68f 100644 --- a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_05_01/_version.py +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_05_01/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "30.0.0" +VERSION = "1.0.0b1" diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_01/_version.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_01/_version.py index 215f19d5391f4..e5754a47ce68f 100644 --- a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_01/_version.py +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_01/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "30.0.0" +VERSION = "1.0.0b1" diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_30/_version.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_30/_version.py index 215f19d5391f4..e5754a47ce68f 100644 --- a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_30/_version.py +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_30/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -VERSION = "30.0.0" +VERSION = "1.0.0b1" diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_09_30/_version.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_09_30/_version.py index 215f19d5391f4..e5754a47ce68f 100644 --- a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_09_30/_version.py +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_09_30/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "30.0.0" +VERSION = "1.0.0b1" diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_10_01_preview/_version.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_10_01_preview/_version.py index 215f19d5391f4..e5754a47ce68f 100644 --- a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_10_01_preview/_version.py +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_10_01_preview/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "30.0.0" +VERSION = "1.0.0b1" diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_12_01/_version.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_12_01/_version.py index 215f19d5391f4..e5754a47ce68f 100644 --- a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_12_01/_version.py +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_12_01/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "30.0.0" +VERSION = "1.0.0b1" diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_03_01/_version.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_03_01/_version.py index 215f19d5391f4..e5754a47ce68f 100644 --- a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_03_01/_version.py +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_03_01/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "30.0.0" +VERSION = "1.0.0b1" diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_04_01/_version.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_04_01/_version.py index 215f19d5391f4..e5754a47ce68f 100644 --- a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_04_01/_version.py +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_04_01/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "30.0.0" +VERSION = "1.0.0b1" diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_07_01/_version.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_07_01/_version.py index 215f19d5391f4..e5754a47ce68f 100644 --- a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_07_01/_version.py +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_07_01/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -VERSION = "30.0.0" +VERSION = "1.0.0b1" diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_08_01/_version.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_08_01/_version.py index 215f19d5391f4..e5754a47ce68f 100644 --- a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_08_01/_version.py +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_08_01/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "30.0.0" +VERSION = "1.0.0b1" diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_10_01/_version.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_10_01/_version.py index 215f19d5391f4..e5754a47ce68f 100644 --- a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_10_01/_version.py +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_10_01/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "30.0.0" +VERSION = "1.0.0b1" diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_11_01/_version.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_11_01/_version.py index 215f19d5391f4..e5754a47ce68f 100644 --- a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_11_01/_version.py +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_11_01/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "30.0.0" +VERSION = "1.0.0b1" diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_12_01/_version.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_12_01/_version.py index 215f19d5391f4..e5754a47ce68f 100644 --- a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_12_01/_version.py +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_12_01/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "30.0.0" +VERSION = "1.0.0b1" diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2022_01_03/_version.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2022_01_03/_version.py index 215f19d5391f4..e5754a47ce68f 100644 --- a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2022_01_03/_version.py +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2022_01_03/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "30.0.0" +VERSION = "1.0.0b1" diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2022_03_01/_version.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2022_03_01/_version.py index 215f19d5391f4..e5754a47ce68f 100644 --- a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2022_03_01/_version.py +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2022_03_01/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -VERSION = "30.0.0" +VERSION = "1.0.0b1" diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2022_03_02/_version.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2022_03_02/_version.py index 215f19d5391f4..e5754a47ce68f 100644 --- a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2022_03_02/_version.py +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2022_03_02/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "30.0.0" +VERSION = "1.0.0b1" diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2022_03_03/_version.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2022_03_03/_version.py index 215f19d5391f4..e5754a47ce68f 100644 --- a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2022_03_03/_version.py +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2022_03_03/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "30.0.0" +VERSION = "1.0.0b1" diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2022_04_04/_version.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2022_04_04/_version.py index 215f19d5391f4..e5754a47ce68f 100644 --- a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2022_04_04/_version.py +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2022_04_04/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "30.0.0" +VERSION = "1.0.0b1" diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2022_07_02/_version.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2022_07_02/_version.py index 215f19d5391f4..e5754a47ce68f 100644 --- a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2022_07_02/_version.py +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2022_07_02/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "30.0.0" +VERSION = "1.0.0b1" diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2022_08_01/_version.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2022_08_01/_version.py index 215f19d5391f4..e5754a47ce68f 100644 --- a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2022_08_01/_version.py +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2022_08_01/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -VERSION = "30.0.0" +VERSION = "1.0.0b1" diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2022_08_01/models/_compute_management_client_enums.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2022_08_01/models/_compute_management_client_enums.py index d9d59cb93c409..beeede152cb50 100644 --- a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2022_08_01/models/_compute_management_client_enums.py +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2022_08_01/models/_compute_management_client_enums.py @@ -27,10 +27,8 @@ class AvailabilitySetSkuTypes(str, Enum, metaclass=CaseInsensitiveEnumMeta): class CachingTypes(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Specifies the caching requirements. :code:`
`:code:`
` Possible values are: - :code:`
`:code:`
` **None** :code:`
`:code:`
` **ReadOnly** - :code:`
`:code:`
` **ReadWrite** :code:`
`:code:`
` Default: **None for Standard - storage. ReadOnly for Premium storage**. + """Specifies the caching requirements. Possible values are: **None** **ReadOnly** + **ReadWrite** Default: **None for Standard storage. ReadOnly for Premium storage**. """ NONE = "None" @@ -106,9 +104,8 @@ class DiskControllerTypes(str, Enum, metaclass=CaseInsensitiveEnumMeta): Generation 2 (https://docs.microsoft.com/en-us/azure/virtual-machines/generation-2), please check the HyperVGenerations capability returned as part of VM sku capabilities in the response of Microsoft.Compute SKUs api for the region contains V2 - (https://docs.microsoft.com/rest/api/compute/resourceskus/list) . :code:`
` For more - information about Disk Controller Types supported please refer to - https://aka.ms/azure-diskcontrollertypes. + (https://docs.microsoft.com/rest/api/compute/resourceskus/list) . For more information about + Disk Controller Types supported please refer to https://aka.ms/azure-diskcontrollertypes. """ SCSI = "SCSI" @@ -116,12 +113,12 @@ class DiskControllerTypes(str, Enum, metaclass=CaseInsensitiveEnumMeta): class DiskCreateOptionTypes(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Specifies how the virtual machine should be created.:code:`
`:code:`
` Possible values - are::code:`
`:code:`
` **Attach** \u2013 This value is used when you are using a - specialized disk to create the virtual machine.:code:`
`:code:`
` **FromImage** \u2013 - This value is used when you are using an image to create the virtual machine. If you are using - a platform image, you also use the imageReference element described above. If you are using a - marketplace image, you also use the plan element previously described. + """Specifies how the virtual machine should be created. Possible values are: **Attach** \u2013 + This value is used when you are using a specialized disk to create the virtual machine. + **FromImage** \u2013 This value is used when you are using an image to create the virtual + machine. If you are using a platform image, you also use the imageReference element described + above. If you are using a marketplace image, you also use the plan element previously + described. """ FROM_IMAGE = "FromImage" @@ -131,10 +128,9 @@ class DiskCreateOptionTypes(str, Enum, metaclass=CaseInsensitiveEnumMeta): class DiskDeleteOptionTypes(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Specifies the behavior of the managed disk when the VM gets deleted i.e whether the managed - disk is deleted or detached. Supported values::code:`
`:code:`
` **Delete** If this value - is used, the managed disk is deleted when VM gets deleted.:code:`
`:code:`
` **Detach** - If this value is used, the managed disk is retained after VM gets - deleted.:code:`
`:code:`
` Minimum api-version: 2021-03-01. + disk is deleted or detached. Supported values: **Delete** If this value is used, the managed + disk is deleted when VM gets deleted. **Detach** If this value is used, the managed disk is + retained after VM gets deleted. Minimum api-version: 2021-03-01. """ DELETE = "Delete" @@ -144,13 +140,13 @@ class DiskDeleteOptionTypes(str, Enum, metaclass=CaseInsensitiveEnumMeta): class DiskDetachOptionTypes(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Specifies the detach behavior to be used while detaching a disk or which is already in the process of detachment from the virtual machine. Supported values: **ForceDetach**. - :code:`
`:code:`
` detachOption: **ForceDetach** is applicable only for managed data - disks. If a previous detachment attempt of the data disk did not complete due to an unexpected - failure from the virtual machine and the disk is still not released then use force-detach as a - last resort option to detach the disk forcibly from the VM. All writes might not have been - flushed when using this detach behavior. :code:`
`:code:`
` This feature is still in - preview mode and is not supported for VirtualMachineScaleSet. To force-detach a data disk - update toBeDetached to 'true' along with setting detachOption: 'ForceDetach'. + detachOption: **ForceDetach** is applicable only for managed data disks. If a previous + detachment attempt of the data disk did not complete due to an unexpected failure from the + virtual machine and the disk is still not released then use force-detach as a last resort + option to detach the disk forcibly from the VM. All writes might not have been flushed when + using this detach behavior. This feature is still in preview mode and is not supported for + VirtualMachineScaleSet. To force-detach a data disk update toBeDetached to 'true' along with + setting detachOption: 'ForceDetach'. """ FORCE_DETACH = "ForceDetach" @@ -306,8 +302,7 @@ class OperatingSystemType(str, Enum, metaclass=CaseInsensitiveEnumMeta): class OperatingSystemTypes(str, Enum, metaclass=CaseInsensitiveEnumMeta): """This property allows you to specify the type of the OS that is included in the disk if creating - a VM from user-image or a specialized VHD. :code:`
`:code:`
` Possible values are: - :code:`
`:code:`
` **Windows** :code:`
`:code:`
` **Linux**. + a VM from user-image or a specialized VHD. Possible values are: **Windows** **Linux**. """ WINDOWS = "Windows" @@ -375,9 +370,7 @@ class PatchOperationStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta): class ProtocolTypes(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Specifies the protocol of WinRM listener. :code:`
`:code:`
` Possible values are: - :code:`
`\ **http** :code:`
`:code:`
` **https**. - """ + """Specifies the protocol of WinRM listener. Possible values are: **http** **https**.""" HTTP = "Http" HTTPS = "Https" @@ -466,10 +459,9 @@ class RollingUpgradeStatusCode(str, Enum, metaclass=CaseInsensitiveEnumMeta): class SecurityEncryptionTypes(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Specifies the EncryptionType of the managed disk. :code:`
` It is set to - DiskWithVMGuestState for encryption of the managed disk along with VMGuestState blob, and - VMGuestStateOnly for encryption of just the VMGuestState blob. :code:`
`:code:`
` NOTE: - It can be set for only Confidential VMs. + """Specifies the EncryptionType of the managed disk. It is set to DiskWithVMGuestState for + encryption of the managed disk along with VMGuestState blob, and VMGuestStateOnly for + encryption of just the VMGuestState blob. NOTE: It can be set for only Confidential VMs. """ VM_GUEST_STATE_ONLY = "VMGuestStateOnly" @@ -478,8 +470,7 @@ class SecurityEncryptionTypes(str, Enum, metaclass=CaseInsensitiveEnumMeta): class SecurityTypes(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Specifies the SecurityType of the virtual machine. It has to be set to any specified value to - enable UefiSettings. :code:`
`:code:`
` Default: UefiSettings will not be enabled unless - this property is set. + enable UefiSettings. Default: UefiSettings will not be enabled unless this property is set. """ TRUSTED_LAUNCH = "TrustedLaunch" @@ -562,8 +553,8 @@ class VirtualMachineEvictionPolicyTypes(str, Enum, metaclass=CaseInsensitiveEnum class VirtualMachinePriorityTypes(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Specifies the priority for a standalone virtual machine or the virtual machines in the scale - set. :code:`
`:code:`
` 'Low' enum will be deprecated in the future, please use 'Spot' as - the enum to deploy Azure Spot VM/VMSS. + set. 'Low' enum will be deprecated in the future, please use 'Spot' as the enum to deploy + Azure Spot VM/VMSS. """ REGULAR = "Regular" @@ -587,18 +578,17 @@ class VirtualMachineScaleSetSkuScaleType(str, Enum, metaclass=CaseInsensitiveEnu class VirtualMachineSizeTypes(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Specifies the size of the virtual machine. :code:`
`:code:`
` The enum data type is - currently deprecated and will be removed by December 23rd 2023. :code:`
`:code:`
` - Recommended way to get the list of available sizes is using these APIs: - :code:`
`:code:`
` `List all available virtual machine sizes in an availability set - `_ - :code:`
`:code:`
` `List all available virtual machine sizes in a region - `_ :code:`
`:code:`
` - `List all available virtual machine sizes for resizing + """Specifies the size of the virtual machine. The enum data type is currently deprecated and will + be removed by December 23rd 2023. Recommended way to get the list of available sizes is using + these APIs: `List all available virtual machine sizes in an availability set + `_ `List all + available virtual machine sizes in a region + `_ `List all available virtual + machine sizes for resizing `_. For more information about virtual machine sizes, see `Sizes for virtual machines - `_. :code:`
`:code:`
` The - available VM sizes depend on region and availability set. + `_. The available VM sizes depend on + region and availability set. """ BASIC_A0 = "Basic_A0" diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2022_08_01/models/_models_py3.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2022_08_01/models/_models_py3.py index c0ce9aa3af877..4bedf3b1cf1da 100644 --- a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2022_08_01/models/_models_py3.py +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2022_08_01/models/_models_py3.py @@ -750,21 +750,19 @@ def __init__(self, **kwargs: Any) -> None: class BillingProfile(_serialization.Model): - """Specifies the billing related details of a Azure Spot VM or VMSS. - :code:`
`:code:`
`Minimum api-version: 2019-03-01. + """Specifies the billing related details of a Azure Spot VM or VMSS. Minimum api-version: + 2019-03-01. :ivar max_price: Specifies the maximum price you are willing to pay for a Azure Spot VM/VMSS. - This price is in US Dollars. :code:`
`:code:`
` This price will be compared with the - current Azure Spot price for the VM size. Also, the prices are compared at the time of - create/update of Azure Spot VM/VMSS and the operation will only succeed if the maxPrice is - greater than the current Azure Spot price. :code:`
`:code:`
` The maxPrice will also be - used for evicting a Azure Spot VM/VMSS if the current Azure Spot price goes beyond the maxPrice - after creation of VM/VMSS. :code:`
`:code:`
` Possible values are: - :code:`
`:code:`
` - Any decimal value greater than zero. Example: 0.01538 - :code:`
`:code:`
` -1 – indicates default price to be up-to on-demand. - :code:`
`:code:`
` You can set the maxPrice to -1 to indicate that the Azure Spot VM/VMSS - should not be evicted for price reasons. Also, the default max price is -1 if it is not - provided by you. :code:`
`:code:`
`Minimum api-version: 2019-03-01. + This price is in US Dollars. This price will be compared with the current Azure Spot price for + the VM size. Also, the prices are compared at the time of create/update of Azure Spot VM/VMSS + and the operation will only succeed if the maxPrice is greater than the current Azure Spot + price. The maxPrice will also be used for evicting a Azure Spot VM/VMSS if the current Azure + Spot price goes beyond the maxPrice after creation of VM/VMSS. Possible values are: - Any + decimal value greater than zero. Example: 0.01538 -1 – indicates default price to be up-to + on-demand. You can set the maxPrice to -1 to indicate that the Azure Spot VM/VMSS should not + be evicted for price reasons. Also, the default max price is -1 if it is not provided by you. + Minimum api-version: 2019-03-01. :vartype max_price: float """ @@ -775,17 +773,15 @@ class BillingProfile(_serialization.Model): def __init__(self, *, max_price: Optional[float] = None, **kwargs: Any) -> None: """ :keyword max_price: Specifies the maximum price you are willing to pay for a Azure Spot - VM/VMSS. This price is in US Dollars. :code:`
`:code:`
` This price will be compared with - the current Azure Spot price for the VM size. Also, the prices are compared at the time of - create/update of Azure Spot VM/VMSS and the operation will only succeed if the maxPrice is - greater than the current Azure Spot price. :code:`
`:code:`
` The maxPrice will also be - used for evicting a Azure Spot VM/VMSS if the current Azure Spot price goes beyond the maxPrice - after creation of VM/VMSS. :code:`
`:code:`
` Possible values are: - :code:`
`:code:`
` - Any decimal value greater than zero. Example: 0.01538 - :code:`
`:code:`
` -1 – indicates default price to be up-to on-demand. - :code:`
`:code:`
` You can set the maxPrice to -1 to indicate that the Azure Spot VM/VMSS - should not be evicted for price reasons. Also, the default max price is -1 if it is not - provided by you. :code:`
`:code:`
`Minimum api-version: 2019-03-01.
+        VM/VMSS. This price is in US Dollars. This price will be compared with the current Azure Spot
+        price for the VM size. Also, the prices are compared at the time of create/update of Azure Spot
+        VM/VMSS and the operation will only succeed if the maxPrice is greater than the current Azure
+        Spot price. The maxPrice will also be used for evicting a Azure Spot VM/VMSS if the current
+        Azure Spot price goes beyond the maxPrice after creation of VM/VMSS. Possible values are: -
+        Any decimal value greater than zero. Example: 0.01538 -1 – indicates default price to be up-to
+        on-demand. You can set the maxPrice to -1 to indicate that the Azure Spot VM/VMSS should not
+        be evicted for price reasons. Also, the default max price is -1 if it is not provided by you.
+        Minimum api-version: 2019-03-01.
         :paramtype max_price: float
         """
         super().__init__(**kwargs)
@@ -794,15 +790,14 @@ def __init__(self, *, max_price: Optional[float] = None, **kwargs: Any) -> None:
 class BootDiagnostics(_serialization.Model):
     """Boot Diagnostics is a debugging feature which allows you to view Console Output and Screenshot
-    to diagnose VM status. :code:`<br>`:code:`<br>` You can easily view the output of your console
-    log. :code:`<br>`:code:`<br>` Azure also enables you to see a screenshot of the VM from the
-    hypervisor.
+    to diagnose VM status. You can easily view the output of your console log. Azure also enables
+    you to see a screenshot of the VM from the hypervisor.
     :ivar enabled: Whether boot diagnostics should be enabled on the Virtual Machine.
     :vartype enabled: bool
     :ivar storage_uri: Uri of the storage account to use for placing the console output and
-    screenshot. :code:`<br>`:code:`<br>`If storageUri is not specified while enabling boot
-    diagnostics, managed storage will be used.
+    screenshot. If storageUri is not specified while enabling boot diagnostics, managed storage
+    will be used.
     :vartype storage_uri: str
     """
@@ -816,8 +811,8 @@ def __init__(self, *, enabled: Optional[bool] = None, storage_uri: Optional[str]
         :keyword enabled: Whether boot diagnostics should be enabled on the Virtual Machine.
         :paramtype enabled: bool
         :keyword storage_uri: Uri of the storage account to use for placing the console output and
-        screenshot. :code:`<br>`:code:`<br>`If storageUri is not specified while enabling boot
-        diagnostics, managed storage will be used.
+        screenshot. If storageUri is not specified while enabling boot diagnostics, managed storage
+        will be used.
         :paramtype storage_uri: str
         """
         super().__init__(**kwargs)
@@ -830,16 +825,14 @@ class BootDiagnosticsInstanceView(_serialization.Model):
     Variables are only populated by the server, and will be ignored when sending a request.
-    :ivar console_screenshot_blob_uri: The console screenshot blob URI.
-    :code:`<br>`:code:`<br>`NOTE: This will **not** be set if boot diagnostics is currently enabled
-    with managed storage.
+    :ivar console_screenshot_blob_uri: The console screenshot blob URI. NOTE: This will **not** be
+    set if boot diagnostics is currently enabled with managed storage.
     :vartype console_screenshot_blob_uri: str
-    :ivar serial_console_log_blob_uri: The serial console log blob Uri.
-    :code:`<br>`:code:`<br>`NOTE: This will **not** be set if boot diagnostics is currently enabled
-    with managed storage.
+    :ivar serial_console_log_blob_uri: The serial console log blob Uri. NOTE: This will **not** be
+    set if boot diagnostics is currently enabled with managed storage.
     :vartype serial_console_log_blob_uri: str
-    :ivar status: The boot diagnostics status information for the VM. :code:`<br>`:code:`<br>`
-    NOTE: It will be set only if there are errors encountered in enabling boot diagnostics.
+    :ivar status: The boot diagnostics status information for the VM. NOTE: It will be set only if
+    there are errors encountered in enabling boot diagnostics.
     :vartype status: ~azure.mgmt.compute.v2022_08_01.models.InstanceViewStatus
     """
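For orientation, a minimal usage sketch of the BootDiagnostics model whose docstring is cleaned up above. This is not part of the generated diff; it assumes the azure-mgmt-compute package and the v2022_08_01 models namespace, and the storage URI is a placeholder.

# Hypothetical sketch; constructor keywords match the __init__ shown above.
from azure.mgmt.compute.v2022_08_01 import models

# Managed storage: enable boot diagnostics without naming a storage account.
managed_boot_diag = models.BootDiagnostics(enabled=True)

# Explicit storage account: console output and screenshots land in this account.
custom_boot_diag = models.BootDiagnostics(
    enabled=True,
    storage_uri="https://mystorageaccount.blob.core.windows.net/",  # placeholder
)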
@@ -1520,27 +1513,23 @@ class DataDisk(_serialization.Model):  # pylint: disable=too-many-instance-attri
     before being attached to the virtual machine. If SourceImage is provided, the destination
     virtual hard drive must not exist.
     :vartype image: ~azure.mgmt.compute.v2022_08_01.models.VirtualHardDisk
-    :ivar caching: Specifies the caching requirements. :code:`<br>`:code:`<br>` Possible values
-    are: :code:`<br>`:code:`<br>` **None** :code:`<br>`:code:`<br>` **ReadOnly**
-    :code:`<br>`:code:`<br>` **ReadWrite** :code:`<br>`:code:`<br>` Default: **None for Standard
-    storage. ReadOnly for Premium storage**. Known values are: "None", "ReadOnly", and "ReadWrite".
+    :ivar caching: Specifies the caching requirements. Possible values are: **None**
+    **ReadOnly** **ReadWrite** Default: **None for Standard storage. ReadOnly for Premium
+    storage**. Known values are: "None", "ReadOnly", and "ReadWrite".
     :vartype caching: str or ~azure.mgmt.compute.v2022_08_01.models.CachingTypes
     :ivar write_accelerator_enabled: Specifies whether writeAccelerator should be enabled or
     disabled on the disk.
     :vartype write_accelerator_enabled: bool
-    :ivar create_option: Specifies how the virtual machine should be
-    created.:code:`<br>`:code:`<br>` Possible values are::code:`<br>`:code:`<br>` **Attach** \u2013
-    This value is used when you are using a specialized disk to create the virtual
-    machine.:code:`<br>`:code:`<br>` **FromImage** \u2013 This value is used when you are using an
-    image to create the virtual machine. If you are using a platform image, you also use the
-    imageReference element described above. If you are using a marketplace image, you also use the
-    plan element previously described. Required. Known values are: "FromImage", "Empty", and
-    "Attach".
+    :ivar create_option: Specifies how the virtual machine should be created. Possible values are:
+    **Attach** \u2013 This value is used when you are using a specialized disk to create the
+    virtual machine. **FromImage** \u2013 This value is used when you are using an image to create
+    the virtual machine. If you are using a platform image, you also use the imageReference element
+    described above. If you are using a marketplace image, you also use the plan element
+    previously described. Required. Known values are: "FromImage", "Empty", and "Attach".
     :vartype create_option: str or ~azure.mgmt.compute.v2022_08_01.models.DiskCreateOptionTypes
     :ivar disk_size_gb: Specifies the size of an empty data disk in gigabytes. This element can be
-    used to overwrite the size of the disk in a virtual machine image. :code:`<br>`:code:`<br>`
-    diskSizeGB is the number of bytes x 1024^3 for the disk and the value cannot be larger than
-    1023.
+    used to overwrite the size of the disk in a virtual machine image. diskSizeGB is the number of
+    bytes x 1024^3 for the disk and the value cannot be larger than 1023.
     :vartype disk_size_gb: int
     :ivar managed_disk: The managed disk parameters.
     :vartype managed_disk: ~azure.mgmt.compute.v2022_08_01.models.ManagedDiskParameters
@@ -1557,19 +1546,17 @@ class DataDisk(_serialization.Model):  # pylint: disable=too-many-instance-attri
     :vartype disk_m_bps_read_write: int
     :ivar detach_option: Specifies the detach behavior to be used while detaching a disk or which
     is already in the process of detachment from the virtual machine. Supported values:
-    **ForceDetach**. :code:`<br>`:code:`<br>` detachOption: **ForceDetach** is applicable only for
-    managed data disks. If a previous detachment attempt of the data disk did not complete due to
-    an unexpected failure from the virtual machine and the disk is still not released then use
-    force-detach as a last resort option to detach the disk forcibly from the VM. All writes might
-    not have been flushed when using this detach behavior. :code:`<br>`:code:`<br>` This feature is
-    still in preview mode and is not supported for VirtualMachineScaleSet. To force-detach a data
-    disk update toBeDetached to 'true' along with setting detachOption: 'ForceDetach'.
-    "ForceDetach"
+    **ForceDetach**. detachOption: **ForceDetach** is applicable only for managed data disks. If a
+    previous detachment attempt of the data disk did not complete due to an unexpected failure from
+    the virtual machine and the disk is still not released then use force-detach as a last resort
+    option to detach the disk forcibly from the VM. All writes might not have been flushed when
+    using this detach behavior. This feature is still in preview mode and is not supported for
+    VirtualMachineScaleSet. To force-detach a data disk update toBeDetached to 'true' along with
+    setting detachOption: 'ForceDetach'. "ForceDetach"
     :vartype detach_option: str or ~azure.mgmt.compute.v2022_08_01.models.DiskDetachOptionTypes
     :ivar delete_option: Specifies whether data disk should be deleted or detached upon VM
-    deletion.:code:`<br>`:code:`<br>` Possible values: :code:`<br>`:code:`<br>` **Delete** If this
-    value is used, the data disk is deleted when VM is deleted.:code:`<br>`:code:`<br>` **Detach**
-    If this value is used, the data disk is retained after VM is deleted.:code:`<br>`:code:`<br>`
+    deletion. Possible values: **Delete** If this value is used, the data disk is deleted when VM
+    is deleted. **Detach** If this value is used, the data disk is retained after VM is deleted.
     The default value is set to **detach**. Known values are: "Delete" and "Detach".
     :vartype delete_option: str or ~azure.mgmt.compute.v2022_08_01.models.DiskDeleteOptionTypes
     """
@@ -1628,27 +1615,23 @@ def __init__(
     before being attached to the virtual machine. If SourceImage is provided, the destination
     virtual hard drive must not exist.
         :paramtype image: ~azure.mgmt.compute.v2022_08_01.models.VirtualHardDisk
-        :keyword caching: Specifies the caching requirements. :code:`<br>`:code:`<br>` Possible values
-        are: :code:`<br>`:code:`<br>` **None** :code:`<br>`:code:`<br>` **ReadOnly**
-        :code:`<br>`:code:`<br>` **ReadWrite** :code:`<br>`:code:`<br>` Default: **None for Standard
-        storage. ReadOnly for Premium storage**. Known values are: "None", "ReadOnly", and "ReadWrite".
+        :keyword caching: Specifies the caching requirements. Possible values are: **None**
+        **ReadOnly** **ReadWrite** Default: **None for Standard storage. ReadOnly for Premium
+        storage**. Known values are: "None", "ReadOnly", and "ReadWrite".
         :paramtype caching: str or ~azure.mgmt.compute.v2022_08_01.models.CachingTypes
         :keyword write_accelerator_enabled: Specifies whether writeAccelerator should be enabled or
         disabled on the disk.
         :paramtype write_accelerator_enabled: bool
-        :keyword create_option: Specifies how the virtual machine should be
-        created.:code:`<br>`:code:`<br>` Possible values are::code:`<br>`:code:`<br>` **Attach** \u2013
-        This value is used when you are using a specialized disk to create the virtual
-        machine.:code:`<br>`:code:`<br>` **FromImage** \u2013 This value is used when you are using an
-        image to create the virtual machine. If you are using a platform image, you also use the
-        imageReference element described above. If you are using a marketplace image, you also use the
-        plan element previously described. Required. Known values are: "FromImage", "Empty", and
-        "Attach".
+        :keyword create_option: Specifies how the virtual machine should be created. Possible values
+        are: **Attach** \u2013 This value is used when you are using a specialized disk to create the
+        virtual machine. **FromImage** \u2013 This value is used when you are using an image to create
+        the virtual machine. If you are using a platform image, you also use the imageReference element
+        described above. If you are using a marketplace image, you also use the plan element
+        previously described. Required. Known values are: "FromImage", "Empty", and "Attach".
         :paramtype create_option: str or ~azure.mgmt.compute.v2022_08_01.models.DiskCreateOptionTypes
         :keyword disk_size_gb: Specifies the size of an empty data disk in gigabytes. This element can
-        be used to overwrite the size of the disk in a virtual machine image. :code:`<br>`:code:`<br>`
-        diskSizeGB is the number of bytes x 1024^3 for the disk and the value cannot be larger than
-        1023.
+        be used to overwrite the size of the disk in a virtual machine image. diskSizeGB is the number
+        of bytes x 1024^3 for the disk and the value cannot be larger than 1023.
         :paramtype disk_size_gb: int
         :keyword managed_disk: The managed disk parameters.
         :paramtype managed_disk: ~azure.mgmt.compute.v2022_08_01.models.ManagedDiskParameters
@@ -1657,19 +1640,17 @@ def __init__(
         :paramtype to_be_detached: bool
         :keyword detach_option: Specifies the detach behavior to be used while detaching a disk or
         which is already in the process of detachment from the virtual machine. Supported values:
-        **ForceDetach**. :code:`<br>`:code:`<br>` detachOption: **ForceDetach** is applicable only for
-        managed data disks. If a previous detachment attempt of the data disk did not complete due to
-        an unexpected failure from the virtual machine and the disk is still not released then use
-        force-detach as a last resort option to detach the disk forcibly from the VM. All writes might
-        not have been flushed when using this detach behavior. :code:`<br>`:code:`<br>` This feature is
-        still in preview mode and is not supported for VirtualMachineScaleSet. To force-detach a data
-        disk update toBeDetached to 'true' along with setting detachOption: 'ForceDetach'.
-        "ForceDetach"
+        **ForceDetach**. detachOption: **ForceDetach** is applicable only for managed data disks. If a
+        previous detachment attempt of the data disk did not complete due to an unexpected failure from
+        the virtual machine and the disk is still not released then use force-detach as a last resort
+        option to detach the disk forcibly from the VM. All writes might not have been flushed when
+        using this detach behavior. This feature is still in preview mode and is not supported for
+        VirtualMachineScaleSet. To force-detach a data disk update toBeDetached to 'true' along with
+        setting detachOption: 'ForceDetach'. "ForceDetach"
         :paramtype detach_option: str or ~azure.mgmt.compute.v2022_08_01.models.DiskDetachOptionTypes
         :keyword delete_option: Specifies whether data disk should be deleted or detached upon VM
-        deletion.:code:`<br>`:code:`<br>` Possible values: :code:`<br>`:code:`<br>` **Delete** If this
-        value is used, the data disk is deleted when VM is deleted.:code:`<br>`:code:`<br>` **Detach**
-        If this value is used, the data disk is retained after VM is deleted.:code:`<br>`:code:`<br>`
+        deletion. Possible values: **Delete** If this value is used, the data disk is deleted when VM
+        is deleted. **Detach** If this value is used, the data disk is retained after VM is deleted.
         The default value is set to **detach**. Known values are: "Delete" and "Detach".
         :paramtype delete_option: str or ~azure.mgmt.compute.v2022_08_01.models.DiskDeleteOptionTypes
         """
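A usage sketch for the DataDisk options documented above, not part of this diff: the lun and name keywords are assumed from the unchanged parts of the model rather than from this hunk, and all values are placeholders.

# Hypothetical sketch of an empty managed data disk.
from azure.mgmt.compute.v2022_08_01 import models

data_disk = models.DataDisk(
    lun=0,                    # assumed required slot number (not shown in this hunk)
    create_option="Empty",    # vs. "Attach" (specialized disk) or "FromImage"
    disk_size_gb=128,
    caching="ReadOnly",       # default would be None for Standard storage
    delete_option="Delete",   # drop the disk together with the VM; default is Detach
)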
@@ -2413,15 +2394,13 @@ def __init__(
 class DiagnosticsProfile(_serialization.Model):
-    """Specifies the boot diagnostic settings state. :code:`<br>`:code:`<br>`Minimum api-version:
-    2015-06-15.
+    """Specifies the boot diagnostic settings state. Minimum api-version: 2015-06-15.
     :ivar boot_diagnostics: Boot Diagnostics is a debugging feature which allows you to view
-    Console Output and Screenshot to diagnose VM status. :code:`<br>`\ **NOTE**\ : If storageUri is
-    being specified then ensure that the storage account is in the same region and subscription as
-    the VM. :code:`<br>`:code:`<br>` You can easily view the output of your console log.
-    :code:`<br>`:code:`<br>` Azure also enables you to see a screenshot of the VM from the
-    hypervisor.
+    Console Output and Screenshot to diagnose VM status. **NOTE**\ : If storageUri is being
+    specified then ensure that the storage account is in the same region and subscription as the
+    VM. You can easily view the output of your console log. Azure also enables you to see a
+    screenshot of the VM from the hypervisor.
     :vartype boot_diagnostics: ~azure.mgmt.compute.v2022_08_01.models.BootDiagnostics
     """
@@ -2432,11 +2411,10 @@ class DiagnosticsProfile(_serialization.Model):
     def __init__(self, *, boot_diagnostics: Optional["_models.BootDiagnostics"] = None, **kwargs: Any) -> None:
         """
         :keyword boot_diagnostics: Boot Diagnostics is a debugging feature which allows you to view
-        Console Output and Screenshot to diagnose VM status. :code:`<br>`\ **NOTE**\ : If storageUri is
-        being specified then ensure that the storage account is in the same region and subscription as
-        the VM. :code:`<br>`:code:`<br>` You can easily view the output of your console log.
-        :code:`<br>`:code:`<br>` Azure also enables you to see a screenshot of the VM from the
-        hypervisor.
+        Console Output and Screenshot to diagnose VM status. **NOTE**\ : If storageUri is being
+        specified then ensure that the storage account is in the same region and subscription as the
+        VM. You can easily view the output of your console log. Azure also enables you to see a
+        screenshot of the VM from the hypervisor.
         :paramtype boot_diagnostics: ~azure.mgmt.compute.v2022_08_01.models.BootDiagnostics
         """
         super().__init__(**kwargs)
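A short sketch combining the two models above (illustrative only, assuming the v2022_08_01 models namespace); the profile would typically be assigned to a VirtualMachine's diagnostics_profile field.

# Hypothetical sketch: boot diagnostics backed by managed storage.
from azure.mgmt.compute.v2022_08_01 import models

diagnostics_profile = models.DiagnosticsProfile(
    boot_diagnostics=models.BootDiagnostics(enabled=True)  # no storage_uri: managed storage
)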
@@ -2445,17 +2423,14 @@ def __init__(self, *, boot_diagnostics: Optional["_models.BootDiagnostics"] = No
 class DiffDiskSettings(_serialization.Model):
     """Describes the parameters of ephemeral disk settings that can be specified for operating system
-    disk. :code:`<br>`:code:`<br>` NOTE: The ephemeral disk settings can only be specified for
-    managed disk.
+    disk. NOTE: The ephemeral disk settings can only be specified for managed disk.
     :ivar option: Specifies the ephemeral disk settings for operating system disk. "Local"
     :vartype option: str or ~azure.mgmt.compute.v2022_08_01.models.DiffDiskOptions
-    :ivar placement: Specifies the ephemeral disk placement for operating system
-    disk.:code:`<br>`:code:`<br>` Possible values are: :code:`<br>`:code:`<br>` **CacheDisk**
-    :code:`<br>`:code:`<br>` **ResourceDisk** :code:`<br>`:code:`<br>` Default: **CacheDisk** if
-    one is configured for the VM size otherwise **ResourceDisk** is used.:code:`<br>`:code:`<br>`
-    Refer to VM size documentation for Windows VM at
-    https://docs.microsoft.com/azure/virtual-machines/windows/sizes and Linux VM at
+    :ivar placement: Specifies the ephemeral disk placement for operating system disk. Possible
+    values are: **CacheDisk** **ResourceDisk** Default: **CacheDisk** if one is configured for
+    the VM size otherwise **ResourceDisk** is used. Refer to VM size documentation for Windows VM
+    at https://docs.microsoft.com/azure/virtual-machines/windows/sizes and Linux VM at
     https://docs.microsoft.com/azure/virtual-machines/linux/sizes to check which VM sizes exposes a
     cache disk. Known values are: "CacheDisk" and "ResourceDisk".
     :vartype placement: str or ~azure.mgmt.compute.v2022_08_01.models.DiffDiskPlacement
@@ -2476,12 +2451,10 @@ def __init__(
         :keyword option: Specifies the ephemeral disk settings for operating system disk. "Local"
         :paramtype option: str or ~azure.mgmt.compute.v2022_08_01.models.DiffDiskOptions
-        :keyword placement: Specifies the ephemeral disk placement for operating system
-        disk.:code:`<br>`:code:`<br>` Possible values are: :code:`<br>`:code:`<br>` **CacheDisk**
-        :code:`<br>`:code:`<br>` **ResourceDisk** :code:`<br>`:code:`<br>` Default: **CacheDisk** if
-        one is configured for the VM size otherwise **ResourceDisk** is used.:code:`<br>`:code:`<br>`
-        Refer to VM size documentation for Windows VM at
-        https://docs.microsoft.com/azure/virtual-machines/windows/sizes and Linux VM at
+        :keyword placement: Specifies the ephemeral disk placement for operating system disk. Possible
+        values are: **CacheDisk** **ResourceDisk** Default: **CacheDisk** if one is configured for
+        the VM size otherwise **ResourceDisk** is used. Refer to VM size documentation for Windows VM
+        at https://docs.microsoft.com/azure/virtual-machines/windows/sizes and Linux VM at
         https://docs.microsoft.com/azure/virtual-machines/linux/sizes to check which VM sizes exposes a
         cache disk. Known values are: "CacheDisk" and "ResourceDisk".
         :paramtype placement: str or ~azure.mgmt.compute.v2022_08_01.models.DiffDiskPlacement
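A minimal sketch of the ephemeral disk settings described above (illustrative, not part of this diff); it only applies to managed OS disks, and CacheDisk placement requires a VM size that exposes a cache disk.

# Hypothetical sketch of ephemeral OS disk settings.
from azure.mgmt.compute.v2022_08_01 import models

diff_disk_settings = models.DiffDiskSettings(
    option="Local",          # the only documented ephemeral option
    placement="CacheDisk",   # or "ResourceDisk"
)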
@@ -2535,9 +2508,8 @@ def __init__(self, *, id: Optional[str] = None, **kwargs: Any) -> None:  # pylin
 class DiskEncryptionSetParameters(SubResource):
     """Describes the parameter of customer managed disk encryption set resource id that can be
-    specified for disk. :code:`<br>`:code:`<br>` NOTE: The disk encryption set resource id can only
-    be specified for managed disk. Please refer https://aka.ms/mdssewithcmkoverview for more
-    details.
+    specified for disk. NOTE: The disk encryption set resource id can only be specified for
+    managed disk. Please refer https://aka.ms/mdssewithcmkoverview for more details.
     :ivar id: Resource Id.
     :vartype id: str
@@ -2601,8 +2573,8 @@ class DiskInstanceView(_serialization.Model):
     :ivar name: The disk name.
     :vartype name: str
-    :ivar encryption_settings: Specifies the encryption settings for the OS Disk.
-    :code:`<br>`:code:`<br>` Minimum api-version: 2015-06-15.
+    :ivar encryption_settings: Specifies the encryption settings for the OS Disk. Minimum
+    api-version: 2015-06-15.
     :vartype encryption_settings:
     list[~azure.mgmt.compute.v2022_08_01.models.DiskEncryptionSettings]
     :ivar statuses: The resource status information.
@@ -2626,8 +2598,8 @@ def __init__(
         :keyword name: The disk name.
         :paramtype name: str
-        :keyword encryption_settings: Specifies the encryption settings for the OS Disk.
-        :code:`<br>`:code:`<br>` Minimum api-version: 2015-06-15.
+        :keyword encryption_settings: Specifies the encryption settings for the OS Disk. Minimum
+        api-version: 2015-06-15.
         :paramtype encryption_settings:
         list[~azure.mgmt.compute.v2022_08_01.models.DiskEncryptionSettings]
         :keyword statuses: The resource status information.
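A sketch of pointing a managed disk at a customer-managed disk encryption set, as described above. The ManagedDiskParameters field name and the resource ID are assumptions for illustration, not taken from this hunk.

# Hypothetical sketch; the resource ID is a placeholder.
from azure.mgmt.compute.v2022_08_01 import models

des = models.DiskEncryptionSetParameters(
    id=(
        "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg"
        "/providers/Microsoft.Compute/diskEncryptionSets/myDes"
    )
)
managed_disk = models.ManagedDiskParameters(disk_encryption_set=des)  # assumed keyword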
@@ -2740,36 +2712,36 @@ def __init__(
 class HardwareProfile(_serialization.Model):
     """Specifies the hardware settings for the virtual machine.
-    :ivar vm_size: Specifies the size of the virtual machine. :code:`<br>`:code:`<br>` The enum
-    data type is currently deprecated and will be removed by December 23rd 2023.
-    :code:`<br>`:code:`<br>` Recommended way to get the list of available sizes is using these
-    APIs: :code:`<br>`:code:`<br>` `List all available virtual machine sizes in an availability set
-    `_
-    :code:`<br>`:code:`<br>` `List all available virtual machine sizes in a region
-    `_ :code:`<br>`:code:`<br>`
-    `List all available virtual machine sizes for resizing
+    :ivar vm_size: Specifies the size of the virtual machine. The enum data type is currently
+    deprecated and will be removed by December 23rd 2023. Recommended way to get the list of
+    available sizes is using these APIs: `List all available virtual machine sizes in an
+    availability set
+    `_ `List all
+    available virtual machine sizes in a region
+    `_ `List all available virtual
+    machine sizes for resizing
     `_. For more
     information about virtual machine sizes, see `Sizes for virtual machines
-    `_. :code:`
`:code:`
` The - available VM sizes depend on region and availability set. Known values are: "Basic_A0", - "Basic_A1", "Basic_A2", "Basic_A3", "Basic_A4", "Standard_A0", "Standard_A1", "Standard_A2", - "Standard_A3", "Standard_A4", "Standard_A5", "Standard_A6", "Standard_A7", "Standard_A8", - "Standard_A9", "Standard_A10", "Standard_A11", "Standard_A1_v2", "Standard_A2_v2", - "Standard_A4_v2", "Standard_A8_v2", "Standard_A2m_v2", "Standard_A4m_v2", "Standard_A8m_v2", - "Standard_B1s", "Standard_B1ms", "Standard_B2s", "Standard_B2ms", "Standard_B4ms", - "Standard_B8ms", "Standard_D1", "Standard_D2", "Standard_D3", "Standard_D4", "Standard_D11", - "Standard_D12", "Standard_D13", "Standard_D14", "Standard_D1_v2", "Standard_D2_v2", - "Standard_D3_v2", "Standard_D4_v2", "Standard_D5_v2", "Standard_D2_v3", "Standard_D4_v3", - "Standard_D8_v3", "Standard_D16_v3", "Standard_D32_v3", "Standard_D64_v3", "Standard_D2s_v3", - "Standard_D4s_v3", "Standard_D8s_v3", "Standard_D16s_v3", "Standard_D32s_v3", - "Standard_D64s_v3", "Standard_D11_v2", "Standard_D12_v2", "Standard_D13_v2", "Standard_D14_v2", - "Standard_D15_v2", "Standard_DS1", "Standard_DS2", "Standard_DS3", "Standard_DS4", - "Standard_DS11", "Standard_DS12", "Standard_DS13", "Standard_DS14", "Standard_DS1_v2", - "Standard_DS2_v2", "Standard_DS3_v2", "Standard_DS4_v2", "Standard_DS5_v2", "Standard_DS11_v2", - "Standard_DS12_v2", "Standard_DS13_v2", "Standard_DS14_v2", "Standard_DS15_v2", - "Standard_DS13-4_v2", "Standard_DS13-2_v2", "Standard_DS14-8_v2", "Standard_DS14-4_v2", - "Standard_E2_v3", "Standard_E4_v3", "Standard_E8_v3", "Standard_E16_v3", "Standard_E32_v3", - "Standard_E64_v3", "Standard_E2s_v3", "Standard_E4s_v3", "Standard_E8s_v3", "Standard_E16s_v3", + `_. The available VM sizes depend on + region and availability set. 
Known values are: "Basic_A0", "Basic_A1", "Basic_A2", "Basic_A3", + "Basic_A4", "Standard_A0", "Standard_A1", "Standard_A2", "Standard_A3", "Standard_A4", + "Standard_A5", "Standard_A6", "Standard_A7", "Standard_A8", "Standard_A9", "Standard_A10", + "Standard_A11", "Standard_A1_v2", "Standard_A2_v2", "Standard_A4_v2", "Standard_A8_v2", + "Standard_A2m_v2", "Standard_A4m_v2", "Standard_A8m_v2", "Standard_B1s", "Standard_B1ms", + "Standard_B2s", "Standard_B2ms", "Standard_B4ms", "Standard_B8ms", "Standard_D1", + "Standard_D2", "Standard_D3", "Standard_D4", "Standard_D11", "Standard_D12", "Standard_D13", + "Standard_D14", "Standard_D1_v2", "Standard_D2_v2", "Standard_D3_v2", "Standard_D4_v2", + "Standard_D5_v2", "Standard_D2_v3", "Standard_D4_v3", "Standard_D8_v3", "Standard_D16_v3", + "Standard_D32_v3", "Standard_D64_v3", "Standard_D2s_v3", "Standard_D4s_v3", "Standard_D8s_v3", + "Standard_D16s_v3", "Standard_D32s_v3", "Standard_D64s_v3", "Standard_D11_v2", + "Standard_D12_v2", "Standard_D13_v2", "Standard_D14_v2", "Standard_D15_v2", "Standard_DS1", + "Standard_DS2", "Standard_DS3", "Standard_DS4", "Standard_DS11", "Standard_DS12", + "Standard_DS13", "Standard_DS14", "Standard_DS1_v2", "Standard_DS2_v2", "Standard_DS3_v2", + "Standard_DS4_v2", "Standard_DS5_v2", "Standard_DS11_v2", "Standard_DS12_v2", + "Standard_DS13_v2", "Standard_DS14_v2", "Standard_DS15_v2", "Standard_DS13-4_v2", + "Standard_DS13-2_v2", "Standard_DS14-8_v2", "Standard_DS14-4_v2", "Standard_E2_v3", + "Standard_E4_v3", "Standard_E8_v3", "Standard_E16_v3", "Standard_E32_v3", "Standard_E64_v3", + "Standard_E2s_v3", "Standard_E4s_v3", "Standard_E8s_v3", "Standard_E16s_v3", "Standard_E32s_v3", "Standard_E64s_v3", "Standard_E32-16_v3", "Standard_E32-8s_v3", "Standard_E64-32s_v3", "Standard_E64-16s_v3", "Standard_F1", "Standard_F2", "Standard_F4", "Standard_F8", "Standard_F16", "Standard_F1s", "Standard_F2s", "Standard_F4s", "Standard_F8s", @@ -2788,10 +2760,9 @@ class HardwareProfile(_serialization.Model): "Standard_NV24". :vartype vm_size: str or ~azure.mgmt.compute.v2022_08_01.models.VirtualMachineSizeTypes :ivar vm_size_properties: Specifies the properties for customizing the size of the virtual - machine. Minimum api-version: 2021-07-01. :code:`
<br>`:code:`<br>` This feature is still in
-    preview mode and is not supported for VirtualMachineScaleSet. :code:`<br>`:code:`<br>` Please
-    follow the instructions in `VM Customization `_ for more
-    details.
+    machine. Minimum api-version: 2021-07-01. This feature is still in preview mode and is not
+    supported for VirtualMachineScaleSet. Please follow the instructions in `VM Customization
+    `_ for more details.
     :vartype vm_size_properties: ~azure.mgmt.compute.v2022_08_01.models.VMSizeProperties
     """
@@ -2808,36 +2779,36 @@ def __init__(
         **kwargs: Any
     ) -> None:
         """
-        :keyword vm_size: Specifies the size of the virtual machine. :code:`<br>`:code:`<br>` The enum
-        data type is currently deprecated and will be removed by December 23rd 2023.
-        :code:`<br>`:code:`<br>` Recommended way to get the list of available sizes is using these
-        APIs: :code:`<br>`:code:`<br>` `List all available virtual machine sizes in an availability set
-        `_
-        :code:`<br>`:code:`<br>` `List all available virtual machine sizes in a region
-        `_ :code:`<br>`:code:`<br>`
-        `List all available virtual machine sizes for resizing
+        :keyword vm_size: Specifies the size of the virtual machine. The enum data type is currently
+        deprecated and will be removed by December 23rd 2023. Recommended way to get the list of
+        available sizes is using these APIs: `List all available virtual machine sizes in an
+        availability set
+        `_ `List all
+        available virtual machine sizes in a region
+        `_ `List all available virtual
+        machine sizes for resizing
         `_. For more
         information about virtual machine sizes, see `Sizes for virtual machines
-        `_. :code:`
`:code:`
` The - available VM sizes depend on region and availability set. Known values are: "Basic_A0", - "Basic_A1", "Basic_A2", "Basic_A3", "Basic_A4", "Standard_A0", "Standard_A1", "Standard_A2", - "Standard_A3", "Standard_A4", "Standard_A5", "Standard_A6", "Standard_A7", "Standard_A8", - "Standard_A9", "Standard_A10", "Standard_A11", "Standard_A1_v2", "Standard_A2_v2", - "Standard_A4_v2", "Standard_A8_v2", "Standard_A2m_v2", "Standard_A4m_v2", "Standard_A8m_v2", - "Standard_B1s", "Standard_B1ms", "Standard_B2s", "Standard_B2ms", "Standard_B4ms", - "Standard_B8ms", "Standard_D1", "Standard_D2", "Standard_D3", "Standard_D4", "Standard_D11", - "Standard_D12", "Standard_D13", "Standard_D14", "Standard_D1_v2", "Standard_D2_v2", - "Standard_D3_v2", "Standard_D4_v2", "Standard_D5_v2", "Standard_D2_v3", "Standard_D4_v3", - "Standard_D8_v3", "Standard_D16_v3", "Standard_D32_v3", "Standard_D64_v3", "Standard_D2s_v3", - "Standard_D4s_v3", "Standard_D8s_v3", "Standard_D16s_v3", "Standard_D32s_v3", - "Standard_D64s_v3", "Standard_D11_v2", "Standard_D12_v2", "Standard_D13_v2", "Standard_D14_v2", - "Standard_D15_v2", "Standard_DS1", "Standard_DS2", "Standard_DS3", "Standard_DS4", - "Standard_DS11", "Standard_DS12", "Standard_DS13", "Standard_DS14", "Standard_DS1_v2", - "Standard_DS2_v2", "Standard_DS3_v2", "Standard_DS4_v2", "Standard_DS5_v2", "Standard_DS11_v2", - "Standard_DS12_v2", "Standard_DS13_v2", "Standard_DS14_v2", "Standard_DS15_v2", - "Standard_DS13-4_v2", "Standard_DS13-2_v2", "Standard_DS14-8_v2", "Standard_DS14-4_v2", - "Standard_E2_v3", "Standard_E4_v3", "Standard_E8_v3", "Standard_E16_v3", "Standard_E32_v3", - "Standard_E64_v3", "Standard_E2s_v3", "Standard_E4s_v3", "Standard_E8s_v3", "Standard_E16s_v3", + `_. The available VM sizes depend on + region and availability set. 
Known values are: "Basic_A0", "Basic_A1", "Basic_A2", "Basic_A3", + "Basic_A4", "Standard_A0", "Standard_A1", "Standard_A2", "Standard_A3", "Standard_A4", + "Standard_A5", "Standard_A6", "Standard_A7", "Standard_A8", "Standard_A9", "Standard_A10", + "Standard_A11", "Standard_A1_v2", "Standard_A2_v2", "Standard_A4_v2", "Standard_A8_v2", + "Standard_A2m_v2", "Standard_A4m_v2", "Standard_A8m_v2", "Standard_B1s", "Standard_B1ms", + "Standard_B2s", "Standard_B2ms", "Standard_B4ms", "Standard_B8ms", "Standard_D1", + "Standard_D2", "Standard_D3", "Standard_D4", "Standard_D11", "Standard_D12", "Standard_D13", + "Standard_D14", "Standard_D1_v2", "Standard_D2_v2", "Standard_D3_v2", "Standard_D4_v2", + "Standard_D5_v2", "Standard_D2_v3", "Standard_D4_v3", "Standard_D8_v3", "Standard_D16_v3", + "Standard_D32_v3", "Standard_D64_v3", "Standard_D2s_v3", "Standard_D4s_v3", "Standard_D8s_v3", + "Standard_D16s_v3", "Standard_D32s_v3", "Standard_D64s_v3", "Standard_D11_v2", + "Standard_D12_v2", "Standard_D13_v2", "Standard_D14_v2", "Standard_D15_v2", "Standard_DS1", + "Standard_DS2", "Standard_DS3", "Standard_DS4", "Standard_DS11", "Standard_DS12", + "Standard_DS13", "Standard_DS14", "Standard_DS1_v2", "Standard_DS2_v2", "Standard_DS3_v2", + "Standard_DS4_v2", "Standard_DS5_v2", "Standard_DS11_v2", "Standard_DS12_v2", + "Standard_DS13_v2", "Standard_DS14_v2", "Standard_DS15_v2", "Standard_DS13-4_v2", + "Standard_DS13-2_v2", "Standard_DS14-8_v2", "Standard_DS14-4_v2", "Standard_E2_v3", + "Standard_E4_v3", "Standard_E8_v3", "Standard_E16_v3", "Standard_E32_v3", "Standard_E64_v3", + "Standard_E2s_v3", "Standard_E4s_v3", "Standard_E8s_v3", "Standard_E16s_v3", "Standard_E32s_v3", "Standard_E64s_v3", "Standard_E32-16_v3", "Standard_E32-8s_v3", "Standard_E64-32s_v3", "Standard_E64-16s_v3", "Standard_F1", "Standard_F2", "Standard_F4", "Standard_F8", "Standard_F16", "Standard_F1s", "Standard_F2s", "Standard_F4s", "Standard_F8s", @@ -2856,10 +2827,9 @@ def __init__( "Standard_NV24". :paramtype vm_size: str or ~azure.mgmt.compute.v2022_08_01.models.VirtualMachineSizeTypes :keyword vm_size_properties: Specifies the properties for customizing the size of the virtual - machine. Minimum api-version: 2021-07-01. :code:`
<br>`:code:`<br>` This feature is still in
-        preview mode and is not supported for VirtualMachineScaleSet. :code:`<br>`:code:`<br>` Please
-        follow the instructions in `VM Customization `_ for more
-        details.
+        machine. Minimum api-version: 2021-07-01. This feature is still in preview mode and is not
+        supported for VirtualMachineScaleSet. Please follow the instructions in `VM Customization
+        `_ for more details.
         :paramtype vm_size_properties: ~azure.mgmt.compute.v2022_08_01.models.VMSizeProperties
        """
        super().__init__(**kwargs)
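A one-line usage sketch of the HardwareProfile described above (illustrative only); the size string is one example from the documented VirtualMachineSizeTypes values.

# Hypothetical sketch of a hardware profile.
from azure.mgmt.compute.v2022_08_01 import models

hardware_profile = models.HardwareProfile(vm_size="Standard_D2s_v3")
# The docstring above recommends listing sizes via the sizes APIs rather than
# relying on the deprecated enum of known values.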
@@ -3765,9 +3735,8 @@ def __init__(self, **kwargs: Any) -> None:
 class LinuxConfiguration(_serialization.Model):
-    """Specifies the Linux operating system settings on the virtual machine.
-    :code:`<br>`:code:`<br>`For a list of supported Linux distributions, see `Linux on
-    Azure-Endorsed Distributions
+    """Specifies the Linux operating system settings on the virtual machine. For a list of supported
+    Linux distributions, see `Linux on Azure-Endorsed Distributions
     `_.
     :ivar disable_password_authentication: Specifies whether password authentication should be
     disabled.
     :vartype disable_password_authentication: bool
     :ivar ssh: Specifies the ssh key configuration for a Linux OS.
     :vartype ssh: ~azure.mgmt.compute.v2022_08_01.models.SshConfiguration
     :ivar provision_vm_agent: Indicates whether virtual machine agent should be provisioned on the
-    virtual machine. :code:`<br>`:code:`<br>` When this property is not specified in the request
-    body, default behavior is to set it to true. This will ensure that VM Agent is installed on
-    the VM so that extensions can be added to the VM later.
+    virtual machine. When this property is not specified in the request body, default behavior is
+    to set it to true. This will ensure that VM Agent is installed on the VM so that extensions
+    can be added to the VM later.
     :vartype provision_vm_agent: bool
     :ivar patch_settings: [Preview Feature] Specifies settings related to VM Guest Patching on
     Linux.
@@ -3813,9 +3782,9 @@ def __init__(
         :keyword ssh: Specifies the ssh key configuration for a Linux OS.
         :paramtype ssh: ~azure.mgmt.compute.v2022_08_01.models.SshConfiguration
         :keyword provision_vm_agent: Indicates whether virtual machine agent should be provisioned on
-        the virtual machine. :code:`<br>`:code:`<br>` When this property is not specified in the
-        request body, default behavior is to set it to true. This will ensure that VM Agent is
-        installed on the VM so that extensions can be added to the VM later.
+        the virtual machine. When this property is not specified in the request body, default behavior
+        is to set it to true. This will ensure that VM Agent is installed on the VM so that extensions
+        can be added to the VM later.
         :paramtype provision_vm_agent: bool
         :keyword patch_settings: [Preview Feature] Specifies settings related to VM Guest Patching on
         Linux.
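A sketch of a Linux configuration with SSH-only login, per the docstring above. The SshConfiguration/SshPublicKey shapes are assumed from elsewhere in this file, and the key material is a placeholder.

# Hypothetical sketch; not part of the generated diff.
from azure.mgmt.compute.v2022_08_01 import models

linux_config = models.LinuxConfiguration(
    disable_password_authentication=True,
    provision_vm_agent=True,  # omitting it would also default to True
    ssh=models.SshConfiguration(
        public_keys=[
            models.SshPublicKey(
                path="/home/azureuser/.ssh/authorized_keys",
                key_data="ssh-rsa AAAA... placeholder-public-key",
            )
        ]
    ),
)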
@@ -4451,19 +4420,18 @@ class OSDisk(_serialization.Model):  # pylint: disable=too-many-instance-attributes
-    """Specifies information about the operating system disk used by the virtual machine.
-    :code:`<br>`:code:`<br>` For more information about disks, see `About disks and VHDs for Azure
-    virtual machines `_.
+    """Specifies information about the operating system disk used by the virtual machine. For more
+    information about disks, see `About disks and VHDs for Azure virtual machines
+    `_.
     All required parameters must be populated in order to send to Azure.
     :ivar os_type: This property allows you to specify the type of the OS that is included in the
-    disk if creating a VM from user-image or a specialized VHD. :code:`<br>`:code:`<br>` Possible
-    values are: :code:`<br>`:code:`<br>` **Windows** :code:`<br>`:code:`<br>` **Linux**. Known
-    values are: "Windows" and "Linux".
+    disk if creating a VM from user-image or a specialized VHD. Possible values are: **Windows**
+    **Linux**. Known values are: "Windows" and "Linux".
     :vartype os_type: str or ~azure.mgmt.compute.v2022_08_01.models.OperatingSystemTypes
-    :ivar encryption_settings: Specifies the encryption settings for the OS Disk.
-    :code:`<br>`:code:`<br>` Minimum api-version: 2015-06-15.
+    :ivar encryption_settings: Specifies the encryption settings for the OS Disk. Minimum
+    api-version: 2015-06-15.
     :vartype encryption_settings: ~azure.mgmt.compute.v2022_08_01.models.DiskEncryptionSettings
     :ivar name: The disk name.
     :vartype name: str
@@ -4473,11 +4441,9 @@ class OSDisk(_serialization.Model):  # pylint: disable=too-many-instance-attribu
     before being attached to the virtual machine. If SourceImage is provided, the destination
     virtual hard drive must not exist.
     :vartype image: ~azure.mgmt.compute.v2022_08_01.models.VirtualHardDisk
-    :ivar caching: Specifies the caching requirements. :code:`<br>`:code:`<br>` Possible values
-    are: :code:`<br>`:code:`<br>` **None** :code:`<br>`:code:`<br>` **ReadOnly**
-    :code:`<br>`:code:`<br>` **ReadWrite** :code:`<br>`:code:`<br>` Default: **None** for Standard
-    storage. **ReadOnly** for Premium storage. Known values are: "None", "ReadOnly", and
-    "ReadWrite".
+    :ivar caching: Specifies the caching requirements. Possible values are: **None**
+    **ReadOnly** **ReadWrite** Default: **None** for Standard storage. **ReadOnly** for Premium
+    storage. Known values are: "None", "ReadOnly", and "ReadWrite".
     :vartype caching: str or ~azure.mgmt.compute.v2022_08_01.models.CachingTypes
     :ivar write_accelerator_enabled: Specifies whether writeAccelerator should be enabled or
     disabled on the disk.
     :vartype write_accelerator_enabled: bool
@@ -4485,29 +4451,25 @@ class OSDisk(_serialization.Model):  # pylint: disable=too-many-instance-attribu
     :ivar diff_disk_settings: Specifies the ephemeral Disk Settings for the operating system disk
     used by the virtual machine.
     :vartype diff_disk_settings: ~azure.mgmt.compute.v2022_08_01.models.DiffDiskSettings
-    :ivar create_option: Specifies how the virtual machine should be
-    created.:code:`<br>`:code:`<br>` Possible values are::code:`<br>`:code:`<br>` **Attach** \u2013
-    This value is used when you are using a specialized disk to create the virtual
-    machine.:code:`<br>`:code:`<br>` **FromImage** \u2013 This value is used when you are using an
-    image to create the virtual machine. If you are using a platform image, you also use the
-    imageReference element described above. If you are using a marketplace image, you also use the
-    plan element previously described. Required. Known values are: "FromImage", "Empty", and
-    "Attach".
+    :ivar create_option: Specifies how the virtual machine should be created. Possible values are:
+    **Attach** \u2013 This value is used when you are using a specialized disk to create the
+    virtual machine. **FromImage** \u2013 This value is used when you are using an image to create
+    the virtual machine. If you are using a platform image, you also use the imageReference element
+    described above. If you are using a marketplace image, you also use the plan element
+    previously described. Required. Known values are: "FromImage", "Empty", and "Attach".
     :vartype create_option: str or ~azure.mgmt.compute.v2022_08_01.models.DiskCreateOptionTypes
     :ivar disk_size_gb: Specifies the size of an empty data disk in gigabytes. This element can be
-    used to overwrite the size of the disk in a virtual machine image. :code:`<br>`:code:`<br>`
-    diskSizeGB is the number of bytes x 1024^3 for the disk and the value cannot be larger than
-    1023.
+    used to overwrite the size of the disk in a virtual machine image. diskSizeGB is the number of
+    bytes x 1024^3 for the disk and the value cannot be larger than 1023.
     :vartype disk_size_gb: int
     :ivar managed_disk: The managed disk parameters.
     :vartype managed_disk: ~azure.mgmt.compute.v2022_08_01.models.ManagedDiskParameters
     :ivar delete_option: Specifies whether OS Disk should be deleted or detached upon VM deletion.
-    :code:`<br>`:code:`<br>` Possible values: :code:`<br>`:code:`<br>` **Delete** If this value is
-    used, the OS disk is deleted when VM is deleted.:code:`<br>`:code:`<br>` **Detach** If this
-    value is used, the os disk is retained after VM is deleted. :code:`<br>`:code:`<br>` The
-    default value is set to **detach**. For an ephemeral OS Disk, the default value is set to
-    **Delete**. User cannot change the delete option for ephemeral OS Disk. Known values are:
-    "Delete" and "Detach".
+    Possible values: **Delete** If this value is used, the OS disk is deleted when VM is deleted.
+    **Detach** If this value is used, the os disk is retained after VM is deleted. The default
+    value is set to **detach**. For an ephemeral OS Disk, the default value is set to **Delete**.
+    User cannot change the delete option for ephemeral OS Disk. Known values are: "Delete" and
+    "Detach".
     :vartype delete_option: str or ~azure.mgmt.compute.v2022_08_01.models.DiskDeleteOptionTypes
     """
@@ -4549,12 +4511,11 @@ def __init__(
     ) -> None:
         """
         :keyword os_type: This property allows you to specify the type of the OS that is included in
-        the disk if creating a VM from user-image or a specialized VHD. :code:`<br>`:code:`<br>`
-        Possible values are: :code:`<br>`:code:`<br>` **Windows** :code:`<br>`:code:`<br>` **Linux**.
-        Known values are: "Windows" and "Linux".
+        the disk if creating a VM from user-image or a specialized VHD. Possible values are:
+        **Windows** **Linux**. Known values are: "Windows" and "Linux".
         :paramtype os_type: str or ~azure.mgmt.compute.v2022_08_01.models.OperatingSystemTypes
-        :keyword encryption_settings: Specifies the encryption settings for the OS Disk.
-        :code:`<br>`:code:`<br>` Minimum api-version: 2015-06-15.
+        :keyword encryption_settings: Specifies the encryption settings for the OS Disk. Minimum
+        api-version: 2015-06-15.
         :paramtype encryption_settings: ~azure.mgmt.compute.v2022_08_01.models.DiskEncryptionSettings
         :keyword name: The disk name.
         :paramtype name: str
@@ -4564,11 +4525,9 @@ def __init__(
         before being attached to the virtual machine. If SourceImage is provided, the destination
         virtual hard drive must not exist.
         :paramtype image: ~azure.mgmt.compute.v2022_08_01.models.VirtualHardDisk
-        :keyword caching: Specifies the caching requirements. :code:`<br>`:code:`<br>` Possible values
-        are: :code:`<br>`:code:`<br>` **None** :code:`<br>`:code:`<br>` **ReadOnly**
-        :code:`<br>`:code:`<br>` **ReadWrite** :code:`<br>`:code:`<br>` Default: **None** for Standard
-        storage. **ReadOnly** for Premium storage. Known values are: "None", "ReadOnly", and
-        "ReadWrite".
+        :keyword caching: Specifies the caching requirements. Possible values are: **None**
+        **ReadOnly** **ReadWrite** Default: **None** for Standard storage. **ReadOnly** for Premium
+        storage. Known values are: "None", "ReadOnly", and "ReadWrite".
         :paramtype caching: str or ~azure.mgmt.compute.v2022_08_01.models.CachingTypes
         :keyword write_accelerator_enabled: Specifies whether writeAccelerator should be enabled or
         disabled on the disk.
         :paramtype write_accelerator_enabled: bool
@@ -4576,26 +4535,22 @@ def __init__(
         :keyword diff_disk_settings: Specifies the ephemeral Disk Settings for the operating system
         disk used by the virtual machine.
         :paramtype diff_disk_settings: ~azure.mgmt.compute.v2022_08_01.models.DiffDiskSettings
-        :keyword create_option: Specifies how the virtual machine should be
-        created.:code:`<br>`:code:`<br>` Possible values are::code:`<br>`:code:`<br>` **Attach** \u2013
-        This value is used when you are using a specialized disk to create the virtual
-        machine.:code:`<br>`:code:`<br>` **FromImage** \u2013 This value is used when you are using an
-        image to create the virtual machine. If you are using a platform image, you also use the
-        imageReference element described above. If you are using a marketplace image, you also use the
-        plan element previously described. Required. Known values are: "FromImage", "Empty", and
-        "Attach".
+        :keyword create_option: Specifies how the virtual machine should be created. Possible values
+        are: **Attach** \u2013 This value is used when you are using a specialized disk to create the
+        virtual machine. **FromImage** \u2013 This value is used when you are using an image to create
+        the virtual machine. If you are using a platform image, you also use the imageReference element
+        described above. If you are using a marketplace image, you also use the plan element
+        previously described. Required. Known values are: "FromImage", "Empty", and "Attach".
         :paramtype create_option: str or ~azure.mgmt.compute.v2022_08_01.models.DiskCreateOptionTypes
         :keyword disk_size_gb: Specifies the size of an empty data disk in gigabytes. This element can
-        be used to overwrite the size of the disk in a virtual machine image. :code:`<br>`:code:`<br>`
-        diskSizeGB is the number of bytes x 1024^3 for the disk and the value cannot be larger than
-        1023.
+        be used to overwrite the size of the disk in a virtual machine image. diskSizeGB is the number
+        of bytes x 1024^3 for the disk and the value cannot be larger than 1023.
         :paramtype disk_size_gb: int
         :keyword managed_disk: The managed disk parameters.
         :paramtype managed_disk: ~azure.mgmt.compute.v2022_08_01.models.ManagedDiskParameters
         :keyword delete_option: Specifies whether OS Disk should be deleted or detached upon VM
-        deletion. :code:`<br>`:code:`<br>` Possible values: :code:`<br>`:code:`<br>` **Delete** If this
-        value is used, the OS disk is deleted when VM is deleted.:code:`<br>`:code:`<br>` **Detach** If
-        this value is used, the os disk is retained after VM is deleted. :code:`<br>`:code:`<br>` The
+        deletion. Possible values: **Delete** If this value is used, the OS disk is deleted when VM
+        is deleted. **Detach** If this value is used, the os disk is retained after VM is deleted. The
         default value is set to **detach**. For an ephemeral OS Disk, the default value is set to
         **Delete**. User cannot change the delete option for ephemeral OS Disk. Known values are:
         "Delete" and "Detach".
         :paramtype delete_option: str or ~azure.mgmt.compute.v2022_08_01.models.DiskDeleteOptionTypes
         """
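A sketch of an OS disk created from a platform image, using the options documented above; the storage account type value is an assumption for illustration, and the image itself would come from the imageReference element of the storage profile.

# Hypothetical sketch; not part of the generated diff.
from azure.mgmt.compute.v2022_08_01 import models

os_disk = models.OSDisk(
    create_option="FromImage",   # required; "Attach" would use a specialized disk
    caching="ReadWrite",
    managed_disk=models.ManagedDiskParameters(storage_account_type="Premium_LRS"),
    delete_option="Delete",      # default is Detach for a persistent OS disk
)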
@@ -4648,56 +4603,48 @@ class OSProfile(_serialization.Model):
     """Specifies the operating system settings for the virtual machine. Some of the settings cannot
     be changed once VM is provisioned.
-    :ivar computer_name: Specifies the host OS name of the virtual machine.
-    :code:`<br>`:code:`<br>` This name cannot be updated after the VM is created.
-    :code:`<br>`:code:`<br>` **Max-length (Windows):** 15 characters :code:`<br>`:code:`<br>`
-    **Max-length (Linux):** 64 characters. :code:`<br>`:code:`<br>` For naming conventions and
-    restrictions see `Azure infrastructure services implementation guidelines
+    :ivar computer_name: Specifies the host OS name of the virtual machine. This name cannot be
+    updated after the VM is created. **Max-length (Windows):** 15 characters **Max-length
+    (Linux):** 64 characters. For naming conventions and restrictions see `Azure infrastructure
+    services implementation guidelines
     `_.
     :vartype computer_name: str
-    :ivar admin_username: Specifies the name of the administrator account. :code:`<br>`:code:`<br>`
-    This property cannot be updated after the VM is created. :code:`<br>`:code:`<br>`
-    **Windows-only restriction:** Cannot end in "." :code:`<br>`:code:`<br>` **Disallowed values:**
-    "administrator", "admin", "user", "user1", "test", "user2", "test1", "user3", "admin1", "1",
-    "123", "a", "actuser", "adm", "admin2", "aspnet", "backup", "console", "david", "guest",
-    "john", "owner", "root", "server", "sql", "support", "support_388945a0", "sys", "test2",
-    "test3", "user4", "user5". :code:`<br>`:code:`<br>` **Minimum-length (Linux):** 1 character
-    :code:`<br>`:code:`<br>` **Max-length (Linux):** 64 characters :code:`<br>`:code:`<br>`
-    **Max-length (Windows):** 20 characters.
+    :ivar admin_username: Specifies the name of the administrator account. This property cannot be
+    updated after the VM is created. **Windows-only restriction:** Cannot end in "." **Disallowed
+    values:** "administrator", "admin", "user", "user1", "test", "user2", "test1", "user3",
+    "admin1", "1", "123", "a", "actuser", "adm", "admin2", "aspnet", "backup", "console", "david",
+    "guest", "john", "owner", "root", "server", "sql", "support", "support_388945a0", "sys",
+    "test2", "test3", "user4", "user5". **Minimum-length (Linux):** 1 character **Max-length
+    (Linux):** 64 characters **Max-length (Windows):** 20 characters.
     :vartype admin_username: str
-    :ivar admin_password: Specifies the password of the administrator account.
-    :code:`<br>`:code:`<br>` **Minimum-length (Windows):** 8 characters :code:`<br>`:code:`<br>`
-    **Minimum-length (Linux):** 6 characters :code:`<br>`:code:`<br>` **Max-length (Windows):** 123
-    characters :code:`<br>`:code:`<br>` **Max-length (Linux):** 72 characters
-    :code:`<br>`:code:`<br>` **Complexity requirements:** 3 out of 4 conditions below need to be
-    fulfilled :code:`<br>` Has lower characters :code:`<br>`Has upper characters :code:`<br>` Has a
-    digit :code:`<br>` Has a special character (Regex match [\W_]) :code:`<br>`:code:`<br>`
-    **Disallowed values:** "abc@123", "P@$$w0rd", "P@ssw0rd", "P@ssword123", "Pa$$word",
-    "pass@word1", "Password!", "Password1", "Password22", "iloveyou!" :code:`<br>`:code:`<br>` For
-    resetting the password, see `How to reset the Remote Desktop service or its login password in a
-    Windows VM `_
-    :code:`<br>`:code:`<br>` For resetting root password, see `Manage users, SSH, and check or
-    repair disks on Azure Linux VMs using the VMAccess Extension
+    :ivar admin_password: Specifies the password of the administrator account. **Minimum-length
+    (Windows):** 8 characters **Minimum-length (Linux):** 6 characters **Max-length (Windows):**
+    123 characters **Max-length (Linux):** 72 characters **Complexity requirements:** 3 out of 4
+    conditions below need to be fulfilled Has lower characters Has upper characters Has a digit
+    Has a special character (Regex match [\W_]) **Disallowed values:** "abc@123", "P@$$w0rd",
+    "P@ssw0rd", "P@ssword123", "Pa$$word", "pass@word1", "Password!", "Password1", "Password22",
+    "iloveyou!" For resetting the password, see `How to reset the Remote Desktop service or its
+    login password in a Windows VM
+    `_ For resetting
+    root password, see `Manage users, SSH, and check or repair disks on Azure Linux VMs using the
+    VMAccess Extension
     `_.
     :vartype admin_password: str
     :ivar custom_data: Specifies a base-64 encoded string of custom data. The base-64 encoded
     string is decoded to a binary array that is saved as a file on the Virtual Machine. The maximum
-    length of the binary array is 65535 bytes. :code:`<br>`:code:`<br>` **Note: Do not pass any
-    secrets or passwords in customData property** :code:`<br>`:code:`<br>` This property cannot be
-    updated after the VM is created. :code:`<br>`:code:`<br>` customData is passed to the VM to be
-    saved as a file, for more information see `Custom Data on Azure VMs
-    `_
-    :code:`<br>`:code:`<br>` For using cloud-init for your Linux VM, see `Using cloud-init to
-    customize a Linux VM during creation
+    length of the binary array is 65535 bytes. **Note: Do not pass any secrets or passwords in
+    customData property** This property cannot be updated after the VM is created. customData is
+    passed to the VM to be saved as a file, for more information see `Custom Data on Azure VMs
+    `_ For using
+    cloud-init for your Linux VM, see `Using cloud-init to customize a Linux VM during creation
     `_.
     :vartype custom_data: str
     :ivar windows_configuration: Specifies Windows operating system settings on the virtual
     machine.
     :vartype windows_configuration: ~azure.mgmt.compute.v2022_08_01.models.WindowsConfiguration
-    :ivar linux_configuration: Specifies the Linux operating system settings on the virtual
-    machine. :code:`<br>`:code:`<br>`For a list of supported Linux distributions, see `Linux on
-    Azure-Endorsed Distributions
-    `_.
+    :ivar linux_configuration: Specifies the Linux operating system settings on the virtual
+    machine. For a list of supported Linux distributions, see `Linux on Azure-Endorsed
+    Distributions `_.
     :vartype linux_configuration: ~azure.mgmt.compute.v2022_08_01.models.LinuxConfiguration
     :ivar secrets: Specifies set of certificates that should be installed onto the virtual machine.
     To install certificates on a virtual machine it is recommended to use the `Azure Key Vault
     virtual machine extension for Linux `_ or the `Azure
     Key Vault virtual machine extension for Windows
     `_.
     :vartype secrets: list[~azure.mgmt.compute.v2022_08_01.models.VaultSecretGroup]
     :ivar allow_extension_operations: Specifies whether extension operations should be allowed on
-    the virtual machine. :code:`<br>`:code:`<br>`This may only be set to False when no extensions
-    are present on the virtual machine.
+    the virtual machine. This may only be set to False when no extensions are present on the
+    virtual machine.
     :vartype allow_extension_operations: bool
     :ivar require_guest_provision_signal: Optional property which must either be set to True or
     omitted.
@@ -4742,57 +4689,48 @@ def __init__(
         **kwargs: Any
     ) -> None:
         """
-        :keyword computer_name: Specifies the host OS name of the virtual machine.
-        :code:`<br>`:code:`<br>` This name cannot be updated after the VM is created.
-        :code:`<br>`:code:`<br>` **Max-length (Windows):** 15 characters :code:`<br>`:code:`<br>`
-        **Max-length (Linux):** 64 characters. :code:`<br>`:code:`<br>` For naming conventions and
-        restrictions see `Azure infrastructure services implementation guidelines
+        :keyword computer_name: Specifies the host OS name of the virtual machine. This name cannot be
+        updated after the VM is created. **Max-length (Windows):** 15 characters **Max-length
+        (Linux):** 64 characters. For naming conventions and restrictions see `Azure infrastructure
+        services implementation guidelines
         `_.
         :paramtype computer_name: str
-        :keyword admin_username: Specifies the name of the administrator account.
-        :code:`<br>`:code:`<br>` This property cannot be updated after the VM is created.
-        :code:`<br>`:code:`<br>` **Windows-only restriction:** Cannot end in "."
-        :code:`<br>`:code:`<br>` **Disallowed values:** "administrator", "admin", "user", "user1",
-        "test", "user2", "test1", "user3", "admin1", "1", "123", "a", "actuser", "adm", "admin2",
-        "aspnet", "backup", "console", "david", "guest", "john", "owner", "root", "server", "sql",
-        "support", "support_388945a0", "sys", "test2", "test3", "user4", "user5".
-        :code:`<br>`:code:`<br>` **Minimum-length (Linux):** 1 character :code:`<br>`:code:`<br>`
-        **Max-length (Linux):** 64 characters :code:`<br>`:code:`<br>` **Max-length (Windows):** 20
-        characters.
+        :keyword admin_username: Specifies the name of the administrator account. This property cannot
+        be updated after the VM is created. **Windows-only restriction:** Cannot end in "."
+        **Disallowed values:** "administrator", "admin", "user", "user1", "test", "user2", "test1",
+        "user3", "admin1", "1", "123", "a", "actuser", "adm", "admin2", "aspnet", "backup", "console",
+        "david", "guest", "john", "owner", "root", "server", "sql", "support", "support_388945a0",
+        "sys", "test2", "test3", "user4", "user5". **Minimum-length (Linux):** 1 character
+        **Max-length (Linux):** 64 characters **Max-length (Windows):** 20 characters.
        :paramtype admin_username: str
-        :keyword admin_password: Specifies the password of the administrator account.
-        :code:`<br>`:code:`<br>` **Minimum-length (Windows):** 8 characters :code:`<br>`:code:`<br>`
-        **Minimum-length (Linux):** 6 characters :code:`<br>`:code:`<br>` **Max-length (Windows):** 123
-        characters :code:`<br>`:code:`<br>` **Max-length (Linux):** 72 characters
-        :code:`<br>`:code:`<br>` **Complexity requirements:** 3 out of 4 conditions below need to be
-        fulfilled :code:`<br>` Has lower characters :code:`<br>`Has upper characters :code:`<br>` Has a
-        digit :code:`<br>` Has a special character (Regex match [\W_]) :code:`<br>`:code:`<br>`
-        **Disallowed values:** "abc@123", "P@$$w0rd", "P@ssw0rd", "P@ssword123", "Pa$$word",
-        "pass@word1", "Password!", "Password1", "Password22", "iloveyou!" :code:`<br>`:code:`<br>` For
-        resetting the password, see `How to reset the Remote Desktop service or its login password in a
-        Windows VM `_
-        :code:`<br>`:code:`<br>` For resetting root password, see `Manage users, SSH, and check or
-        repair disks on Azure Linux VMs using the VMAccess Extension
+        :keyword admin_password: Specifies the password of the administrator account. **Minimum-length
+        (Windows):** 8 characters **Minimum-length (Linux):** 6 characters **Max-length (Windows):**
+        123 characters **Max-length (Linux):** 72 characters **Complexity requirements:** 3 out of 4
+        conditions below need to be fulfilled Has lower characters Has upper characters Has a digit
+        Has a special character (Regex match [\W_]) **Disallowed values:** "abc@123", "P@$$w0rd",
+        "P@ssw0rd", "P@ssword123", "Pa$$word", "pass@word1", "Password!", "Password1", "Password22",
+        "iloveyou!" For resetting the password, see `How to reset the Remote Desktop service or its
+        login password in a Windows VM
+        `_ For resetting
+        root password, see `Manage users, SSH, and check or repair disks on Azure Linux VMs using the
+        VMAccess Extension
         `_.
         :paramtype admin_password: str
         :keyword custom_data: Specifies a base-64 encoded string of custom data. The base-64 encoded
         string is decoded to a binary array that is saved as a file on the Virtual Machine. The maximum
-        length of the binary array is 65535 bytes. :code:`<br>`:code:`<br>` **Note: Do not pass any
-        secrets or passwords in customData property** :code:`<br>`:code:`<br>` This property cannot be
-        updated after the VM is created. :code:`<br>`:code:`<br>` customData is passed to the VM to be
-        saved as a file, for more information see `Custom Data on Azure VMs
-        `_
-        :code:`<br>`:code:`<br>` For using cloud-init for your Linux VM, see `Using cloud-init to
-        customize a Linux VM during creation
+        length of the binary array is 65535 bytes. **Note: Do not pass any secrets or passwords in
+        customData property** This property cannot be updated after the VM is created. customData is
+        passed to the VM to be saved as a file, for more information see `Custom Data on Azure VMs
+        `_ For using
+        cloud-init for your Linux VM, see `Using cloud-init to customize a Linux VM during creation
        `_.
        :paramtype custom_data: str
        :keyword windows_configuration: Specifies Windows operating system settings on the virtual
        machine.
        :paramtype windows_configuration: ~azure.mgmt.compute.v2022_08_01.models.WindowsConfiguration
        :keyword linux_configuration: Specifies the Linux operating system settings on the virtual
-        machine. :code:`<br>`:code:`<br>`For a list of supported Linux distributions, see `Linux on
-        Azure-Endorsed Distributions
-        `_.
+        machine. For a list of supported Linux distributions, see `Linux on Azure-Endorsed
+        Distributions `_.
        :paramtype linux_configuration: ~azure.mgmt.compute.v2022_08_01.models.LinuxConfiguration
        :keyword secrets: Specifies set of certificates that should be installed onto the virtual
        machine. To install certificates on a virtual machine it is recommended to use the `Azure Key
        Vault virtual machine extension for Linux `_ or the
        `Azure Key Vault virtual machine extension for Windows
        `_.
        :paramtype secrets: list[~azure.mgmt.compute.v2022_08_01.models.VaultSecretGroup]
        :keyword allow_extension_operations: Specifies whether extension operations should be allowed
-        on the virtual machine. :code:`<br>`:code:`<br>`This may only be set to False when no
-        extensions are present on the virtual machine.
+        on the virtual machine. This may only be set to False when no extensions are present on the
+        virtual machine.
        :paramtype allow_extension_operations: bool
        :keyword require_guest_provision_signal: Optional property which must either be set to True or
        omitted.
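A sketch of an OS profile for a Linux VM, built from the keywords documented above; all credentials are placeholders, and the caller is responsible for base-64 encoding custom_data.

# Hypothetical sketch; not part of the generated diff.
import base64
from azure.mgmt.compute.v2022_08_01 import models

os_profile = models.OSProfile(
    computer_name="demo-vm-01",           # 15-char limit on Windows, 64 on Linux
    admin_username="azureuser",           # avoid the documented disallowed names
    admin_password="Replace-Me-123",      # placeholder meeting 3-of-4 complexity rules
    linux_configuration=models.LinuxConfiguration(provision_vm_agent=True),
    custom_data=base64.b64encode(b"#cloud-config\n").decode(),
    allow_extension_operations=True,
)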
@@ -6805,17 +6743,16 @@ class SecurityProfile(_serialization.Model):
     """Specifies the Security profile settings for the virtual machine or virtual machine scale set.
     :ivar uefi_settings: Specifies the security settings like secure boot and vTPM used while
-    creating the virtual machine. :code:`<br>`:code:`<br>`Minimum api-version: 2020-12-01.
+    creating the virtual machine. Minimum api-version: 2020-12-01.
     :vartype uefi_settings: ~azure.mgmt.compute.v2022_08_01.models.UefiSettings
     :ivar encryption_at_host: This property can be used by user in the request to enable or disable
     the Host Encryption for the virtual machine or virtual machine scale set. This will enable the
-    encryption for all the disks including Resource/Temp disk at host itself.
-    :code:`<br>`:code:`<br>` Default: The Encryption at host will be disabled unless this property
-    is set to true for the resource.
+    encryption for all the disks including Resource/Temp disk at host itself. Default: The
+    Encryption at host will be disabled unless this property is set to true for the resource.
     :vartype encryption_at_host: bool
     :ivar security_type: Specifies the SecurityType of the virtual machine. It has to be set to any
-    specified value to enable UefiSettings. :code:`<br>`:code:`<br>` Default: UefiSettings will not
-    be enabled unless this property is set. Known values are: "TrustedLaunch" and "ConfidentialVM".
+    specified value to enable UefiSettings. Default: UefiSettings will not be enabled unless this
+    property is set. Known values are: "TrustedLaunch" and "ConfidentialVM".
     :vartype security_type: str or ~azure.mgmt.compute.v2022_08_01.models.SecurityTypes
     """
@@ -6835,18 +6772,16 @@ def __init__(
     ) -> None:
         """
         :keyword uefi_settings: Specifies the security settings like secure boot and vTPM used while
-        creating the virtual machine. :code:`<br>`:code:`<br>`Minimum api-version: 2020-12-01.
+        creating the virtual machine. Minimum api-version: 2020-12-01.
         :paramtype uefi_settings: ~azure.mgmt.compute.v2022_08_01.models.UefiSettings
         :keyword encryption_at_host: This property can be used by user in the request to enable or
         disable the Host Encryption for the virtual machine or virtual machine scale set. This will
-        enable the encryption for all the disks including Resource/Temp disk at host itself.
-        :code:`<br>`:code:`<br>` Default: The Encryption at host will be disabled unless this property
-        is set to true for the resource.
+        enable the encryption for all the disks including Resource/Temp disk at host itself. Default:
+        The Encryption at host will be disabled unless this property is set to true for the resource.
         :paramtype encryption_at_host: bool
         :keyword security_type: Specifies the SecurityType of the virtual machine. It has to be set to
-        any specified value to enable UefiSettings. :code:`<br>`:code:`<br>` Default: UefiSettings will
-        not be enabled unless this property is set. Known values are: "TrustedLaunch" and
-        "ConfidentialVM".
+        any specified value to enable UefiSettings. Default: UefiSettings will not be enabled unless
+        this property is set. Known values are: "TrustedLaunch" and "ConfidentialVM".
         :paramtype security_type: str or ~azure.mgmt.compute.v2022_08_01.models.SecurityTypes
         """
         super().__init__(**kwargs)
`:code:`
` For creating - ssh keys, see [Create SSH keys on Linux and Mac for Linux VMs in + key needs to be at least 2048-bit and in ssh-rsa format. For creating ssh keys, see [Create SSH + keys on Linux and Mac for Linux VMs in Azure]https://docs.microsoft.com/azure/virtual-machines/linux/create-ssh-keys-detailed). :vartype key_data: str """ @@ -6976,8 +6911,8 @@ def __init__(self, *, path: Optional[str] = None, key_data: Optional[str] = None /home/user/.ssh/authorized_keys. :paramtype path: str :keyword key_data: SSH public key certificate used to authenticate with the VM through ssh. The - key needs to be at least 2048-bit and in ssh-rsa format. :code:`
`:code:`
` For creating - ssh keys, see [Create SSH keys on Linux and Mac for Linux VMs in + key needs to be at least 2048-bit and in ssh-rsa format. For creating ssh keys, see [Create SSH + keys on Linux and Mac for Linux VMs in Azure]https://docs.microsoft.com/azure/virtual-machines/linux/create-ssh-keys-detailed). :paramtype key_data: str """ @@ -7174,23 +7109,20 @@ class StorageProfile(_serialization.Model): but is not used in other creation operations. :vartype image_reference: ~azure.mgmt.compute.v2022_08_01.models.ImageReference :ivar os_disk: Specifies information about the operating system disk used by the virtual - machine. :code:`
`:code:`
` For more information about disks, see `About disks and VHDs - for Azure virtual machines - `_. + machine. For more information about disks, see `About disks and VHDs for Azure virtual + machines `_. :vartype os_disk: ~azure.mgmt.compute.v2022_08_01.models.OSDisk :ivar data_disks: Specifies the parameters that are used to add a data disk to a virtual - machine. :code:`
`:code:`
` For more information about disks, see `About disks and VHDs - for Azure virtual machines - `_. + machine. For more information about disks, see `About disks and VHDs for Azure virtual + machines `_. :vartype data_disks: list[~azure.mgmt.compute.v2022_08_01.models.DataDisk] - :ivar disk_controller_type: Specifies the disk controller type configured for the VM. - :code:`
`:code:`
`NOTE: This property will be set to the default disk controller type if - not specified provided virtual machine is being created as a hyperVGeneration: V2 based on the - capabilities of the operating system disk and VM size from the the specified minimum api - version. :code:`
`You need to deallocate the VM before updating its disk controller type - unless you are updating the VM size in the VM configuration which implicitly deallocates and - reallocates the VM. :code:`
`:code:`
` Minimum api-version: 2022-08-01. Known values are: - "SCSI" and "NVMe". + :ivar disk_controller_type: Specifies the disk controller type configured for the VM. NOTE: + This property will be set to the default disk controller type if not specified provided virtual + machine is being created as a hyperVGeneration: V2 based on the capabilities of the operating + system disk and VM size from the the specified minimum api version. You need to deallocate the + VM before updating its disk controller type unless you are updating the VM size in the VM + configuration which implicitly deallocates and reallocates the VM. Minimum api-version: + 2022-08-01. Known values are: "SCSI" and "NVMe". :vartype disk_controller_type: str or ~azure.mgmt.compute.v2022_08_01.models.DiskControllerTypes """ @@ -7218,23 +7150,20 @@ def __init__( but is not used in other creation operations. :paramtype image_reference: ~azure.mgmt.compute.v2022_08_01.models.ImageReference :keyword os_disk: Specifies information about the operating system disk used by the virtual - machine. :code:`
`:code:`
` For more information about disks, see `About disks and VHDs - for Azure virtual machines - `_. + machine. For more information about disks, see `About disks and VHDs for Azure virtual + machines `_. :paramtype os_disk: ~azure.mgmt.compute.v2022_08_01.models.OSDisk :keyword data_disks: Specifies the parameters that are used to add a data disk to a virtual - machine. :code:`
`:code:`
` For more information about disks, see `About disks and VHDs - for Azure virtual machines - `_. + machine. For more information about disks, see `About disks and VHDs for Azure virtual + machines `_. :paramtype data_disks: list[~azure.mgmt.compute.v2022_08_01.models.DataDisk] - :keyword disk_controller_type: Specifies the disk controller type configured for the VM. - :code:`
`:code:`
`NOTE: This property will be set to the default disk controller type if - not specified provided virtual machine is being created as a hyperVGeneration: V2 based on the - capabilities of the operating system disk and VM size from the the specified minimum api - version. :code:`
`You need to deallocate the VM before updating its disk controller type - unless you are updating the VM size in the VM configuration which implicitly deallocates and - reallocates the VM. :code:`
`:code:`
` Minimum api-version: 2022-08-01. Known values are: - "SCSI" and "NVMe". + :keyword disk_controller_type: Specifies the disk controller type configured for the VM. NOTE: + This property will be set to the default disk controller type if not specified provided virtual + machine is being created as a hyperVGeneration: V2 based on the capabilities of the operating + system disk and VM size from the the specified minimum api version. You need to deallocate the + VM before updating its disk controller type unless you are updating the VM size in the VM + configuration which implicitly deallocates and reallocates the VM. Minimum api-version: + 2022-08-01. Known values are: "SCSI" and "NVMe". :paramtype disk_controller_type: str or ~azure.mgmt.compute.v2022_08_01.models.DiskControllerTypes """ @@ -7453,13 +7382,13 @@ def __init__( class UefiSettings(_serialization.Model): """Specifies the security settings like secure boot and vTPM used while creating the virtual - machine. :code:`
`:code:`
`Minimum api-version: 2020-12-01. + machine. Minimum api-version: 2020-12-01. :ivar secure_boot_enabled: Specifies whether secure boot should be enabled on the virtual - machine. :code:`
`:code:`
`Minimum api-version: 2020-12-01. + machine. Minimum api-version: 2020-12-01. :vartype secure_boot_enabled: bool - :ivar v_tpm_enabled: Specifies whether vTPM should be enabled on the virtual machine. - :code:`
`:code:`
`Minimum api-version: 2020-12-01. + :ivar v_tpm_enabled: Specifies whether vTPM should be enabled on the virtual machine. Minimum + api-version: 2020-12-01. :vartype v_tpm_enabled: bool """ @@ -7473,10 +7402,10 @@ def __init__( ) -> None: """ :keyword secure_boot_enabled: Specifies whether secure boot should be enabled on the virtual - machine. :code:`
`:code:`
`Minimum api-version: 2020-12-01. + machine. Minimum api-version: 2020-12-01. :paramtype secure_boot_enabled: bool :keyword v_tpm_enabled: Specifies whether vTPM should be enabled on the virtual machine. - :code:`
`:code:`
`Minimum api-version: 2020-12-01. + Minimum api-version: 2020-12-01. :paramtype v_tpm_enabled: bool """ super().__init__(**kwargs) @@ -7767,20 +7696,19 @@ class VaultCertificate(_serialization.Model): a secret. For adding a secret to the Key Vault, see `Add a key or secret to the key vault `_. In this case, your certificate needs to be It is the Base64 encoding of the following JSON Object which is encoded - in UTF-8: :code:`
`:code:`
` {:code:`
` - "data":":code:`<Base64-encoded-certificate>`",:code:`
` "dataType":"pfx",:code:`
` - "password":":code:`<pfx-file-password>`":code:`
`} :code:`
` To install certificates on a - virtual machine it is recommended to use the `Azure Key Vault virtual machine extension for - Linux `_ or the - `Azure Key Vault virtual machine extension for Windows + in UTF-8: { "data":":code:``", "dataType":"pfx", + "password":":code:``"} To install certificates on a virtual machine it is + recommended to use the `Azure Key Vault virtual machine extension for Linux + `_ or the `Azure + Key Vault virtual machine extension for Windows `_. :vartype certificate_url: str :ivar certificate_store: For Windows VMs, specifies the certificate store on the Virtual Machine to which the certificate should be added. The specified certificate store is implicitly - in the LocalMachine account. :code:`
`:code:`
`For Linux VMs, the certificate file is - placed under the /var/lib/waagent directory, with the file name <UppercaseThumbprint>.crt - for the X509 certificate file and <UppercaseThumbprint>.prv for private key. Both of - these files are .pem formatted. + in the LocalMachine account. For Linux VMs, the certificate file is placed under the + /var/lib/waagent directory, with the file name <UppercaseThumbprint>.crt for the X509 + certificate file and <UppercaseThumbprint>.prv for private key. Both of these files are + .pem formatted. :vartype certificate_store: str """ @@ -7797,20 +7725,19 @@ def __init__( as a secret. For adding a secret to the Key Vault, see `Add a key or secret to the key vault `_. In this case, your certificate needs to be It is the Base64 encoding of the following JSON Object which is encoded - in UTF-8: :code:`
`:code:`
` {:code:`
` - "data":":code:`<Base64-encoded-certificate>`",:code:`
` "dataType":"pfx",:code:`
` - "password":":code:`<pfx-file-password>`":code:`
`} :code:`
` To install certificates on a - virtual machine it is recommended to use the `Azure Key Vault virtual machine extension for - Linux `_ or the - `Azure Key Vault virtual machine extension for Windows + in UTF-8: { "data":":code:``", "dataType":"pfx", + "password":":code:``"} To install certificates on a virtual machine it is + recommended to use the `Azure Key Vault virtual machine extension for Linux + `_ or the `Azure + Key Vault virtual machine extension for Windows `_. :paramtype certificate_url: str :keyword certificate_store: For Windows VMs, specifies the certificate store on the Virtual Machine to which the certificate should be added. The specified certificate store is implicitly - in the LocalMachine account. :code:`
`:code:`
`For Linux VMs, the certificate file is - placed under the /var/lib/waagent directory, with the file name <UppercaseThumbprint>.crt - for the X509 certificate file and <UppercaseThumbprint>.prv for private key. Both of - these files are .pem formatted. + in the LocalMachine account. For Linux VMs, the certificate file is placed under the + /var/lib/waagent directory, with the file name <UppercaseThumbprint>.crt for the X509 + certificate file and <UppercaseThumbprint>.prv for private key. Both of these files are + .pem formatted. :paramtype certificate_store: str """ super().__init__(**kwargs) @@ -14908,14 +14835,13 @@ def __init__( # pylint: disable=too-many-locals class VMDiskSecurityProfile(_serialization.Model): - """Specifies the security profile settings for the managed disk. :code:`
`:code:`
` NOTE: It - can only be set for Confidential VMs. - - :ivar security_encryption_type: Specifies the EncryptionType of the managed disk. :code:`
` - It is set to DiskWithVMGuestState for encryption of the managed disk along with VMGuestState - blob, and VMGuestStateOnly for encryption of just the VMGuestState blob. - :code:`
`:code:`
` NOTE: It can be set for only Confidential VMs. Known values are: - "VMGuestStateOnly" and "DiskWithVMGuestState". + """Specifies the security profile settings for the managed disk. NOTE: It can only be set for + Confidential VMs. + + :ivar security_encryption_type: Specifies the EncryptionType of the managed disk. It is set to + DiskWithVMGuestState for encryption of the managed disk along with VMGuestState blob, and + VMGuestStateOnly for encryption of just the VMGuestState blob. NOTE: It can be set for only + Confidential VMs. Known values are: "VMGuestStateOnly" and "DiskWithVMGuestState". :vartype security_encryption_type: str or ~azure.mgmt.compute.v2022_08_01.models.SecurityEncryptionTypes :ivar disk_encryption_set: Specifies the customer managed disk encryption set resource id for @@ -14938,11 +14864,10 @@ def __init__( **kwargs: Any ) -> None: """ - :keyword security_encryption_type: Specifies the EncryptionType of the managed disk. - :code:`
` It is set to DiskWithVMGuestState for encryption of the managed disk along with - VMGuestState blob, and VMGuestStateOnly for encryption of just the VMGuestState blob. - :code:`
`:code:`
` NOTE: It can be set for only Confidential VMs. Known values are: - "VMGuestStateOnly" and "DiskWithVMGuestState". + :keyword security_encryption_type: Specifies the EncryptionType of the managed disk. It is set + to DiskWithVMGuestState for encryption of the managed disk along with VMGuestState blob, and + VMGuestStateOnly for encryption of just the VMGuestState blob. NOTE: It can be set for only + Confidential VMs. Known values are: "VMGuestStateOnly" and "DiskWithVMGuestState". :paramtype security_encryption_type: str or ~azure.mgmt.compute.v2022_08_01.models.SecurityEncryptionTypes :keyword disk_encryption_set: Specifies the customer managed disk encryption set resource id @@ -15096,18 +15021,16 @@ def __init__(self, *, active_placement_group_id: Optional[str] = None, **kwargs: class VMSizeProperties(_serialization.Model): """Specifies VM Size Property settings on the virtual machine. - :ivar v_cpus_available: Specifies the number of vCPUs available for the VM. - :code:`
`:code:`
` When this property is not specified in the request body the default - behavior is to set it to the value of vCPUs available for that VM size exposed in api response - of `List all available virtual machine sizes in a region - `_ . + :ivar v_cpus_available: Specifies the number of vCPUs available for the VM. When this property + is not specified in the request body the default behavior is to set it to the value of vCPUs + available for that VM size exposed in api response of `List all available virtual machine sizes + in a region `_ . :vartype v_cpus_available: int - :ivar v_cpus_per_core: Specifies the vCPU to physical core ratio. :code:`
`:code:`
` When - this property is not specified in the request body the default behavior is set to the value of - vCPUsPerCore for the VM Size exposed in api response of `List all available virtual machine - sizes in a region `_ - :code:`
`:code:`
` Setting this property to 1 also means that hyper-threading is - disabled. + :ivar v_cpus_per_core: Specifies the vCPU to physical core ratio. When this property is not + specified in the request body the default behavior is set to the value of vCPUsPerCore for the + VM Size exposed in api response of `List all available virtual machine sizes in a region + `_ Setting this property + to 1 also means that hyper-threading is disabled. :vartype v_cpus_per_core: int """ @@ -15120,19 +15043,16 @@ def __init__( self, *, v_cpus_available: Optional[int] = None, v_cpus_per_core: Optional[int] = None, **kwargs: Any ) -> None: """ - :keyword v_cpus_available: Specifies the number of vCPUs available for the VM. - :code:`
`:code:`
` When this property is not specified in the request body the default - behavior is to set it to the value of vCPUs available for that VM size exposed in api response - of `List all available virtual machine sizes in a region - `_ . + :keyword v_cpus_available: Specifies the number of vCPUs available for the VM. When this + property is not specified in the request body the default behavior is to set it to the value of + vCPUs available for that VM size exposed in api response of `List all available virtual machine + sizes in a region `_ . :paramtype v_cpus_available: int - :keyword v_cpus_per_core: Specifies the vCPU to physical core ratio. :code:`
`:code:`
` - When this property is not specified in the request body the default behavior is set to the - value of vCPUsPerCore for the VM Size exposed in api response of `List all available virtual - machine sizes in a region - `_ - :code:`
`:code:`
` Setting this property to 1 also means that hyper-threading is - disabled. + :keyword v_cpus_per_core: Specifies the vCPU to physical core ratio. When this property is not + specified in the request body the default behavior is set to the value of vCPUsPerCore for the + VM Size exposed in api response of `List all available virtual machine sizes in a region + `_ Setting this property + to 1 also means that hyper-threading is disabled. :paramtype v_cpus_per_core: int """ super().__init__(**kwargs) @@ -15144,16 +15064,16 @@ class WindowsConfiguration(_serialization.Model): """Specifies Windows operating system settings on the virtual machine. :ivar provision_vm_agent: Indicates whether virtual machine agent should be provisioned on the - virtual machine. :code:`
`:code:`
` When this property is not specified in the request - body, default behavior is to set it to true. This will ensure that VM Agent is installed on - the VM so that extensions can be added to the VM later. + virtual machine. When this property is not specified in the request body, default behavior is + to set it to true. This will ensure that VM Agent is installed on the VM so that extensions + can be added to the VM later. :vartype provision_vm_agent: bool :ivar enable_automatic_updates: Indicates whether Automatic Updates is enabled for the Windows - virtual machine. Default value is true. :code:`
`:code:`
` For virtual machine scale - sets, this property can be updated and updates will take effect on OS reprovisioning. + virtual machine. Default value is true. For virtual machine scale sets, this property can be + updated and updates will take effect on OS reprovisioning. :vartype enable_automatic_updates: bool :ivar time_zone: Specifies the time zone of the virtual machine. e.g. "Pacific Standard Time". - :code:`
`:code:`
` Possible values can be `TimeZoneInfo.Id + Possible values can be `TimeZoneInfo.Id `_ value from time zones returned by `TimeZoneInfo.GetSystemTimeZones `_. @@ -15197,16 +15117,16 @@ def __init__( ) -> None: """ :keyword provision_vm_agent: Indicates whether virtual machine agent should be provisioned on - the virtual machine. :code:`
`:code:`
` When this property is not specified in the - request body, default behavior is to set it to true. This will ensure that VM Agent is - installed on the VM so that extensions can be added to the VM later. + the virtual machine. When this property is not specified in the request body, default behavior + is to set it to true. This will ensure that VM Agent is installed on the VM so that extensions + can be added to the VM later. :paramtype provision_vm_agent: bool :keyword enable_automatic_updates: Indicates whether Automatic Updates is enabled for the - Windows virtual machine. Default value is true. :code:`
`:code:`
` For virtual machine - scale sets, this property can be updated and updates will take effect on OS reprovisioning. + Windows virtual machine. Default value is true. For virtual machine scale sets, this property + can be updated and updates will take effect on OS reprovisioning. :paramtype enable_automatic_updates: bool :keyword time_zone: Specifies the time zone of the virtual machine. e.g. "Pacific Standard - Time". :code:`
`:code:`
` Possible values can be `TimeZoneInfo.Id + Time". Possible values can be `TimeZoneInfo.Id `_ value from time zones returned by `TimeZoneInfo.GetSystemTimeZones `_. @@ -15349,20 +15269,18 @@ def __init__(self, *, listeners: Optional[List["_models.WinRMListener"]] = None, class WinRMListener(_serialization.Model): """Describes Protocol and thumbprint of Windows Remote Management listener. - :ivar protocol: Specifies the protocol of WinRM listener. :code:`
`:code:`
` Possible - values are: :code:`
`\ **http** :code:`
`:code:`
` **https**. Known values are: "Http" - and "Https". + :ivar protocol: Specifies the protocol of WinRM listener. Possible values are: **http** + **https**. Known values are: "Http" and "Https". :vartype protocol: str or ~azure.mgmt.compute.v2022_08_01.models.ProtocolTypes :ivar certificate_url: This is the URL of a certificate that has been uploaded to Key Vault as a secret. For adding a secret to the Key Vault, see `Add a key or secret to the key vault `_. In this case, your certificate needs to be It is the Base64 encoding of the following JSON Object which is encoded - in UTF-8: :code:`
`:code:`
` {:code:`
` - "data":":code:`<Base64-encoded-certificate>`",:code:`
` "dataType":"pfx",:code:`
` - "password":":code:`<pfx-file-password>`":code:`
`} :code:`
` To install certificates on a - virtual machine it is recommended to use the `Azure Key Vault virtual machine extension for - Linux `_ or the - `Azure Key Vault virtual machine extension for Windows + in UTF-8: { "data":":code:``", "dataType":"pfx", + "password":":code:``"} To install certificates on a virtual machine it is + recommended to use the `Azure Key Vault virtual machine extension for Linux + `_ or the `Azure + Key Vault virtual machine extension for Windows `_. :vartype certificate_url: str """ @@ -15380,20 +15298,18 @@ def __init__( **kwargs: Any ) -> None: """ - :keyword protocol: Specifies the protocol of WinRM listener. :code:`
`:code:`
` Possible - values are: :code:`
`\ **http** :code:`
`:code:`
` **https**. Known values are: "Http" - and "Https". + :keyword protocol: Specifies the protocol of WinRM listener. Possible values are: **http** + **https**. Known values are: "Http" and "Https". :paramtype protocol: str or ~azure.mgmt.compute.v2022_08_01.models.ProtocolTypes :keyword certificate_url: This is the URL of a certificate that has been uploaded to Key Vault as a secret. For adding a secret to the Key Vault, see `Add a key or secret to the key vault `_. In this case, your certificate needs to be It is the Base64 encoding of the following JSON Object which is encoded - in UTF-8: :code:`
`:code:`
` {:code:`
` - "data":":code:`<Base64-encoded-certificate>`",:code:`
` "dataType":"pfx",:code:`
` - "password":":code:`<pfx-file-password>`":code:`
`} :code:`
` To install certificates on a - virtual machine it is recommended to use the `Azure Key Vault virtual machine extension for - Linux `_ or the - `Azure Key Vault virtual machine extension for Windows + in UTF-8: { "data":":code:``", "dataType":"pfx", + "password":":code:``"} To install certificates on a virtual machine it is + recommended to use the `Azure Key Vault virtual machine extension for Linux + `_ or the `Azure + Key Vault virtual machine extension for Windows `_. :paramtype certificate_url: str """ diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2022_09_04/_version.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2022_09_04/_version.py index 215f19d5391f4..e5754a47ce68f 100644 --- a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2022_09_04/_version.py +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2022_09_04/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "30.0.0" +VERSION = "1.0.0b1" diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2022_11_01/_version.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2022_11_01/_version.py index 215f19d5391f4..e5754a47ce68f 100644 --- a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2022_11_01/_version.py +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2022_11_01/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "30.0.0" +VERSION = "1.0.0b1" diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/__init__.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/__init__.py new file mode 100644 index 0000000000000..3b1b6244b2375 --- /dev/null +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/__init__.py @@ -0,0 +1,26 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._compute_management_client import ComputeManagementClient +from ._version import VERSION + +__version__ = VERSION + +try: + from ._patch import __all__ as _patch_all + from ._patch import * # pylint: disable=unused-wildcard-import +except ImportError: + _patch_all = [] +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "ComputeManagementClient", +] +__all__.extend([p for p in _patch_all if p not in __all__]) + +_patch_sdk() diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/_compute_management_client.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/_compute_management_client.py new file mode 100644 index 0000000000000..d03f734c731e8 --- /dev/null +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/_compute_management_client.py @@ -0,0 +1,120 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
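Referring back to the Key Vault certificate payload format described in the VaultCertificate and WinRMListener docstrings earlier in this diff (the Base64 encoding of a UTF-8 JSON object with "data", "dataType" and "password" fields), the following is an illustrative sketch only, not part of the patch; the PFX file name and password are placeholders.

```python
import base64
import json

# Placeholder inputs: a PFX certificate file on disk and its password.
with open("example-cert.pfx", "rb") as pfx_file:
    pfx_bytes = pfx_file.read()

payload = {
    "data": base64.b64encode(pfx_bytes).decode("utf-8"),
    "dataType": "pfx",
    "password": "example-pfx-password",
}

# Per the docstrings above, the Key Vault secret value is the Base64 encoding
# of the UTF-8 JSON object.
secret_value = base64.b64encode(json.dumps(payload).encode("utf-8")).decode("utf-8")
print(secret_value[:40], "...")
```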
See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from copy import deepcopy +from typing import Any, TYPE_CHECKING + +from azure.core.rest import HttpRequest, HttpResponse +from azure.mgmt.core import ARMPipelineClient + +from . import models as _models +from .._serialization import Deserializer, Serializer +from ._configuration import ComputeManagementClientConfiguration +from .operations import ( + DiskAccessesOperations, + DiskEncryptionSetsOperations, + DiskRestorePointOperations, + DisksOperations, + SnapshotsOperations, +) + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.core.credentials import TokenCredential + + +class ComputeManagementClient: # pylint: disable=client-accepts-api-version-keyword + """Compute Client. + + :ivar disks: DisksOperations operations + :vartype disks: azure.mgmt.compute.v2023_01_02.operations.DisksOperations + :ivar disk_accesses: DiskAccessesOperations operations + :vartype disk_accesses: azure.mgmt.compute.v2023_01_02.operations.DiskAccessesOperations + :ivar disk_encryption_sets: DiskEncryptionSetsOperations operations + :vartype disk_encryption_sets: + azure.mgmt.compute.v2023_01_02.operations.DiskEncryptionSetsOperations + :ivar disk_restore_point: DiskRestorePointOperations operations + :vartype disk_restore_point: + azure.mgmt.compute.v2023_01_02.operations.DiskRestorePointOperations + :ivar snapshots: SnapshotsOperations operations + :vartype snapshots: azure.mgmt.compute.v2023_01_02.operations.SnapshotsOperations + :param credential: Credential needed for the client to connect to Azure. Required. + :type credential: ~azure.core.credentials.TokenCredential + :param subscription_id: Subscription credentials which uniquely identify Microsoft Azure + subscription. The subscription ID forms part of the URI for every service call. Required. + :type subscription_id: str + :param base_url: Service URL. Default value is "https://management.azure.com". + :type base_url: str + :keyword api_version: Api Version. Default value is "2023-01-02". Note that overriding this + default value may result in unsupported behavior. + :paramtype api_version: str + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. 
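By way of illustration for the client declared in this new file, here is a minimal usage sketch that is not part of the generated code. It assumes the `azure-identity` package for `DefaultAzureCredential`, uses a placeholder subscription id, and passes `retry_total`, a standard azure-core pipeline option, as an optional keyword.

```python
from azure.identity import DefaultAzureCredential
from azure.mgmt.compute.v2023_01_02 import ComputeManagementClient

credential = DefaultAzureCredential()
client = ComputeManagementClient(
    credential=credential,
    subscription_id="00000000-0000-0000-0000-000000000000",  # placeholder
    retry_total=3,  # optional azure-core pipeline setting (assumption, not required)
)

# Operation groups are exposed as attributes of the client, e.g. the disks group.
for disk in client.disks.list():
    print(disk.name)

client.close()
```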
+ """ + + def __init__( + self, + credential: "TokenCredential", + subscription_id: str, + base_url: str = "https://management.azure.com", + **kwargs: Any + ) -> None: + self._config = ComputeManagementClientConfiguration( + credential=credential, subscription_id=subscription_id, **kwargs + ) + self._client: ARMPipelineClient = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs) + + client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)} + self._serialize = Serializer(client_models) + self._deserialize = Deserializer(client_models) + self._serialize.client_side_validation = False + self.disks = DisksOperations(self._client, self._config, self._serialize, self._deserialize, "2023-01-02") + self.disk_accesses = DiskAccessesOperations( + self._client, self._config, self._serialize, self._deserialize, "2023-01-02" + ) + self.disk_encryption_sets = DiskEncryptionSetsOperations( + self._client, self._config, self._serialize, self._deserialize, "2023-01-02" + ) + self.disk_restore_point = DiskRestorePointOperations( + self._client, self._config, self._serialize, self._deserialize, "2023-01-02" + ) + self.snapshots = SnapshotsOperations( + self._client, self._config, self._serialize, self._deserialize, "2023-01-02" + ) + + def _send_request(self, request: HttpRequest, **kwargs: Any) -> HttpResponse: + """Runs the network request through the client's chained policies. + + >>> from azure.core.rest import HttpRequest + >>> request = HttpRequest("GET", "https://www.example.org/") + + >>> response = client._send_request(request) + + + For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request + + :param request: The network request you want to make. Required. + :type request: ~azure.core.rest.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. Defaults to False. + :return: The response of your network call. Does not do error handling on your response. + :rtype: ~azure.core.rest.HttpResponse + """ + + request_copy = deepcopy(request) + request_copy.url = self._client.format_url(request_copy.url) + return self._client.send_request(request_copy, **kwargs) + + def close(self) -> None: + self._client.close() + + def __enter__(self) -> "ComputeManagementClient": + self._client.__enter__() + return self + + def __exit__(self, *exc_details: Any) -> None: + self._client.__exit__(*exc_details) diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/_configuration.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/_configuration.py new file mode 100644 index 0000000000000..3589dec9553c0 --- /dev/null +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/_configuration.py @@ -0,0 +1,67 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from typing import Any, TYPE_CHECKING + +from azure.core.configuration import Configuration +from azure.core.pipeline import policies +from azure.mgmt.core.policies import ARMChallengeAuthenticationPolicy, ARMHttpLoggingPolicy + +from ._version import VERSION + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.core.credentials import TokenCredential + + +class ComputeManagementClientConfiguration(Configuration): # pylint: disable=too-many-instance-attributes + """Configuration for ComputeManagementClient. + + Note that all parameters used to create this instance are saved as instance + attributes. + + :param credential: Credential needed for the client to connect to Azure. Required. + :type credential: ~azure.core.credentials.TokenCredential + :param subscription_id: Subscription credentials which uniquely identify Microsoft Azure + subscription. The subscription ID forms part of the URI for every service call. Required. + :type subscription_id: str + :keyword api_version: Api Version. Default value is "2023-01-02". Note that overriding this + default value may result in unsupported behavior. + :paramtype api_version: str + """ + + def __init__(self, credential: "TokenCredential", subscription_id: str, **kwargs: Any) -> None: + super(ComputeManagementClientConfiguration, self).__init__(**kwargs) + api_version: str = kwargs.pop("api_version", "2023-01-02") + + if credential is None: + raise ValueError("Parameter 'credential' must not be None.") + if subscription_id is None: + raise ValueError("Parameter 'subscription_id' must not be None.") + + self.credential = credential + self.subscription_id = subscription_id + self.api_version = api_version + self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"]) + kwargs.setdefault("sdk_moniker", "mgmt-compute/{}".format(VERSION)) + self._configure(**kwargs) + + def _configure(self, **kwargs: Any) -> None: + self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get("http_logging_policy") or ARMHttpLoggingPolicy(**kwargs) + self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs) + self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get("redirect_policy") or policies.RedirectPolicy(**kwargs) + self.authentication_policy = kwargs.get("authentication_policy") + if self.credential and not self.authentication_policy: + self.authentication_policy = ARMChallengeAuthenticationPolicy( + self.credential, *self.credential_scopes, **kwargs + ) diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/_metadata.json b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/_metadata.json new file mode 100644 index 0000000000000..074fbf92ad8d9 --- /dev/null +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/_metadata.json @@ -0,0 +1,114 @@ +{ + "chosen_version": "2023-01-02", + "total_api_version_list": ["2023-01-02"], + "client": { + "name": "ComputeManagementClient", + "filename": "_compute_management_client", + 
"description": "Compute Client.", + "host_value": "\"https://management.azure.com\"", + "parameterized_host_template": null, + "azure_arm": true, + "has_lro_operations": true, + "client_side_validation": false, + "sync_imports": "{\"conditional\": {\"stdlib\": {\"typing\": [\"Any\", \"Optional\"]}}, \"regular\": {\"azurecore\": {\"azure.mgmt.core\": [\"ARMPipelineClient\"], \"azure.profiles\": [\"KnownProfiles\", \"ProfileDefinition\"], \"azure.profiles.multiapiclient\": [\"MultiApiClientMixin\"]}, \"local\": {\"._configuration\": [\"ComputeManagementClientConfiguration\"], \".._serialization\": [\"Deserializer\", \"Serializer\"]}}, \"typing\": {\"azurecore\": {\"azure.core.credentials\": [\"TokenCredential\"]}}}", + "async_imports": "{\"conditional\": {\"stdlib\": {\"typing\": [\"Any\", \"Optional\"]}}, \"regular\": {\"azurecore\": {\"azure.mgmt.core\": [\"AsyncARMPipelineClient\"], \"azure.profiles\": [\"KnownProfiles\", \"ProfileDefinition\"], \"azure.profiles.multiapiclient\": [\"MultiApiClientMixin\"]}, \"local\": {\"._configuration\": [\"ComputeManagementClientConfiguration\"], \"..._serialization\": [\"Deserializer\", \"Serializer\"]}}, \"typing\": {\"azurecore\": {\"azure.core.credentials_async\": [\"AsyncTokenCredential\"]}}}" + }, + "global_parameters": { + "sync": { + "credential": { + "signature": "credential: \"TokenCredential\",", + "description": "Credential needed for the client to connect to Azure. Required.", + "docstring_type": "~azure.core.credentials.TokenCredential", + "required": true, + "method_location": "positional" + }, + "subscription_id": { + "signature": "subscription_id: str,", + "description": "Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms part of the URI for every service call. Required.", + "docstring_type": "str", + "required": true, + "method_location": "positional" + } + }, + "async": { + "credential": { + "signature": "credential: \"AsyncTokenCredential\",", + "description": "Credential needed for the client to connect to Azure. Required.", + "docstring_type": "~azure.core.credentials_async.AsyncTokenCredential", + "required": true + }, + "subscription_id": { + "signature": "subscription_id: str,", + "description": "Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms part of the URI for every service call. 
Required.", + "docstring_type": "str", + "required": true + } + }, + "constant": { + }, + "call": "credential, subscription_id", + "service_client_specific": { + "sync": { + "api_version": { + "signature": "api_version: Optional[str]=None,", + "description": "API version to use if no profile is provided, or if missing in profile.", + "docstring_type": "str", + "required": false, + "method_location": "positional" + }, + "base_url": { + "signature": "base_url: str = \"https://management.azure.com\",", + "description": "Service URL", + "docstring_type": "str", + "required": false, + "method_location": "positional" + }, + "profile": { + "signature": "profile: KnownProfiles=KnownProfiles.default,", + "description": "A profile definition, from KnownProfiles to dict.", + "docstring_type": "azure.profiles.KnownProfiles", + "required": false, + "method_location": "positional" + } + }, + "async": { + "api_version": { + "signature": "api_version: Optional[str] = None,", + "description": "API version to use if no profile is provided, or if missing in profile.", + "docstring_type": "str", + "required": false, + "method_location": "positional" + }, + "base_url": { + "signature": "base_url: str = \"https://management.azure.com\",", + "description": "Service URL", + "docstring_type": "str", + "required": false, + "method_location": "positional" + }, + "profile": { + "signature": "profile: KnownProfiles = KnownProfiles.default,", + "description": "A profile definition, from KnownProfiles to dict.", + "docstring_type": "azure.profiles.KnownProfiles", + "required": false, + "method_location": "positional" + } + } + } + }, + "config": { + "credential": true, + "credential_scopes": ["https://management.azure.com/.default"], + "credential_call_sync": "ARMChallengeAuthenticationPolicy(self.credential, *self.credential_scopes, **kwargs)", + "credential_call_async": "AsyncARMChallengeAuthenticationPolicy(self.credential, *self.credential_scopes, **kwargs)", + "sync_imports": "{\"regular\": {\"azurecore\": {\"azure.core.configuration\": [\"Configuration\"], \"azure.core.pipeline\": [\"policies\"], \"azure.mgmt.core.policies\": [\"ARMChallengeAuthenticationPolicy\", \"ARMHttpLoggingPolicy\"]}, \"local\": {\"._version\": [\"VERSION\"]}}, \"conditional\": {\"stdlib\": {\"typing\": [\"Any\"]}}, \"typing\": {\"azurecore\": {\"azure.core.credentials\": [\"TokenCredential\"]}}}", + "async_imports": "{\"regular\": {\"azurecore\": {\"azure.core.configuration\": [\"Configuration\"], \"azure.core.pipeline\": [\"policies\"], \"azure.mgmt.core.policies\": [\"ARMHttpLoggingPolicy\", \"AsyncARMChallengeAuthenticationPolicy\"]}, \"local\": {\".._version\": [\"VERSION\"]}}, \"conditional\": {\"stdlib\": {\"typing\": [\"Any\"]}}, \"typing\": {\"azurecore\": {\"azure.core.credentials_async\": [\"AsyncTokenCredential\"]}}}" + }, + "operation_groups": { + "disks": "DisksOperations", + "disk_accesses": "DiskAccessesOperations", + "disk_encryption_sets": "DiskEncryptionSetsOperations", + "disk_restore_point": "DiskRestorePointOperations", + "snapshots": "SnapshotsOperations" + } +} diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/_patch.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/_patch.py new file mode 100644 index 0000000000000..f7dd32510333d --- /dev/null +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/_patch.py @@ -0,0 +1,20 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------ +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List + +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. + + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/_vendor.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/_vendor.py new file mode 100644 index 0000000000000..bd0df84f5319f --- /dev/null +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/_vendor.py @@ -0,0 +1,30 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from typing import List, cast + +from azure.core.pipeline.transport import HttpRequest + + +def _convert_request(request, files=None): + data = request.content if not files else None + request = HttpRequest(method=request.method, url=request.url, headers=request.headers, data=data) + if files: + request.set_formdata_body(files) + return request + + +def _format_url_section(template, **kwargs): + components = template.split("/") + while components: + try: + return template.format(**kwargs) + except KeyError as key: + # Need the cast, as for some reasons "split" is typed as list[str | Any] + formatted_components = cast(List[str], template.split("/")) + components = [c for c in formatted_components if "{}".format(key.args[0]) not in c] + template = "/".join(components) diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/_version.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/_version.py new file mode 100644 index 0000000000000..e5754a47ce68f --- /dev/null +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/_version.py @@ -0,0 +1,9 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +VERSION = "1.0.0b1" diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/aio/__init__.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/aio/__init__.py new file mode 100644 index 0000000000000..0715f50185586 --- /dev/null +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/aio/__init__.py @@ -0,0 +1,23 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
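The `_format_url_section` helper in `_vendor.py` above retries `str.format` after discarding any URL path segment whose template key was not supplied. A small illustrative call follows (the helper is internal to the package, and the template and argument values here are invented for the sketch).

```python
from azure.mgmt.compute.v2023_01_02._vendor import _format_url_section

template = (
    "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}"
    "/providers/Microsoft.Compute/disks/{diskName}"
)

# All keys supplied: a plain str.format substitution.
print(_format_url_section(template, subscriptionId="sub0", resourceGroupName="rg1", diskName="d1"))
# /subscriptions/sub0/resourceGroups/rg1/providers/Microsoft.Compute/disks/d1

# Missing key: the "{diskName}" segment is dropped and formatting is retried.
print(_format_url_section(template, subscriptionId="sub0", resourceGroupName="rg1"))
# /subscriptions/sub0/resourceGroups/rg1/providers/Microsoft.Compute/disks
```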
+# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._compute_management_client import ComputeManagementClient + +try: + from ._patch import __all__ as _patch_all + from ._patch import * # pylint: disable=unused-wildcard-import +except ImportError: + _patch_all = [] +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "ComputeManagementClient", +] +__all__.extend([p for p in _patch_all if p not in __all__]) + +_patch_sdk() diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/aio/_compute_management_client.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/aio/_compute_management_client.py new file mode 100644 index 0000000000000..1314fded918e0 --- /dev/null +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/aio/_compute_management_client.py @@ -0,0 +1,120 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from copy import deepcopy +from typing import Any, Awaitable, TYPE_CHECKING + +from azure.core.rest import AsyncHttpResponse, HttpRequest +from azure.mgmt.core import AsyncARMPipelineClient + +from .. import models as _models +from ..._serialization import Deserializer, Serializer +from ._configuration import ComputeManagementClientConfiguration +from .operations import ( + DiskAccessesOperations, + DiskEncryptionSetsOperations, + DiskRestorePointOperations, + DisksOperations, + SnapshotsOperations, +) + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.core.credentials_async import AsyncTokenCredential + + +class ComputeManagementClient: # pylint: disable=client-accepts-api-version-keyword + """Compute Client. + + :ivar disks: DisksOperations operations + :vartype disks: azure.mgmt.compute.v2023_01_02.aio.operations.DisksOperations + :ivar disk_accesses: DiskAccessesOperations operations + :vartype disk_accesses: azure.mgmt.compute.v2023_01_02.aio.operations.DiskAccessesOperations + :ivar disk_encryption_sets: DiskEncryptionSetsOperations operations + :vartype disk_encryption_sets: + azure.mgmt.compute.v2023_01_02.aio.operations.DiskEncryptionSetsOperations + :ivar disk_restore_point: DiskRestorePointOperations operations + :vartype disk_restore_point: + azure.mgmt.compute.v2023_01_02.aio.operations.DiskRestorePointOperations + :ivar snapshots: SnapshotsOperations operations + :vartype snapshots: azure.mgmt.compute.v2023_01_02.aio.operations.SnapshotsOperations + :param credential: Credential needed for the client to connect to Azure. Required. + :type credential: ~azure.core.credentials_async.AsyncTokenCredential + :param subscription_id: Subscription credentials which uniquely identify Microsoft Azure + subscription. The subscription ID forms part of the URI for every service call. Required. + :type subscription_id: str + :param base_url: Service URL. Default value is "https://management.azure.com". + :type base_url: str + :keyword api_version: Api Version. Default value is "2023-01-02". 
Note that overriding this + default value may result in unsupported behavior. + :paramtype api_version: str + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + """ + + def __init__( + self, + credential: "AsyncTokenCredential", + subscription_id: str, + base_url: str = "https://management.azure.com", + **kwargs: Any + ) -> None: + self._config = ComputeManagementClientConfiguration( + credential=credential, subscription_id=subscription_id, **kwargs + ) + self._client: AsyncARMPipelineClient = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs) + + client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)} + self._serialize = Serializer(client_models) + self._deserialize = Deserializer(client_models) + self._serialize.client_side_validation = False + self.disks = DisksOperations(self._client, self._config, self._serialize, self._deserialize, "2023-01-02") + self.disk_accesses = DiskAccessesOperations( + self._client, self._config, self._serialize, self._deserialize, "2023-01-02" + ) + self.disk_encryption_sets = DiskEncryptionSetsOperations( + self._client, self._config, self._serialize, self._deserialize, "2023-01-02" + ) + self.disk_restore_point = DiskRestorePointOperations( + self._client, self._config, self._serialize, self._deserialize, "2023-01-02" + ) + self.snapshots = SnapshotsOperations( + self._client, self._config, self._serialize, self._deserialize, "2023-01-02" + ) + + def _send_request(self, request: HttpRequest, **kwargs: Any) -> Awaitable[AsyncHttpResponse]: + """Runs the network request through the client's chained policies. + + >>> from azure.core.rest import HttpRequest + >>> request = HttpRequest("GET", "https://www.example.org/") + + >>> response = await client._send_request(request) + + + For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request + + :param request: The network request you want to make. Required. + :type request: ~azure.core.rest.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. Defaults to False. + :return: The response of your network call. Does not do error handling on your response. + :rtype: ~azure.core.rest.AsyncHttpResponse + """ + + request_copy = deepcopy(request) + request_copy.url = self._client.format_url(request_copy.url) + return self._client.send_request(request_copy, **kwargs) + + async def close(self) -> None: + await self._client.close() + + async def __aenter__(self) -> "ComputeManagementClient": + await self._client.__aenter__() + return self + + async def __aexit__(self, *exc_details: Any) -> None: + await self._client.__aexit__(*exc_details) diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/aio/_configuration.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/aio/_configuration.py new file mode 100644 index 0000000000000..33fe3d0940315 --- /dev/null +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/aio/_configuration.py @@ -0,0 +1,67 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
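As an illustration for the asynchronous client defined just above (a sketch that is not part of the patch; it assumes the `azure-identity` aio credential and a placeholder subscription id):

```python
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.compute.v2023_01_02.aio import ComputeManagementClient


async def main() -> None:
    # Both the credential and the client are async context managers.
    async with DefaultAzureCredential() as credential:
        async with ComputeManagementClient(
            credential, "00000000-0000-0000-0000-000000000000"  # placeholder subscription id
        ) as client:
            async for disk in client.disks.list():
                print(disk.name)


asyncio.run(main())
```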
+# -------------------------------------------------------------------------- + +from typing import Any, TYPE_CHECKING + +from azure.core.configuration import Configuration +from azure.core.pipeline import policies +from azure.mgmt.core.policies import ARMHttpLoggingPolicy, AsyncARMChallengeAuthenticationPolicy + +from .._version import VERSION + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from azure.core.credentials_async import AsyncTokenCredential + + +class ComputeManagementClientConfiguration(Configuration): # pylint: disable=too-many-instance-attributes + """Configuration for ComputeManagementClient. + + Note that all parameters used to create this instance are saved as instance + attributes. + + :param credential: Credential needed for the client to connect to Azure. Required. + :type credential: ~azure.core.credentials_async.AsyncTokenCredential + :param subscription_id: Subscription credentials which uniquely identify Microsoft Azure + subscription. The subscription ID forms part of the URI for every service call. Required. + :type subscription_id: str + :keyword api_version: Api Version. Default value is "2023-01-02". Note that overriding this + default value may result in unsupported behavior. + :paramtype api_version: str + """ + + def __init__(self, credential: "AsyncTokenCredential", subscription_id: str, **kwargs: Any) -> None: + super(ComputeManagementClientConfiguration, self).__init__(**kwargs) + api_version: str = kwargs.pop("api_version", "2023-01-02") + + if credential is None: + raise ValueError("Parameter 'credential' must not be None.") + if subscription_id is None: + raise ValueError("Parameter 'subscription_id' must not be None.") + + self.credential = credential + self.subscription_id = subscription_id + self.api_version = api_version + self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"]) + kwargs.setdefault("sdk_moniker", "mgmt-compute/{}".format(VERSION)) + self._configure(**kwargs) + + def _configure(self, **kwargs: Any) -> None: + self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get("http_logging_policy") or ARMHttpLoggingPolicy(**kwargs) + self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs) + self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs) + self.authentication_policy = kwargs.get("authentication_policy") + if self.credential and not self.authentication_policy: + self.authentication_policy = AsyncARMChallengeAuthenticationPolicy( + self.credential, *self.credential_scopes, **kwargs + ) diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/aio/_patch.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/aio/_patch.py new file mode 100644 index 0000000000000..f7dd32510333d --- /dev/null +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/aio/_patch.py @@ -0,0 +1,20 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------ +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List + +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. + + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/aio/operations/__init__.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/aio/operations/__init__.py new file mode 100644 index 0000000000000..8ca1cc2598c3b --- /dev/null +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/aio/operations/__init__.py @@ -0,0 +1,27 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._operations import DisksOperations +from ._operations import DiskAccessesOperations +from ._operations import DiskEncryptionSetsOperations +from ._operations import DiskRestorePointOperations +from ._operations import SnapshotsOperations + +from ._patch import __all__ as _patch_all +from ._patch import * # pylint: disable=unused-wildcard-import +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "DisksOperations", + "DiskAccessesOperations", + "DiskEncryptionSetsOperations", + "DiskRestorePointOperations", + "SnapshotsOperations", +] +__all__.extend([p for p in _patch_all if p not in __all__]) +_patch_sdk() diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/aio/operations/_operations.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/aio/operations/_operations.py new file mode 100644 index 0000000000000..3433783e437ed --- /dev/null +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/aio/operations/_operations.py @@ -0,0 +1,5289 @@ +# pylint: disable=too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +from io import IOBase +from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload +import urllib.parse + +from azure.core.async_paging import AsyncItemPaged, AsyncList +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse +from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod +from azure.core.rest import HttpRequest +from azure.core.tracing.decorator import distributed_trace +from azure.core.tracing.decorator_async import distributed_trace_async +from azure.core.utils import case_insensitive_dict +from azure.mgmt.core.exceptions import ARMErrorFormat +from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling + +from ... import models as _models +from ..._vendor import _convert_request +from ...operations._operations import ( + build_disk_accesses_create_or_update_request, + build_disk_accesses_delete_a_private_endpoint_connection_request, + build_disk_accesses_delete_request, + build_disk_accesses_get_a_private_endpoint_connection_request, + build_disk_accesses_get_private_link_resources_request, + build_disk_accesses_get_request, + build_disk_accesses_list_by_resource_group_request, + build_disk_accesses_list_private_endpoint_connections_request, + build_disk_accesses_list_request, + build_disk_accesses_update_a_private_endpoint_connection_request, + build_disk_accesses_update_request, + build_disk_encryption_sets_create_or_update_request, + build_disk_encryption_sets_delete_request, + build_disk_encryption_sets_get_request, + build_disk_encryption_sets_list_associated_resources_request, + build_disk_encryption_sets_list_by_resource_group_request, + build_disk_encryption_sets_list_request, + build_disk_encryption_sets_update_request, + build_disk_restore_point_get_request, + build_disk_restore_point_grant_access_request, + build_disk_restore_point_list_by_restore_point_request, + build_disk_restore_point_revoke_access_request, + build_disks_create_or_update_request, + build_disks_delete_request, + build_disks_get_request, + build_disks_grant_access_request, + build_disks_list_by_resource_group_request, + build_disks_list_request, + build_disks_revoke_access_request, + build_disks_update_request, + build_snapshots_create_or_update_request, + build_snapshots_delete_request, + build_snapshots_get_request, + build_snapshots_grant_access_request, + build_snapshots_list_by_resource_group_request, + build_snapshots_list_request, + build_snapshots_revoke_access_request, + build_snapshots_update_request, +) + +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + + +class DisksOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.compute.v2023_01_02.aio.ComputeManagementClient`'s + :attr:`disks` attribute. 
+ """ + + models = _models + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + + async def _create_or_update_initial( + self, resource_group_name: str, disk_name: str, disk: Union[_models.Disk, IO], **kwargs: Any + ) -> _models.Disk: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Disk] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(disk, (IOBase, bytes)): + _content = disk + else: + _json = self._serialize.body(disk, "Disk") + + request = build_disks_create_or_update_request( + resource_group_name=resource_group_name, + disk_name=disk_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + template_url=self._create_or_update_initial.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + if response.status_code == 200: + deserialized = self._deserialize("Disk", pipeline_response) + + if response.status_code == 202: + deserialized = self._deserialize("Disk", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + _create_or_update_initial.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}" + } + + @overload + async def begin_create_or_update( + self, + resource_group_name: str, + disk_name: str, + disk: _models.Disk, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.Disk]: + """Creates or updates a disk. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_name: The name of the managed disk that is being created. The name can't be changed + after the disk is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The + maximum name length is 80 characters. Required. + :type disk_name: str + :param disk: Disk object supplied in the body of the Put disk operation. Required. 
+ :type disk: ~azure.mgmt.compute.v2023_01_02.models.Disk + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of AsyncLROPoller that returns either Disk or the result of cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2023_01_02.models.Disk] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def begin_create_or_update( + self, + resource_group_name: str, + disk_name: str, + disk: IO, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.Disk]: + """Creates or updates a disk. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_name: The name of the managed disk that is being created. The name can't be changed + after the disk is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The + maximum name length is 80 characters. Required. + :type disk_name: str + :param disk: Disk object supplied in the body of the Put disk operation. Required. + :type disk: IO + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of AsyncLROPoller that returns either Disk or the result of cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2023_01_02.models.Disk] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def begin_create_or_update( + self, resource_group_name: str, disk_name: str, disk: Union[_models.Disk, IO], **kwargs: Any + ) -> AsyncLROPoller[_models.Disk]: + """Creates or updates a disk. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_name: The name of the managed disk that is being created. The name can't be changed + after the disk is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The + maximum name length is 80 characters. Required. + :type disk_name: str + :param disk: Disk object supplied in the body of the Put disk operation. Is either a Disk type + or a IO type. Required. 
+ :type disk: ~azure.mgmt.compute.v2023_01_02.models.Disk or IO + :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. + Default value is None. + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of AsyncLROPoller that returns either Disk or the result of cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2023_01_02.models.Disk] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Disk] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._create_or_update_initial( + resource_group_name=resource_group_name, + disk_name=disk_name, + disk=disk, + api_version=api_version, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize("Disk", pipeline_response) + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + if polling is True: + polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + begin_create_or_update.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}" + } + + async def _update_initial( + self, resource_group_name: str, disk_name: str, disk: Union[_models.DiskUpdate, IO], **kwargs: Any + ) -> _models.Disk: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + content_type: Optional[str] = 
kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Disk] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(disk, (IOBase, bytes)): + _content = disk + else: + _json = self._serialize.body(disk, "DiskUpdate") + + request = build_disks_update_request( + resource_group_name=resource_group_name, + disk_name=disk_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + template_url=self._update_initial.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + if response.status_code == 200: + deserialized = self._deserialize("Disk", pipeline_response) + + if response.status_code == 202: + deserialized = self._deserialize("Disk", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + _update_initial.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}" + } + + @overload + async def begin_update( + self, + resource_group_name: str, + disk_name: str, + disk: _models.DiskUpdate, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.Disk]: + """Updates (patches) a disk. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_name: The name of the managed disk that is being created. The name can't be changed + after the disk is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The + maximum name length is 80 characters. Required. + :type disk_name: str + :param disk: Disk object supplied in the body of the Patch disk operation. Required. + :type disk: ~azure.mgmt.compute.v2023_01_02.models.DiskUpdate + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. 
+ :return: An instance of AsyncLROPoller that returns either Disk or the result of cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2023_01_02.models.Disk] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def begin_update( + self, + resource_group_name: str, + disk_name: str, + disk: IO, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.Disk]: + """Updates (patches) a disk. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_name: The name of the managed disk that is being created. The name can't be changed + after the disk is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The + maximum name length is 80 characters. Required. + :type disk_name: str + :param disk: Disk object supplied in the body of the Patch disk operation. Required. + :type disk: IO + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of AsyncLROPoller that returns either Disk or the result of cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2023_01_02.models.Disk] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def begin_update( + self, resource_group_name: str, disk_name: str, disk: Union[_models.DiskUpdate, IO], **kwargs: Any + ) -> AsyncLROPoller[_models.Disk]: + """Updates (patches) a disk. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_name: The name of the managed disk that is being created. The name can't be changed + after the disk is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The + maximum name length is 80 characters. Required. + :type disk_name: str + :param disk: Disk object supplied in the body of the Patch disk operation. Is either a + DiskUpdate type or a IO type. Required. + :type disk: ~azure.mgmt.compute.v2023_01_02.models.DiskUpdate or IO + :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. + Default value is None. + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. 
+ :return: An instance of AsyncLROPoller that returns either Disk or the result of cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2023_01_02.models.Disk] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Disk] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._update_initial( + resource_group_name=resource_group_name, + disk_name=disk_name, + disk=disk, + api_version=api_version, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize("Disk", pipeline_response) + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + if polling is True: + polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + begin_update.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}" + } + + @distributed_trace_async + async def get(self, resource_group_name: str, disk_name: str, **kwargs: Any) -> _models.Disk: + """Gets information about a disk. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_name: The name of the managed disk that is being created. The name can't be changed + after the disk is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The + maximum name length is 80 characters. Required. 
+ :type disk_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: Disk or the result of cls(response) + :rtype: ~azure.mgmt.compute.v2023_01_02.models.Disk + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[_models.Disk] = kwargs.pop("cls", None) + + request = build_disks_get_request( + resource_group_name=resource_group_name, + disk_name=disk_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self.get.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize("Disk", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + + get.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}" + } + + async def _delete_initial( # pylint: disable=inconsistent-return-statements + self, resource_group_name: str, disk_name: str, **kwargs: Any + ) -> None: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_disks_delete_request( + resource_group_name=resource_group_name, + disk_name=disk_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self._delete_initial.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + if cls: + return cls(pipeline_response, None, {}) + + _delete_initial.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}" + } + + @distributed_trace_async + async def begin_delete(self, resource_group_name: str, 
disk_name: str, **kwargs: Any) -> AsyncLROPoller[None]: + """Deletes a disk. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_name: The name of the managed disk that is being created. The name can't be changed + after the disk is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The + maximum name length is 80 characters. Required. + :type disk_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._delete_initial( # type: ignore + resource_group_name=resource_group_name, + disk_name=disk_name, + api_version=api_version, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) + + if polling is True: + polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + begin_delete.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}" + } + + @distributed_trace + def list_by_resource_group(self, resource_group_name: str, **kwargs: Any) -> AsyncIterable["_models.Disk"]: + """Lists all the disks under a resource group. + + :param resource_group_name: The name of the resource group. Required. 
+ :type resource_group_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either Disk or the result of cls(response) + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2023_01_02.models.Disk] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[_models.DiskList] = kwargs.pop("cls", None) + + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + request = build_disks_list_by_resource_group_request( + resource_group_name=resource_group_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self.list_by_resource_group.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + request.method = "GET" + return request + + async def extract_data(pipeline_response): + deserialized = self._deserialize("DiskList", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.next_link or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + list_by_resource_group.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks" + } + + @distributed_trace + def list(self, **kwargs: Any) -> AsyncIterable["_models.Disk"]: + """Lists all the disks under a subscription. 
+ + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either Disk or the result of cls(response) + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2023_01_02.models.Disk] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[_models.DiskList] = kwargs.pop("cls", None) + + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + request = build_disks_list_request( + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self.list.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + request.method = "GET" + return request + + async def extract_data(pipeline_response): + deserialized = self._deserialize("DiskList", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.next_link or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + list.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/disks"} + + async def _grant_access_initial( + self, + resource_group_name: str, + disk_name: str, + grant_access_data: Union[_models.GrantAccessData, IO], + **kwargs: Any + ) -> Optional[_models.AccessUri]: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[Optional[_models.AccessUri]] = kwargs.pop("cls", None) + + 
content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(grant_access_data, (IOBase, bytes)): + _content = grant_access_data + else: + _json = self._serialize.body(grant_access_data, "GrantAccessData") + + request = build_disks_grant_access_request( + resource_group_name=resource_group_name, + disk_name=disk_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + template_url=self._grant_access_initial.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize("AccessUri", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + + _grant_access_initial.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/beginGetAccess" + } + + @overload + async def begin_grant_access( + self, + resource_group_name: str, + disk_name: str, + grant_access_data: _models.GrantAccessData, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.AccessUri]: + """Grants access to a disk. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_name: The name of the managed disk that is being created. The name can't be changed + after the disk is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The + maximum name length is 80 characters. Required. + :type disk_name: str + :param grant_access_data: Access data object supplied in the body of the get disk access + operation. Required. + :type grant_access_data: ~azure.mgmt.compute.v2023_01_02.models.GrantAccessData + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. 
+ :return: An instance of AsyncLROPoller that returns either AccessUri or the result of + cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2023_01_02.models.AccessUri] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def begin_grant_access( + self, + resource_group_name: str, + disk_name: str, + grant_access_data: IO, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.AccessUri]: + """Grants access to a disk. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_name: The name of the managed disk that is being created. The name can't be changed + after the disk is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The + maximum name length is 80 characters. Required. + :type disk_name: str + :param grant_access_data: Access data object supplied in the body of the get disk access + operation. Required. + :type grant_access_data: IO + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of AsyncLROPoller that returns either AccessUri or the result of + cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2023_01_02.models.AccessUri] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def begin_grant_access( + self, + resource_group_name: str, + disk_name: str, + grant_access_data: Union[_models.GrantAccessData, IO], + **kwargs: Any + ) -> AsyncLROPoller[_models.AccessUri]: + """Grants access to a disk. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_name: The name of the managed disk that is being created. The name can't be changed + after the disk is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The + maximum name length is 80 characters. Required. + :type disk_name: str + :param grant_access_data: Access data object supplied in the body of the get disk access + operation. Is either a GrantAccessData type or a IO type. Required. + :type grant_access_data: ~azure.mgmt.compute.v2023_01_02.models.GrantAccessData or IO + :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. + Default value is None. + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. 
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of AsyncLROPoller that returns either AccessUri or the result of + cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2023_01_02.models.AccessUri] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.AccessUri] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._grant_access_initial( + resource_group_name=resource_group_name, + disk_name=disk_name, + grant_access_data=grant_access_data, + api_version=api_version, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize("AccessUri", pipeline_response) + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + if polling is True: + polling_method: AsyncPollingMethod = cast( + AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs) + ) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + begin_grant_access.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/beginGetAccess" + } + + async def _revoke_access_initial( # pylint: disable=inconsistent-return-statements + self, resource_group_name: str, disk_name: str, **kwargs: Any + ) -> None: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_disks_revoke_access_request( + resource_group_name=resource_group_name, + disk_name=disk_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self._revoke_access_initial.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: 
disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + if cls: + return cls(pipeline_response, None, {}) + + _revoke_access_initial.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/endGetAccess" + } + + @distributed_trace_async + async def begin_revoke_access( + self, resource_group_name: str, disk_name: str, **kwargs: Any + ) -> AsyncLROPoller[None]: + """Revokes access to a disk. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_name: The name of the managed disk that is being created. The name can't be changed + after the disk is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The + maximum name length is 80 characters. Required. + :type disk_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._revoke_access_initial( # type: ignore + resource_group_name=resource_group_name, + disk_name=disk_name, + api_version=api_version, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) + + if polling is True: + polling_method: AsyncPollingMethod = cast( + AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs) + ) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + begin_revoke_access.metadata = { + "url": 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/endGetAccess" + } + + +class DiskAccessesOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.compute.v2023_01_02.aio.ComputeManagementClient`'s + :attr:`disk_accesses` attribute. + """ + + models = _models + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + + async def _create_or_update_initial( + self, resource_group_name: str, disk_access_name: str, disk_access: Union[_models.DiskAccess, IO], **kwargs: Any + ) -> _models.DiskAccess: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.DiskAccess] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(disk_access, (IOBase, bytes)): + _content = disk_access + else: + _json = self._serialize.body(disk_access, "DiskAccess") + + request = build_disk_accesses_create_or_update_request( + resource_group_name=resource_group_name, + disk_access_name=disk_access_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + template_url=self._create_or_update_initial.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + if response.status_code == 200: + deserialized = self._deserialize("DiskAccess", pipeline_response) + + if response.status_code == 202: + deserialized = self._deserialize("DiskAccess", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + _create_or_update_initial.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}" + } + + @overload + async def begin_create_or_update( + self, + resource_group_name: str, + disk_access_name: str, + disk_access: _models.DiskAccess, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> 
AsyncLROPoller[_models.DiskAccess]: + """Creates or updates a disk access resource. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_access_name: The name of the disk access resource that is being created. The name + can't be changed after the disk encryption set is created. Supported characters for the name + are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters. Required. + :type disk_access_name: str + :param disk_access: disk access object supplied in the body of the Put disk access operation. + Required. + :type disk_access: ~azure.mgmt.compute.v2023_01_02.models.DiskAccess + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of AsyncLROPoller that returns either DiskAccess or the result of + cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2023_01_02.models.DiskAccess] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def begin_create_or_update( + self, + resource_group_name: str, + disk_access_name: str, + disk_access: IO, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.DiskAccess]: + """Creates or updates a disk access resource. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_access_name: The name of the disk access resource that is being created. The name + can't be changed after the disk encryption set is created. Supported characters for the name + are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters. Required. + :type disk_access_name: str + :param disk_access: disk access object supplied in the body of the Put disk access operation. + Required. + :type disk_access: IO + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. 
+ :return: An instance of AsyncLROPoller that returns either DiskAccess or the result of + cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2023_01_02.models.DiskAccess] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def begin_create_or_update( + self, resource_group_name: str, disk_access_name: str, disk_access: Union[_models.DiskAccess, IO], **kwargs: Any + ) -> AsyncLROPoller[_models.DiskAccess]: + """Creates or updates a disk access resource. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_access_name: The name of the disk access resource that is being created. The name + can't be changed after the disk encryption set is created. Supported characters for the name + are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters. Required. + :type disk_access_name: str + :param disk_access: disk access object supplied in the body of the Put disk access operation. + Is either a DiskAccess type or a IO type. Required. + :type disk_access: ~azure.mgmt.compute.v2023_01_02.models.DiskAccess or IO + :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. + Default value is None. + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. 
+ :return: An instance of AsyncLROPoller that returns either DiskAccess or the result of + cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2023_01_02.models.DiskAccess] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.DiskAccess] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._create_or_update_initial( + resource_group_name=resource_group_name, + disk_access_name=disk_access_name, + disk_access=disk_access, + api_version=api_version, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize("DiskAccess", pipeline_response) + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + if polling is True: + polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + begin_create_or_update.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}" + } + + async def _update_initial( + self, + resource_group_name: str, + disk_access_name: str, + disk_access: Union[_models.DiskAccessUpdate, IO], + **kwargs: Any + ) -> _models.DiskAccess: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.DiskAccess] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(disk_access, (IOBase, bytes)): + _content = disk_access + else: + _json = self._serialize.body(disk_access, "DiskAccessUpdate") + + request = build_disk_accesses_update_request( + resource_group_name=resource_group_name, + disk_access_name=disk_access_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + template_url=self._update_initial.metadata["url"], + headers=_headers, + 
params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + if response.status_code == 200: + deserialized = self._deserialize("DiskAccess", pipeline_response) + + if response.status_code == 202: + deserialized = self._deserialize("DiskAccess", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + _update_initial.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}" + } + + @overload + async def begin_update( + self, + resource_group_name: str, + disk_access_name: str, + disk_access: _models.DiskAccessUpdate, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.DiskAccess]: + """Updates (patches) a disk access resource. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_access_name: The name of the disk access resource that is being created. The name + can't be changed after the disk encryption set is created. Supported characters for the name + are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters. Required. + :type disk_access_name: str + :param disk_access: disk access object supplied in the body of the Patch disk access operation. + Required. + :type disk_access: ~azure.mgmt.compute.v2023_01_02.models.DiskAccessUpdate + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of AsyncLROPoller that returns either DiskAccess or the result of + cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2023_01_02.models.DiskAccess] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def begin_update( + self, + resource_group_name: str, + disk_access_name: str, + disk_access: IO, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.DiskAccess]: + """Updates (patches) a disk access resource. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_access_name: The name of the disk access resource that is being created. The name + can't be changed after the disk encryption set is created. Supported characters for the name + are a-z, A-Z, 0-9, _ and -. 
The maximum name length is 80 characters. Required. + :type disk_access_name: str + :param disk_access: disk access object supplied in the body of the Patch disk access operation. + Required. + :type disk_access: IO + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of AsyncLROPoller that returns either DiskAccess or the result of + cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2023_01_02.models.DiskAccess] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def begin_update( + self, + resource_group_name: str, + disk_access_name: str, + disk_access: Union[_models.DiskAccessUpdate, IO], + **kwargs: Any + ) -> AsyncLROPoller[_models.DiskAccess]: + """Updates (patches) a disk access resource. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_access_name: The name of the disk access resource that is being created. The name + can't be changed after the disk encryption set is created. Supported characters for the name + are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters. Required. + :type disk_access_name: str + :param disk_access: disk access object supplied in the body of the Patch disk access operation. + Is either a DiskAccessUpdate type or a IO type. Required. + :type disk_access: ~azure.mgmt.compute.v2023_01_02.models.DiskAccessUpdate or IO + :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. + Default value is None. + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. 
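As an illustration of the patch flow, a DiskAccessUpdate body that only changes tags can be sent through begin_update; the client argument and resource names below are placeholders, as in the earlier sketch:

    from azure.mgmt.compute.v2023_01_02 import models
    from azure.mgmt.compute.v2023_01_02.aio import ComputeManagementClient

    async def update_disk_access_tags(client: ComputeManagementClient) -> models.DiskAccess:
        # Send only the properties to change (here, the tags).
        poller = await client.disk_accesses.begin_update(
            resource_group_name="my-rg",
            disk_access_name="my-disk-access",
            disk_access=models.DiskAccessUpdate(tags={"env": "test"}),
        )
        return await poller.result()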
+ :return: An instance of AsyncLROPoller that returns either DiskAccess or the result of + cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2023_01_02.models.DiskAccess] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.DiskAccess] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._update_initial( + resource_group_name=resource_group_name, + disk_access_name=disk_access_name, + disk_access=disk_access, + api_version=api_version, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize("DiskAccess", pipeline_response) + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + if polling is True: + polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + begin_update.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}" + } + + @distributed_trace_async + async def get(self, resource_group_name: str, disk_access_name: str, **kwargs: Any) -> _models.DiskAccess: + """Gets information about a disk access resource. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_access_name: The name of the disk access resource that is being created. The name + can't be changed after the disk encryption set is created. Supported characters for the name + are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters. Required. 
+ :type disk_access_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: DiskAccess or the result of cls(response) + :rtype: ~azure.mgmt.compute.v2023_01_02.models.DiskAccess + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[_models.DiskAccess] = kwargs.pop("cls", None) + + request = build_disk_accesses_get_request( + resource_group_name=resource_group_name, + disk_access_name=disk_access_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self.get.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize("DiskAccess", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + + get.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}" + } + + async def _delete_initial( # pylint: disable=inconsistent-return-statements + self, resource_group_name: str, disk_access_name: str, **kwargs: Any + ) -> None: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_disk_accesses_delete_request( + resource_group_name=resource_group_name, + disk_access_name=disk_access_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self._delete_initial.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + if cls: + return cls(pipeline_response, None, {}) + + _delete_initial.metadata = { + "url": 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}" + } + + @distributed_trace_async + async def begin_delete( + self, resource_group_name: str, disk_access_name: str, **kwargs: Any + ) -> AsyncLROPoller[None]: + """Deletes a disk access resource. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_access_name: The name of the disk access resource that is being created. The name + can't be changed after the disk encryption set is created. Supported characters for the name + are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters. Required. + :type disk_access_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._delete_initial( # type: ignore + resource_group_name=resource_group_name, + disk_access_name=disk_access_name, + api_version=api_version, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) + + if polling is True: + polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + begin_delete.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}" + } + + @distributed_trace + def list_by_resource_group(self, resource_group_name: str, **kwargs: Any) -> AsyncIterable["_models.DiskAccess"]: + """Lists all the disk access resources under a resource group. + + :param resource_group_name: The name of the resource group. Required. 
+ :type resource_group_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either DiskAccess or the result of cls(response) + :rtype: + ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2023_01_02.models.DiskAccess] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[_models.DiskAccessList] = kwargs.pop("cls", None) + + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + request = build_disk_accesses_list_by_resource_group_request( + resource_group_name=resource_group_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self.list_by_resource_group.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + request.method = "GET" + return request + + async def extract_data(pipeline_response): + deserialized = self._deserialize("DiskAccessList", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.next_link or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + list_by_resource_group.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses" + } + + @distributed_trace + def list(self, **kwargs: Any) -> AsyncIterable["_models.DiskAccess"]: + """Lists all the disk access resources under a subscription. 
+ + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either DiskAccess or the result of cls(response) + :rtype: + ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2023_01_02.models.DiskAccess] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[_models.DiskAccessList] = kwargs.pop("cls", None) + + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + request = build_disk_accesses_list_request( + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self.list.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + request.method = "GET" + return request + + async def extract_data(pipeline_response): + deserialized = self._deserialize("DiskAccessList", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.next_link or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + list.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/diskAccesses"} + + @distributed_trace_async + async def get_private_link_resources( + self, resource_group_name: str, disk_access_name: str, **kwargs: Any + ) -> _models.PrivateLinkResourceListResult: + """Gets the private link resources possible under disk access resource. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_access_name: The name of the disk access resource that is being created. The name + can't be changed after the disk encryption set is created. Supported characters for the name + are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters. Required. 
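Both listing operations above return an AsyncItemPaged that follows the nextLink continuation transparently, so iteration is a plain async for; a sketch with placeholder names:

    from azure.mgmt.compute.v2023_01_02.aio import ComputeManagementClient

    async def print_disk_accesses(client: ComputeManagementClient) -> None:
        # Scoped to one resource group.
        async for disk_access in client.disk_accesses.list_by_resource_group("my-rg"):
            print(disk_access.name)
        # Scoped to the whole subscription.
        async for disk_access in client.disk_accesses.list():
            print(disk_access.id)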
+ :type disk_access_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PrivateLinkResourceListResult or the result of cls(response) + :rtype: ~azure.mgmt.compute.v2023_01_02.models.PrivateLinkResourceListResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[_models.PrivateLinkResourceListResult] = kwargs.pop("cls", None) + + request = build_disk_accesses_get_private_link_resources_request( + resource_group_name=resource_group_name, + disk_access_name=disk_access_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self.get_private_link_resources.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize("PrivateLinkResourceListResult", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + + get_private_link_resources.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}/privateLinkResources" + } + + async def _update_a_private_endpoint_connection_initial( + self, + resource_group_name: str, + disk_access_name: str, + private_endpoint_connection_name: str, + private_endpoint_connection: Union[_models.PrivateEndpointConnection, IO], + **kwargs: Any + ) -> _models.PrivateEndpointConnection: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.PrivateEndpointConnection] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(private_endpoint_connection, (IOBase, bytes)): + _content = private_endpoint_connection + else: + _json = self._serialize.body(private_endpoint_connection, "PrivateEndpointConnection") + + request = build_disk_accesses_update_a_private_endpoint_connection_request( + resource_group_name=resource_group_name, + disk_access_name=disk_access_name, + private_endpoint_connection_name=private_endpoint_connection_name, + subscription_id=self._config.subscription_id, + 
api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + template_url=self._update_a_private_endpoint_connection_initial.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + if response.status_code == 200: + deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response) + + if response.status_code == 202: + deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + _update_a_private_endpoint_connection_initial.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}/privateEndpointConnections/{privateEndpointConnectionName}" + } + + @overload + async def begin_update_a_private_endpoint_connection( + self, + resource_group_name: str, + disk_access_name: str, + private_endpoint_connection_name: str, + private_endpoint_connection: _models.PrivateEndpointConnection, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.PrivateEndpointConnection]: + """Approve or reject a private endpoint connection under disk access resource, this can't be used + to create a new private endpoint connection. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_access_name: The name of the disk access resource that is being created. The name + can't be changed after the disk encryption set is created. Supported characters for the name + are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters. Required. + :type disk_access_name: str + :param private_endpoint_connection_name: The name of the private endpoint connection. Required. + :type private_endpoint_connection_name: str + :param private_endpoint_connection: private endpoint connection object supplied in the body of + the Put private endpoint connection operation. Required. + :type private_endpoint_connection: + ~azure.mgmt.compute.v2023_01_02.models.PrivateEndpointConnection + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. 
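A sketch of the approval flow these operations support: read the private link resources for context, then approve a pending connection by name. The client and all names are placeholders, and the connection must already exist, since this call cannot create one:

    from azure.mgmt.compute.v2023_01_02 import models
    from azure.mgmt.compute.v2023_01_02.aio import ComputeManagementClient

    async def approve_connection(client: ComputeManagementClient) -> models.PrivateEndpointConnection:
        # Inspect the private link resources exposed by the disk access resource.
        link_resources = await client.disk_accesses.get_private_link_resources("my-rg", "my-disk-access")
        print([resource.group_id for resource in link_resources.value])
        # Approve an existing (pending) private endpoint connection.
        approval = models.PrivateEndpointConnection(
            private_link_service_connection_state=models.PrivateLinkServiceConnectionState(
                status="Approved", description="Approved by admin"
            )
        )
        poller = await client.disk_accesses.begin_update_a_private_endpoint_connection(
            "my-rg", "my-disk-access", "my-connection", approval
        )
        return await poller.result()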
+ :return: An instance of AsyncLROPoller that returns either PrivateEndpointConnection or the + result of cls(response) + :rtype: + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2023_01_02.models.PrivateEndpointConnection] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def begin_update_a_private_endpoint_connection( + self, + resource_group_name: str, + disk_access_name: str, + private_endpoint_connection_name: str, + private_endpoint_connection: IO, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.PrivateEndpointConnection]: + """Approve or reject a private endpoint connection under disk access resource, this can't be used + to create a new private endpoint connection. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_access_name: The name of the disk access resource that is being created. The name + can't be changed after the disk encryption set is created. Supported characters for the name + are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters. Required. + :type disk_access_name: str + :param private_endpoint_connection_name: The name of the private endpoint connection. Required. + :type private_endpoint_connection_name: str + :param private_endpoint_connection: private endpoint connection object supplied in the body of + the Put private endpoint connection operation. Required. + :type private_endpoint_connection: IO + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of AsyncLROPoller that returns either PrivateEndpointConnection or the + result of cls(response) + :rtype: + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2023_01_02.models.PrivateEndpointConnection] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def begin_update_a_private_endpoint_connection( + self, + resource_group_name: str, + disk_access_name: str, + private_endpoint_connection_name: str, + private_endpoint_connection: Union[_models.PrivateEndpointConnection, IO], + **kwargs: Any + ) -> AsyncLROPoller[_models.PrivateEndpointConnection]: + """Approve or reject a private endpoint connection under disk access resource, this can't be used + to create a new private endpoint connection. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_access_name: The name of the disk access resource that is being created. The name + can't be changed after the disk encryption set is created. Supported characters for the name + are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters. Required. + :type disk_access_name: str + :param private_endpoint_connection_name: The name of the private endpoint connection. 
Required. + :type private_endpoint_connection_name: str + :param private_endpoint_connection: private endpoint connection object supplied in the body of + the Put private endpoint connection operation. Is either a PrivateEndpointConnection type or a + IO type. Required. + :type private_endpoint_connection: + ~azure.mgmt.compute.v2023_01_02.models.PrivateEndpointConnection or IO + :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. + Default value is None. + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of AsyncLROPoller that returns either PrivateEndpointConnection or the + result of cls(response) + :rtype: + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2023_01_02.models.PrivateEndpointConnection] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.PrivateEndpointConnection] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._update_a_private_endpoint_connection_initial( + resource_group_name=resource_group_name, + disk_access_name=disk_access_name, + private_endpoint_connection_name=private_endpoint_connection_name, + private_endpoint_connection=private_endpoint_connection, + api_version=api_version, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response) + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + if polling is True: + polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + begin_update_a_private_endpoint_connection.metadata = { + "url": 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}/privateEndpointConnections/{privateEndpointConnectionName}" + } + + @distributed_trace_async + async def get_a_private_endpoint_connection( + self, resource_group_name: str, disk_access_name: str, private_endpoint_connection_name: str, **kwargs: Any + ) -> _models.PrivateEndpointConnection: + """Gets information about a private endpoint connection under a disk access resource. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_access_name: The name of the disk access resource that is being created. The name + can't be changed after the disk encryption set is created. Supported characters for the name + are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters. Required. + :type disk_access_name: str + :param private_endpoint_connection_name: The name of the private endpoint connection. Required. + :type private_endpoint_connection_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PrivateEndpointConnection or the result of cls(response) + :rtype: ~azure.mgmt.compute.v2023_01_02.models.PrivateEndpointConnection + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[_models.PrivateEndpointConnection] = kwargs.pop("cls", None) + + request = build_disk_accesses_get_a_private_endpoint_connection_request( + resource_group_name=resource_group_name, + disk_access_name=disk_access_name, + private_endpoint_connection_name=private_endpoint_connection_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self.get_a_private_endpoint_connection.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + + get_a_private_endpoint_connection.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}/privateEndpointConnections/{privateEndpointConnectionName}" + } + + async def _delete_a_private_endpoint_connection_initial( # pylint: disable=inconsistent-return-statements + self, resource_group_name: str, disk_access_name: str, private_endpoint_connection_name: str, **kwargs: Any + ) -> None: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: 
ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_disk_accesses_delete_a_private_endpoint_connection_request( + resource_group_name=resource_group_name, + disk_access_name=disk_access_name, + private_endpoint_connection_name=private_endpoint_connection_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self._delete_a_private_endpoint_connection_initial.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + if cls: + return cls(pipeline_response, None, {}) + + _delete_a_private_endpoint_connection_initial.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}/privateEndpointConnections/{privateEndpointConnectionName}" + } + + @distributed_trace_async + async def begin_delete_a_private_endpoint_connection( + self, resource_group_name: str, disk_access_name: str, private_endpoint_connection_name: str, **kwargs: Any + ) -> AsyncLROPoller[None]: + """Deletes a private endpoint connection under a disk access resource. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_access_name: The name of the disk access resource that is being created. The name + can't be changed after the disk encryption set is created. Supported characters for the name + are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters. Required. + :type disk_access_name: str + :param private_endpoint_connection_name: The name of the private endpoint connection. Required. + :type private_endpoint_connection_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. 
+ :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._delete_a_private_endpoint_connection_initial( # type: ignore + resource_group_name=resource_group_name, + disk_access_name=disk_access_name, + private_endpoint_connection_name=private_endpoint_connection_name, + api_version=api_version, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) + + if polling is True: + polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + begin_delete_a_private_endpoint_connection.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}/privateEndpointConnections/{privateEndpointConnectionName}" + } + + @distributed_trace + def list_private_endpoint_connections( + self, resource_group_name: str, disk_access_name: str, **kwargs: Any + ) -> AsyncIterable["_models.PrivateEndpointConnection"]: + """List information about private endpoint connections under a disk access resource. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_access_name: The name of the disk access resource that is being created. The name + can't be changed after the disk encryption set is created. Supported characters for the name + are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters. Required. 
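A sketch that pages through the connections under a disk access resource (list_private_endpoint_connections, documented just below) and then removes one by name with the delete poller above; placeholder names as in the earlier sketches:

    from azure.mgmt.compute.v2023_01_02.aio import ComputeManagementClient

    async def remove_connection(client: ComputeManagementClient) -> None:
        async for connection in client.disk_accesses.list_private_endpoint_connections(
            "my-rg", "my-disk-access"
        ):
            print(connection.name, connection.private_link_service_connection_state.status)
        poller = await client.disk_accesses.begin_delete_a_private_endpoint_connection(
            "my-rg", "my-disk-access", "my-connection"
        )
        await poller.result()  # resolves to None when the connection is removed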
+ :type disk_access_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either PrivateEndpointConnection or the result of + cls(response) + :rtype: + ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2023_01_02.models.PrivateEndpointConnection] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[_models.PrivateEndpointConnectionListResult] = kwargs.pop("cls", None) + + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + request = build_disk_accesses_list_private_endpoint_connections_request( + resource_group_name=resource_group_name, + disk_access_name=disk_access_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self.list_private_endpoint_connections.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + request.method = "GET" + return request + + async def extract_data(pipeline_response): + deserialized = self._deserialize("PrivateEndpointConnectionListResult", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.next_link or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + list_private_endpoint_connections.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}/privateEndpointConnections" + } + + +class DiskEncryptionSetsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.compute.v2023_01_02.aio.ComputeManagementClient`'s + :attr:`disk_encryption_sets` attribute. 
+ """ + + models = _models + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + + async def _create_or_update_initial( + self, + resource_group_name: str, + disk_encryption_set_name: str, + disk_encryption_set: Union[_models.DiskEncryptionSet, IO], + **kwargs: Any + ) -> _models.DiskEncryptionSet: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.DiskEncryptionSet] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(disk_encryption_set, (IOBase, bytes)): + _content = disk_encryption_set + else: + _json = self._serialize.body(disk_encryption_set, "DiskEncryptionSet") + + request = build_disk_encryption_sets_create_or_update_request( + resource_group_name=resource_group_name, + disk_encryption_set_name=disk_encryption_set_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + template_url=self._create_or_update_initial.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + if response.status_code == 200: + deserialized = self._deserialize("DiskEncryptionSet", pipeline_response) + + if response.status_code == 202: + deserialized = self._deserialize("DiskEncryptionSet", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + _create_or_update_initial.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}" + } + + @overload + async def begin_create_or_update( + self, + resource_group_name: str, + disk_encryption_set_name: str, + disk_encryption_set: _models.DiskEncryptionSet, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.DiskEncryptionSet]: + """Creates or updates a disk encryption set. + + :param resource_group_name: The name of the resource group. Required. 
+ :type resource_group_name: str + :param disk_encryption_set_name: The name of the disk encryption set that is being created. The + name can't be changed after the disk encryption set is created. Supported characters for the + name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters. Required. + :type disk_encryption_set_name: str + :param disk_encryption_set: disk encryption set object supplied in the body of the Put disk + encryption set operation. Required. + :type disk_encryption_set: ~azure.mgmt.compute.v2023_01_02.models.DiskEncryptionSet + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of AsyncLROPoller that returns either DiskEncryptionSet or the result of + cls(response) + :rtype: + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2023_01_02.models.DiskEncryptionSet] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def begin_create_or_update( + self, + resource_group_name: str, + disk_encryption_set_name: str, + disk_encryption_set: IO, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.DiskEncryptionSet]: + """Creates or updates a disk encryption set. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_encryption_set_name: The name of the disk encryption set that is being created. The + name can't be changed after the disk encryption set is created. Supported characters for the + name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters. Required. + :type disk_encryption_set_name: str + :param disk_encryption_set: disk encryption set object supplied in the body of the Put disk + encryption set operation. Required. + :type disk_encryption_set: IO + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. 
+ :return: An instance of AsyncLROPoller that returns either DiskEncryptionSet or the result of
+ cls(response)
+ :rtype:
+ ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2023_01_02.models.DiskEncryptionSet]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def begin_create_or_update(
+ self,
+ resource_group_name: str,
+ disk_encryption_set_name: str,
+ disk_encryption_set: Union[_models.DiskEncryptionSet, IO],
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.DiskEncryptionSet]:
+ """Creates or updates a disk encryption set.
+
+ :param resource_group_name: The name of the resource group. Required.
+ :type resource_group_name: str
+ :param disk_encryption_set_name: The name of the disk encryption set that is being created. The
+ name can't be changed after the disk encryption set is created. Supported characters for the
+ name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters. Required.
+ :type disk_encryption_set_name: str
+ :param disk_encryption_set: Disk encryption set object supplied in the body of the Put disk
+ encryption set operation. Is either a DiskEncryptionSet type or an IO type. Required.
+ :type disk_encryption_set: ~azure.mgmt.compute.v2023_01_02.models.DiskEncryptionSet or IO
+ :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
+ Default value is None.
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either DiskEncryptionSet or the result of + cls(response) + :rtype: + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2023_01_02.models.DiskEncryptionSet] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.DiskEncryptionSet] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._create_or_update_initial( + resource_group_name=resource_group_name, + disk_encryption_set_name=disk_encryption_set_name, + disk_encryption_set=disk_encryption_set, + api_version=api_version, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize("DiskEncryptionSet", pipeline_response) + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + if polling is True: + polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + begin_create_or_update.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}" + } + + async def _update_initial( + self, + resource_group_name: str, + disk_encryption_set_name: str, + disk_encryption_set: Union[_models.DiskEncryptionSetUpdate, IO], + **kwargs: Any + ) -> _models.DiskEncryptionSet: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.DiskEncryptionSet] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(disk_encryption_set, (IOBase, bytes)): + _content = disk_encryption_set + else: + _json = self._serialize.body(disk_encryption_set, "DiskEncryptionSetUpdate") + + request = build_disk_encryption_sets_update_request( + resource_group_name=resource_group_name, + disk_encryption_set_name=disk_encryption_set_name, + subscription_id=self._config.subscription_id, + 
api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + template_url=self._update_initial.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + if response.status_code == 200: + deserialized = self._deserialize("DiskEncryptionSet", pipeline_response) + + if response.status_code == 202: + deserialized = self._deserialize("DiskEncryptionSet", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + _update_initial.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}" + } + + @overload + async def begin_update( + self, + resource_group_name: str, + disk_encryption_set_name: str, + disk_encryption_set: _models.DiskEncryptionSetUpdate, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.DiskEncryptionSet]: + """Updates (patches) a disk encryption set. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_encryption_set_name: The name of the disk encryption set that is being created. The + name can't be changed after the disk encryption set is created. Supported characters for the + name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters. Required. + :type disk_encryption_set_name: str + :param disk_encryption_set: disk encryption set object supplied in the body of the Patch disk + encryption set operation. Required. + :type disk_encryption_set: ~azure.mgmt.compute.v2023_01_02.models.DiskEncryptionSetUpdate + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of AsyncLROPoller that returns either DiskEncryptionSet or the result of + cls(response) + :rtype: + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2023_01_02.models.DiskEncryptionSet] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def begin_update( + self, + resource_group_name: str, + disk_encryption_set_name: str, + disk_encryption_set: IO, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.DiskEncryptionSet]: + """Updates (patches) a disk encryption set. 
+ + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_encryption_set_name: The name of the disk encryption set that is being created. The + name can't be changed after the disk encryption set is created. Supported characters for the + name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters. Required. + :type disk_encryption_set_name: str + :param disk_encryption_set: disk encryption set object supplied in the body of the Patch disk + encryption set operation. Required. + :type disk_encryption_set: IO + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of AsyncLROPoller that returns either DiskEncryptionSet or the result of + cls(response) + :rtype: + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2023_01_02.models.DiskEncryptionSet] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def begin_update( + self, + resource_group_name: str, + disk_encryption_set_name: str, + disk_encryption_set: Union[_models.DiskEncryptionSetUpdate, IO], + **kwargs: Any + ) -> AsyncLROPoller[_models.DiskEncryptionSet]: + """Updates (patches) a disk encryption set. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_encryption_set_name: The name of the disk encryption set that is being created. The + name can't be changed after the disk encryption set is created. Supported characters for the + name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters. Required. + :type disk_encryption_set_name: str + :param disk_encryption_set: disk encryption set object supplied in the body of the Patch disk + encryption set operation. Is either a DiskEncryptionSetUpdate type or a IO type. Required. + :type disk_encryption_set: ~azure.mgmt.compute.v2023_01_02.models.DiskEncryptionSetUpdate or IO + :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. + Default value is None. + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. 
+ :return: An instance of AsyncLROPoller that returns either DiskEncryptionSet or the result of + cls(response) + :rtype: + ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2023_01_02.models.DiskEncryptionSet] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.DiskEncryptionSet] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._update_initial( + resource_group_name=resource_group_name, + disk_encryption_set_name=disk_encryption_set_name, + disk_encryption_set=disk_encryption_set, + api_version=api_version, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize("DiskEncryptionSet", pipeline_response) + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + if polling is True: + polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + begin_update.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}" + } + + @distributed_trace_async + async def get( + self, resource_group_name: str, disk_encryption_set_name: str, **kwargs: Any + ) -> _models.DiskEncryptionSet: + """Gets information about a disk encryption set. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_encryption_set_name: The name of the disk encryption set that is being created. The + name can't be changed after the disk encryption set is created. Supported characters for the + name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters. Required. 
+ :type disk_encryption_set_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: DiskEncryptionSet or the result of cls(response) + :rtype: ~azure.mgmt.compute.v2023_01_02.models.DiskEncryptionSet + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[_models.DiskEncryptionSet] = kwargs.pop("cls", None) + + request = build_disk_encryption_sets_get_request( + resource_group_name=resource_group_name, + disk_encryption_set_name=disk_encryption_set_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self.get.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize("DiskEncryptionSet", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + + get.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}" + } + + async def _delete_initial( # pylint: disable=inconsistent-return-statements + self, resource_group_name: str, disk_encryption_set_name: str, **kwargs: Any + ) -> None: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_disk_encryption_sets_delete_request( + resource_group_name=resource_group_name, + disk_encryption_set_name=disk_encryption_set_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self._delete_initial.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + if cls: + return cls(pipeline_response, None, {}) + + _delete_initial.metadata = { + "url": 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}" + } + + @distributed_trace_async + async def begin_delete( + self, resource_group_name: str, disk_encryption_set_name: str, **kwargs: Any + ) -> AsyncLROPoller[None]: + """Deletes a disk encryption set. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_encryption_set_name: The name of the disk encryption set that is being created. The + name can't be changed after the disk encryption set is created. Supported characters for the + name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters. Required. + :type disk_encryption_set_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._delete_initial( # type: ignore + resource_group_name=resource_group_name, + disk_encryption_set_name=disk_encryption_set_name, + api_version=api_version, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) + + if polling is True: + polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + begin_delete.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}" + } + + @distributed_trace + def list_by_resource_group( + self, resource_group_name: str, **kwargs: Any + ) -> AsyncIterable["_models.DiskEncryptionSet"]: + """Lists all the disk encryption sets under a resource group. + + :param resource_group_name: The name of the resource group. 
Required. + :type resource_group_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either DiskEncryptionSet or the result of cls(response) + :rtype: + ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2023_01_02.models.DiskEncryptionSet] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[_models.DiskEncryptionSetList] = kwargs.pop("cls", None) + + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + request = build_disk_encryption_sets_list_by_resource_group_request( + resource_group_name=resource_group_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self.list_by_resource_group.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + request.method = "GET" + return request + + async def extract_data(pipeline_response): + deserialized = self._deserialize("DiskEncryptionSetList", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.next_link or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + list_by_resource_group.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets" + } + + @distributed_trace + def list(self, **kwargs: Any) -> AsyncIterable["_models.DiskEncryptionSet"]: + """Lists all the disk encryption sets under a subscription. 
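+
+ A minimal iteration sketch (illustrative only; it assumes ``client`` is an already
+ authenticated ``azure.mgmt.compute.v2023_01_02.aio.ComputeManagementClient`` and that these
+ operations are reached through its ``disk_encryption_sets`` attribute):
+
+ .. code-block:: python
+
+     # list() returns an AsyncItemPaged of DiskEncryptionSet; iterate it with ``async for``
+     async for disk_encryption_set in client.disk_encryption_sets.list():
+         print(disk_encryption_set.name)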
+ + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either DiskEncryptionSet or the result of cls(response) + :rtype: + ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2023_01_02.models.DiskEncryptionSet] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[_models.DiskEncryptionSetList] = kwargs.pop("cls", None) + + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + request = build_disk_encryption_sets_list_request( + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self.list.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + request.method = "GET" + return request + + async def extract_data(pipeline_response): + deserialized = self._deserialize("DiskEncryptionSetList", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.next_link or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + list.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/diskEncryptionSets"} + + @distributed_trace + def list_associated_resources( + self, resource_group_name: str, disk_encryption_set_name: str, **kwargs: Any + ) -> AsyncIterable[str]: + """Lists all resources that are encrypted with this disk encryption set. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_encryption_set_name: The name of the disk encryption set that is being created. The + name can't be changed after the disk encryption set is created. Supported characters for the + name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters. Required. 
+ :type disk_encryption_set_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either str or the result of cls(response) + :rtype: ~azure.core.async_paging.AsyncItemPaged[str] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[_models.ResourceUriList] = kwargs.pop("cls", None) + + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + request = build_disk_encryption_sets_list_associated_resources_request( + resource_group_name=resource_group_name, + disk_encryption_set_name=disk_encryption_set_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self.list_associated_resources.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + request.method = "GET" + return request + + async def extract_data(pipeline_response): + deserialized = self._deserialize("ResourceUriList", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.next_link or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + list_associated_resources.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}/associatedResources" + } + + +class DiskRestorePointOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.compute.v2023_01_02.aio.ComputeManagementClient`'s + :attr:`disk_restore_point` attribute. 
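+
+ A minimal access sketch (illustrative only; the resource-name placeholders and the
+ already-authenticated ``client`` are assumptions, not values defined by this module):
+
+ .. code-block:: python
+
+     # the four positional arguments mirror the ``get`` operation defined below
+     restore_point = await client.disk_restore_point.get(
+         "<resource-group>",
+         "<restore-point-collection>",
+         "<vm-restore-point>",
+         "<disk-restore-point>",
+     )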
+ """ + + models = _models + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + + @distributed_trace_async + async def get( + self, + resource_group_name: str, + restore_point_collection_name: str, + vm_restore_point_name: str, + disk_restore_point_name: str, + **kwargs: Any + ) -> _models.DiskRestorePoint: + """Get disk restorePoint resource. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param restore_point_collection_name: The name of the restore point collection that the disk + restore point belongs. Required. + :type restore_point_collection_name: str + :param vm_restore_point_name: The name of the vm restore point that the disk disk restore point + belongs. Required. + :type vm_restore_point_name: str + :param disk_restore_point_name: The name of the disk restore point created. Required. + :type disk_restore_point_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: DiskRestorePoint or the result of cls(response) + :rtype: ~azure.mgmt.compute.v2023_01_02.models.DiskRestorePoint + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[_models.DiskRestorePoint] = kwargs.pop("cls", None) + + request = build_disk_restore_point_get_request( + resource_group_name=resource_group_name, + restore_point_collection_name=restore_point_collection_name, + vm_restore_point_name=vm_restore_point_name, + disk_restore_point_name=disk_restore_point_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self.get.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize("DiskRestorePoint", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + + get.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}/restorePoints/{vmRestorePointName}/diskRestorePoints/{diskRestorePointName}" + } + + @distributed_trace + def list_by_restore_point( + self, resource_group_name: str, 
restore_point_collection_name: str, vm_restore_point_name: str, **kwargs: Any + ) -> AsyncIterable["_models.DiskRestorePoint"]: + """Lists diskRestorePoints under a vmRestorePoint. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param restore_point_collection_name: The name of the restore point collection that the disk + restore point belongs. Required. + :type restore_point_collection_name: str + :param vm_restore_point_name: The name of the vm restore point that the disk disk restore point + belongs. Required. + :type vm_restore_point_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either DiskRestorePoint or the result of cls(response) + :rtype: + ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2023_01_02.models.DiskRestorePoint] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[_models.DiskRestorePointList] = kwargs.pop("cls", None) + + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + request = build_disk_restore_point_list_by_restore_point_request( + resource_group_name=resource_group_name, + restore_point_collection_name=restore_point_collection_name, + vm_restore_point_name=vm_restore_point_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self.list_by_restore_point.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + request.method = "GET" + return request + + async def extract_data(pipeline_response): + deserialized = self._deserialize("DiskRestorePointList", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.next_link or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + list_by_restore_point.metadata = { + "url": 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}/restorePoints/{vmRestorePointName}/diskRestorePoints" + } + + async def _grant_access_initial( + self, + resource_group_name: str, + restore_point_collection_name: str, + vm_restore_point_name: str, + disk_restore_point_name: str, + grant_access_data: Union[_models.GrantAccessData, IO], + **kwargs: Any + ) -> Optional[_models.AccessUri]: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[Optional[_models.AccessUri]] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(grant_access_data, (IOBase, bytes)): + _content = grant_access_data + else: + _json = self._serialize.body(grant_access_data, "GrantAccessData") + + request = build_disk_restore_point_grant_access_request( + resource_group_name=resource_group_name, + restore_point_collection_name=restore_point_collection_name, + vm_restore_point_name=vm_restore_point_name, + disk_restore_point_name=disk_restore_point_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + template_url=self._grant_access_initial.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize("AccessUri", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + + _grant_access_initial.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}/restorePoints/{vmRestorePointName}/diskRestorePoints/{diskRestorePointName}/beginGetAccess" + } + + @overload + async def begin_grant_access( + self, + resource_group_name: str, + restore_point_collection_name: str, + vm_restore_point_name: str, + disk_restore_point_name: str, + grant_access_data: _models.GrantAccessData, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.AccessUri]: + """Grants access to a diskRestorePoint. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param restore_point_collection_name: The name of the restore point collection that the disk + restore point belongs. Required. 
+ :type restore_point_collection_name: str + :param vm_restore_point_name: The name of the vm restore point that the disk disk restore point + belongs. Required. + :type vm_restore_point_name: str + :param disk_restore_point_name: The name of the disk restore point created. Required. + :type disk_restore_point_name: str + :param grant_access_data: Access data object supplied in the body of the get disk access + operation. Required. + :type grant_access_data: ~azure.mgmt.compute.v2023_01_02.models.GrantAccessData + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of AsyncLROPoller that returns either AccessUri or the result of + cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2023_01_02.models.AccessUri] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def begin_grant_access( + self, + resource_group_name: str, + restore_point_collection_name: str, + vm_restore_point_name: str, + disk_restore_point_name: str, + grant_access_data: IO, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.AccessUri]: + """Grants access to a diskRestorePoint. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param restore_point_collection_name: The name of the restore point collection that the disk + restore point belongs. Required. + :type restore_point_collection_name: str + :param vm_restore_point_name: The name of the vm restore point that the disk disk restore point + belongs. Required. + :type vm_restore_point_name: str + :param disk_restore_point_name: The name of the disk restore point created. Required. + :type disk_restore_point_name: str + :param grant_access_data: Access data object supplied in the body of the get disk access + operation. Required. + :type grant_access_data: IO + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. 
+ :return: An instance of AsyncLROPoller that returns either AccessUri or the result of
+ cls(response)
+ :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2023_01_02.models.AccessUri]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace_async
+ async def begin_grant_access(
+ self,
+ resource_group_name: str,
+ restore_point_collection_name: str,
+ vm_restore_point_name: str,
+ disk_restore_point_name: str,
+ grant_access_data: Union[_models.GrantAccessData, IO],
+ **kwargs: Any
+ ) -> AsyncLROPoller[_models.AccessUri]:
+ """Grants access to a diskRestorePoint.
+
+ :param resource_group_name: The name of the resource group. Required.
+ :type resource_group_name: str
+ :param restore_point_collection_name: The name of the restore point collection to which the
+ disk restore point belongs. Required.
+ :type restore_point_collection_name: str
+ :param vm_restore_point_name: The name of the vm restore point to which the disk restore point
+ belongs. Required.
+ :type vm_restore_point_name: str
+ :param disk_restore_point_name: The name of the disk restore point created. Required.
+ :type disk_restore_point_name: str
+ :param grant_access_data: Access data object supplied in the body of the get disk access
+ operation. Is either a GrantAccessData type or an IO type. Required.
+ :type grant_access_data: ~azure.mgmt.compute.v2023_01_02.models.GrantAccessData or IO
+ :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
+ Default value is None.
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
+ this operation to not poll, or pass in your own initialized polling object for a personal
+ polling strategy.
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of AsyncLROPoller that returns either AccessUri or the result of + cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2023_01_02.models.AccessUri] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.AccessUri] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._grant_access_initial( + resource_group_name=resource_group_name, + restore_point_collection_name=restore_point_collection_name, + vm_restore_point_name=vm_restore_point_name, + disk_restore_point_name=disk_restore_point_name, + grant_access_data=grant_access_data, + api_version=api_version, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize("AccessUri", pipeline_response) + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + if polling is True: + polling_method: AsyncPollingMethod = cast( + AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs) + ) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + begin_grant_access.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}/restorePoints/{vmRestorePointName}/diskRestorePoints/{diskRestorePointName}/beginGetAccess" + } + + async def _revoke_access_initial( # pylint: disable=inconsistent-return-statements + self, + resource_group_name: str, + restore_point_collection_name: str, + vm_restore_point_name: str, + disk_restore_point_name: str, + **kwargs: Any + ) -> None: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_disk_restore_point_revoke_access_request( + resource_group_name=resource_group_name, + restore_point_collection_name=restore_point_collection_name, + vm_restore_point_name=vm_restore_point_name, + disk_restore_point_name=disk_restore_point_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + 
template_url=self._revoke_access_initial.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + if cls: + return cls(pipeline_response, None, {}) + + _revoke_access_initial.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}/restorePoints/{vmRestorePointName}/diskRestorePoints/{diskRestorePointName}/endGetAccess" + } + + @distributed_trace_async + async def begin_revoke_access( + self, + resource_group_name: str, + restore_point_collection_name: str, + vm_restore_point_name: str, + disk_restore_point_name: str, + **kwargs: Any + ) -> AsyncLROPoller[None]: + """Revokes access to a diskRestorePoint. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param restore_point_collection_name: The name of the restore point collection that the disk + restore point belongs. Required. + :type restore_point_collection_name: str + :param vm_restore_point_name: The name of the vm restore point that the disk disk restore point + belongs. Required. + :type vm_restore_point_name: str + :param disk_restore_point_name: The name of the disk restore point created. Required. + :type disk_restore_point_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. 
+ :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._revoke_access_initial( # type: ignore + resource_group_name=resource_group_name, + restore_point_collection_name=restore_point_collection_name, + vm_restore_point_name=vm_restore_point_name, + disk_restore_point_name=disk_restore_point_name, + api_version=api_version, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) + + if polling is True: + polling_method: AsyncPollingMethod = cast( + AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs) + ) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + begin_revoke_access.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}/restorePoints/{vmRestorePointName}/diskRestorePoints/{diskRestorePointName}/endGetAccess" + } + + +class SnapshotsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.compute.v2023_01_02.aio.ComputeManagementClient`'s + :attr:`snapshots` attribute. 
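+
+ A minimal long-running-operation sketch (illustrative only; ``client`` and
+ ``snapshot_payload`` are assumed to be constructed elsewhere and are not defined here):
+
+ .. code-block:: python
+
+     # begin_create_or_update returns an AsyncLROPoller; awaiting result() yields the Snapshot
+     poller = await client.snapshots.begin_create_or_update(
+         "<resource-group>", "<snapshot-name>", snapshot_payload
+     )
+     snapshot = await poller.result()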
+ """ + + models = _models + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + + async def _create_or_update_initial( + self, resource_group_name: str, snapshot_name: str, snapshot: Union[_models.Snapshot, IO], **kwargs: Any + ) -> _models.Snapshot: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Snapshot] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(snapshot, (IOBase, bytes)): + _content = snapshot + else: + _json = self._serialize.body(snapshot, "Snapshot") + + request = build_snapshots_create_or_update_request( + resource_group_name=resource_group_name, + snapshot_name=snapshot_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + template_url=self._create_or_update_initial.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + if response.status_code == 200: + deserialized = self._deserialize("Snapshot", pipeline_response) + + if response.status_code == 202: + deserialized = self._deserialize("Snapshot", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + _create_or_update_initial.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}" + } + + @overload + async def begin_create_or_update( + self, + resource_group_name: str, + snapshot_name: str, + snapshot: _models.Snapshot, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.Snapshot]: + """Creates or updates a snapshot. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param snapshot_name: The name of the snapshot that is being created. The name can't be changed + after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. + The max name length is 80 characters. Required. 
+ :type snapshot_name: str + :param snapshot: Snapshot object supplied in the body of the Put disk operation. Required. + :type snapshot: ~azure.mgmt.compute.v2023_01_02.models.Snapshot + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of AsyncLROPoller that returns either Snapshot or the result of + cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2023_01_02.models.Snapshot] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def begin_create_or_update( + self, + resource_group_name: str, + snapshot_name: str, + snapshot: IO, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.Snapshot]: + """Creates or updates a snapshot. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param snapshot_name: The name of the snapshot that is being created. The name can't be changed + after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. + The max name length is 80 characters. Required. + :type snapshot_name: str + :param snapshot: Snapshot object supplied in the body of the Put disk operation. Required. + :type snapshot: IO + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of AsyncLROPoller that returns either Snapshot or the result of + cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2023_01_02.models.Snapshot] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def begin_create_or_update( + self, resource_group_name: str, snapshot_name: str, snapshot: Union[_models.Snapshot, IO], **kwargs: Any + ) -> AsyncLROPoller[_models.Snapshot]: + """Creates or updates a snapshot. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param snapshot_name: The name of the snapshot that is being created. The name can't be changed + after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. + The max name length is 80 characters. 
Required. + :type snapshot_name: str + :param snapshot: Snapshot object supplied in the body of the Put disk operation. Is either a + Snapshot type or a IO type. Required. + :type snapshot: ~azure.mgmt.compute.v2023_01_02.models.Snapshot or IO + :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. + Default value is None. + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of AsyncLROPoller that returns either Snapshot or the result of + cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2023_01_02.models.Snapshot] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Snapshot] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._create_or_update_initial( + resource_group_name=resource_group_name, + snapshot_name=snapshot_name, + snapshot=snapshot, + api_version=api_version, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize("Snapshot", pipeline_response) + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + if polling is True: + polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + begin_create_or_update.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}" + } + + async def _update_initial( + self, resource_group_name: str, snapshot_name: str, snapshot: Union[_models.SnapshotUpdate, IO], **kwargs: Any + ) -> _models.Snapshot: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = 
case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Snapshot] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(snapshot, (IOBase, bytes)): + _content = snapshot + else: + _json = self._serialize.body(snapshot, "SnapshotUpdate") + + request = build_snapshots_update_request( + resource_group_name=resource_group_name, + snapshot_name=snapshot_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + template_url=self._update_initial.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + if response.status_code == 200: + deserialized = self._deserialize("Snapshot", pipeline_response) + + if response.status_code == 202: + deserialized = self._deserialize("Snapshot", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + _update_initial.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}" + } + + @overload + async def begin_update( + self, + resource_group_name: str, + snapshot_name: str, + snapshot: _models.SnapshotUpdate, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.Snapshot]: + """Updates (patches) a snapshot. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param snapshot_name: The name of the snapshot that is being created. The name can't be changed + after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. + The max name length is 80 characters. Required. + :type snapshot_name: str + :param snapshot: Snapshot object supplied in the body of the Patch snapshot operation. + Required. + :type snapshot: ~azure.mgmt.compute.v2023_01_02.models.SnapshotUpdate + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. 
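A hedged sketch of the create-then-patch flow documented above (begin_create_or_update followed by begin_update), assuming a client built as in the earlier sketch; the location, create_option, source_resource_id, and tags keyword names and the resource IDs are illustrative assumptions rather than values shown in this file:

from azure.mgmt.compute.v2023_01_02 import models

async def create_and_tag_snapshot(client) -> models.Snapshot:
    # Create (or overwrite) a snapshot by copying an existing managed disk.
    poller = await client.snapshots.begin_create_or_update(
        resource_group_name="my-rg",
        snapshot_name="my-snapshot",
        snapshot=models.Snapshot(
            location="eastus",  # assumed tracked-resource field
            creation_data=models.CreationData(
                create_option=models.DiskCreateOption.COPY,
                source_resource_id="<managed-disk-resource-id>",  # placeholder
            ),
        ),
    )
    snapshot = await poller.result()  # Snapshot once the LRO completes

    # Patch mutable properties (tags here) through SnapshotUpdate.
    update_poller = await client.snapshots.begin_update(
        resource_group_name="my-rg",
        snapshot_name="my-snapshot",
        snapshot=models.SnapshotUpdate(tags={"env": "test"}),  # tags kwarg assumed
    )
    return await update_poller.result()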
+ :return: An instance of AsyncLROPoller that returns either Snapshot or the result of + cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2023_01_02.models.Snapshot] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def begin_update( + self, + resource_group_name: str, + snapshot_name: str, + snapshot: IO, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.Snapshot]: + """Updates (patches) a snapshot. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param snapshot_name: The name of the snapshot that is being created. The name can't be changed + after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. + The max name length is 80 characters. Required. + :type snapshot_name: str + :param snapshot: Snapshot object supplied in the body of the Patch snapshot operation. + Required. + :type snapshot: IO + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of AsyncLROPoller that returns either Snapshot or the result of + cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2023_01_02.models.Snapshot] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def begin_update( + self, resource_group_name: str, snapshot_name: str, snapshot: Union[_models.SnapshotUpdate, IO], **kwargs: Any + ) -> AsyncLROPoller[_models.Snapshot]: + """Updates (patches) a snapshot. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param snapshot_name: The name of the snapshot that is being created. The name can't be changed + after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. + The max name length is 80 characters. Required. + :type snapshot_name: str + :param snapshot: Snapshot object supplied in the body of the Patch snapshot operation. Is + either a SnapshotUpdate type or a IO type. Required. + :type snapshot: ~azure.mgmt.compute.v2023_01_02.models.SnapshotUpdate or IO + :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. + Default value is None. + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. 
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of AsyncLROPoller that returns either Snapshot or the result of + cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2023_01_02.models.Snapshot] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Snapshot] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._update_initial( + resource_group_name=resource_group_name, + snapshot_name=snapshot_name, + snapshot=snapshot, + api_version=api_version, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize("Snapshot", pipeline_response) + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + if polling is True: + polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + begin_update.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}" + } + + @distributed_trace_async + async def get(self, resource_group_name: str, snapshot_name: str, **kwargs: Any) -> _models.Snapshot: + """Gets information about a snapshot. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param snapshot_name: The name of the snapshot that is being created. The name can't be changed + after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. + The max name length is 80 characters. Required. 
+ :type snapshot_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: Snapshot or the result of cls(response) + :rtype: ~azure.mgmt.compute.v2023_01_02.models.Snapshot + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[_models.Snapshot] = kwargs.pop("cls", None) + + request = build_snapshots_get_request( + resource_group_name=resource_group_name, + snapshot_name=snapshot_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self.get.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize("Snapshot", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + + get.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}" + } + + async def _delete_initial( # pylint: disable=inconsistent-return-statements + self, resource_group_name: str, snapshot_name: str, **kwargs: Any + ) -> None: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_snapshots_delete_request( + resource_group_name=resource_group_name, + snapshot_name=snapshot_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self._delete_initial.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + if cls: + return cls(pipeline_response, None, {}) + + _delete_initial.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}" + } + + @distributed_trace_async 
+ async def begin_delete(self, resource_group_name: str, snapshot_name: str, **kwargs: Any) -> AsyncLROPoller[None]: + """Deletes a snapshot. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param snapshot_name: The name of the snapshot that is being created. The name can't be changed + after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. + The max name length is 80 characters. Required. + :type snapshot_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._delete_initial( # type: ignore + resource_group_name=resource_group_name, + snapshot_name=snapshot_name, + api_version=api_version, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) + + if polling is True: + polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs)) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + begin_delete.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}" + } + + @distributed_trace + def list_by_resource_group(self, resource_group_name: str, **kwargs: Any) -> AsyncIterable["_models.Snapshot"]: + """Lists snapshots under a resource group. + + :param resource_group_name: The name of the resource group. Required. 
+ :type resource_group_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either Snapshot or the result of cls(response) + :rtype: + ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2023_01_02.models.Snapshot] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[_models.SnapshotList] = kwargs.pop("cls", None) + + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + request = build_snapshots_list_by_resource_group_request( + resource_group_name=resource_group_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self.list_by_resource_group.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + request.method = "GET" + return request + + async def extract_data(pipeline_response): + deserialized = self._deserialize("SnapshotList", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.next_link or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + list_by_resource_group.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots" + } + + @distributed_trace + def list(self, **kwargs: Any) -> AsyncIterable["_models.Snapshot"]: + """Lists snapshots under a subscription. 
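Both pagers documented here (list_by_resource_group above and list below) return AsyncItemPaged, so the call itself is not awaited and results are consumed with async for; a short sketch, assuming a client as in the earlier sketch and that name/id come from the base Resource model:

async def print_snapshot_names(client) -> None:
    # Snapshots within one resource group.
    async for snap in client.snapshots.list_by_resource_group("my-rg"):
        print(snap.name)

    # Every snapshot in the subscription.
    async for snap in client.snapshots.list():
        print(snap.id)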
+ + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either Snapshot or the result of cls(response) + :rtype: + ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2023_01_02.models.Snapshot] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[_models.SnapshotList] = kwargs.pop("cls", None) + + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + request = build_snapshots_list_request( + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self.list.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + request.method = "GET" + return request + + async def extract_data(pipeline_response): + deserialized = self._deserialize("SnapshotList", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.next_link or None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + list.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/snapshots"} + + async def _grant_access_initial( + self, + resource_group_name: str, + snapshot_name: str, + grant_access_data: Union[_models.GrantAccessData, IO], + **kwargs: Any + ) -> Optional[_models.AccessUri]: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[Optional[_models.AccessUri]] = 
kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(grant_access_data, (IOBase, bytes)): + _content = grant_access_data + else: + _json = self._serialize.body(grant_access_data, "GrantAccessData") + + request = build_snapshots_grant_access_request( + resource_group_name=resource_group_name, + snapshot_name=snapshot_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + template_url=self._grant_access_initial.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize("AccessUri", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + + _grant_access_initial.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/beginGetAccess" + } + + @overload + async def begin_grant_access( + self, + resource_group_name: str, + snapshot_name: str, + grant_access_data: _models.GrantAccessData, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.AccessUri]: + """Grants access to a snapshot. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param snapshot_name: The name of the snapshot that is being created. The name can't be changed + after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. + The max name length is 80 characters. Required. + :type snapshot_name: str + :param grant_access_data: Access data object supplied in the body of the get snapshot access + operation. Required. + :type grant_access_data: ~azure.mgmt.compute.v2023_01_02.models.GrantAccessData + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. 
+ :return: An instance of AsyncLROPoller that returns either AccessUri or the result of + cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2023_01_02.models.AccessUri] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def begin_grant_access( + self, + resource_group_name: str, + snapshot_name: str, + grant_access_data: IO, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> AsyncLROPoller[_models.AccessUri]: + """Grants access to a snapshot. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param snapshot_name: The name of the snapshot that is being created. The name can't be changed + after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. + The max name length is 80 characters. Required. + :type snapshot_name: str + :param grant_access_data: Access data object supplied in the body of the get snapshot access + operation. Required. + :type grant_access_data: IO + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of AsyncLROPoller that returns either AccessUri or the result of + cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2023_01_02.models.AccessUri] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def begin_grant_access( + self, + resource_group_name: str, + snapshot_name: str, + grant_access_data: Union[_models.GrantAccessData, IO], + **kwargs: Any + ) -> AsyncLROPoller[_models.AccessUri]: + """Grants access to a snapshot. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param snapshot_name: The name of the snapshot that is being created. The name can't be changed + after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. + The max name length is 80 characters. Required. + :type snapshot_name: str + :param grant_access_data: Access data object supplied in the body of the get snapshot access + operation. Is either a GrantAccessData type or a IO type. Required. + :type grant_access_data: ~azure.mgmt.compute.v2023_01_02.models.GrantAccessData or IO + :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. + Default value is None. + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. 
+ :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of AsyncLROPoller that returns either AccessUri or the result of + cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2023_01_02.models.AccessUri] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.AccessUri] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._grant_access_initial( + resource_group_name=resource_group_name, + snapshot_name=snapshot_name, + grant_access_data=grant_access_data, + api_version=api_version, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize("AccessUri", pipeline_response) + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + if polling is True: + polling_method: AsyncPollingMethod = cast( + AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs) + ) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + begin_grant_access.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/beginGetAccess" + } + + async def _revoke_access_initial( # pylint: disable=inconsistent-return-statements + self, resource_group_name: str, snapshot_name: str, **kwargs: Any + ) -> None: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_snapshots_revoke_access_request( + resource_group_name=resource_group_name, + snapshot_name=snapshot_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self._revoke_access_initial.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = await 
self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + if cls: + return cls(pipeline_response, None, {}) + + _revoke_access_initial.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/endGetAccess" + } + + @distributed_trace_async + async def begin_revoke_access( + self, resource_group_name: str, snapshot_name: str, **kwargs: Any + ) -> AsyncLROPoller[None]: + """Revokes access to a snapshot. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param snapshot_name: The name of the snapshot that is being created. The name can't be changed + after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. + The max name length is 80 characters. Required. + :type snapshot_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for + this operation to not poll, or pass in your own initialized polling object for a personal + polling strategy. + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.AsyncLROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = await self._revoke_access_initial( # type: ignore + resource_group_name=resource_group_name, + snapshot_name=snapshot_name, + api_version=api_version, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) + + if polling is True: + polling_method: AsyncPollingMethod = cast( + AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs) + ) + elif polling is False: + polling_method = cast(AsyncPollingMethod, AsyncNoPolling()) + else: + polling_method = polling + if cont_token: + return AsyncLROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + 
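The continuation_token and polling keywords handled in the implementation above apply to every begin_* operation in this class; a sketch of suspending and resuming a poller, assuming a client as before and that the azure-core poller exposes continuation_token() for saving state:

async def start_revoke(client) -> str:
    # Start the LRO without waiting for completion and capture a resumable token.
    poller = await client.snapshots.begin_revoke_access("my-rg", "my-snapshot")
    return poller.continuation_token()

async def finish_revoke(client, token: str) -> None:
    # Resume polling from the saved state, e.g. in another process.
    poller = await client.snapshots.begin_revoke_access(
        "my-rg", "my-snapshot", continuation_token=token
    )
    await poller.result()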
begin_revoke_access.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/endGetAccess" + } diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/aio/operations/_patch.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/aio/operations/_patch.py new file mode 100644 index 0000000000000..f7dd32510333d --- /dev/null +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/aio/operations/_patch.py @@ -0,0 +1,20 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List + +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. + + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/models/__init__.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/models/__init__.py new file mode 100644 index 0000000000000..da6dfe3cf02d0 --- /dev/null +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/models/__init__.py @@ -0,0 +1,161 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from ._models_py3 import AccessUri +from ._models_py3 import ApiError +from ._models_py3 import ApiErrorBase +from ._models_py3 import CopyCompletionError +from ._models_py3 import CreationData +from ._models_py3 import Disk +from ._models_py3 import DiskAccess +from ._models_py3 import DiskAccessList +from ._models_py3 import DiskAccessUpdate +from ._models_py3 import DiskEncryptionSet +from ._models_py3 import DiskEncryptionSetList +from ._models_py3 import DiskEncryptionSetUpdate +from ._models_py3 import DiskList +from ._models_py3 import DiskRestorePoint +from ._models_py3 import DiskRestorePointList +from ._models_py3 import DiskSecurityProfile +from ._models_py3 import DiskSku +from ._models_py3 import DiskUpdate +from ._models_py3 import Encryption +from ._models_py3 import EncryptionSetIdentity +from ._models_py3 import EncryptionSettingsCollection +from ._models_py3 import EncryptionSettingsElement +from ._models_py3 import ExtendedLocation +from ._models_py3 import GrantAccessData +from ._models_py3 import ImageDiskReference +from ._models_py3 import InnerError +from ._models_py3 import KeyForDiskEncryptionSet +from ._models_py3 import KeyVaultAndKeyReference +from ._models_py3 import KeyVaultAndSecretReference +from ._models_py3 import PrivateEndpoint +from ._models_py3 import PrivateEndpointConnection +from ._models_py3 import PrivateEndpointConnectionListResult +from ._models_py3 import PrivateLinkResource +from ._models_py3 import PrivateLinkResourceListResult +from ._models_py3 import PrivateLinkServiceConnectionState +from ._models_py3 import PropertyUpdatesInProgress +from ._models_py3 import ProxyOnlyResource +from ._models_py3 import PurchasePlan +from ._models_py3 import Resource +from ._models_py3 import ResourceUriList +from ._models_py3 import ResourceWithOptionalLocation +from ._models_py3 import ShareInfoElement +from ._models_py3 import Snapshot +from ._models_py3 import SnapshotList +from ._models_py3 import SnapshotSku +from ._models_py3 import SnapshotUpdate +from ._models_py3 import SourceVault +from ._models_py3 import SubResource +from ._models_py3 import SubResourceReadOnly +from ._models_py3 import SupportedCapabilities +from ._models_py3 import SystemData +from ._models_py3 import UserAssignedIdentitiesValue + +from ._compute_management_client_enums import AccessLevel +from ._compute_management_client_enums import Architecture +from ._compute_management_client_enums import CopyCompletionErrorReason +from ._compute_management_client_enums import DataAccessAuthMode +from ._compute_management_client_enums import DiskCreateOption +from ._compute_management_client_enums import DiskEncryptionSetIdentityType +from ._compute_management_client_enums import DiskEncryptionSetType +from ._compute_management_client_enums import DiskSecurityTypes +from ._compute_management_client_enums import DiskState +from ._compute_management_client_enums import DiskStorageAccountTypes +from ._compute_management_client_enums import EncryptionType +from ._compute_management_client_enums import ExtendedLocationTypes +from ._compute_management_client_enums import FileFormat +from ._compute_management_client_enums import HyperVGeneration +from ._compute_management_client_enums import NetworkAccessPolicy +from ._compute_management_client_enums import OperatingSystemTypes +from ._compute_management_client_enums import PrivateEndpointConnectionProvisioningState +from ._compute_management_client_enums import 
PrivateEndpointServiceConnectionStatus +from ._compute_management_client_enums import PublicNetworkAccess +from ._compute_management_client_enums import SnapshotStorageAccountTypes +from ._patch import __all__ as _patch_all +from ._patch import * # pylint: disable=unused-wildcard-import +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "AccessUri", + "ApiError", + "ApiErrorBase", + "CopyCompletionError", + "CreationData", + "Disk", + "DiskAccess", + "DiskAccessList", + "DiskAccessUpdate", + "DiskEncryptionSet", + "DiskEncryptionSetList", + "DiskEncryptionSetUpdate", + "DiskList", + "DiskRestorePoint", + "DiskRestorePointList", + "DiskSecurityProfile", + "DiskSku", + "DiskUpdate", + "Encryption", + "EncryptionSetIdentity", + "EncryptionSettingsCollection", + "EncryptionSettingsElement", + "ExtendedLocation", + "GrantAccessData", + "ImageDiskReference", + "InnerError", + "KeyForDiskEncryptionSet", + "KeyVaultAndKeyReference", + "KeyVaultAndSecretReference", + "PrivateEndpoint", + "PrivateEndpointConnection", + "PrivateEndpointConnectionListResult", + "PrivateLinkResource", + "PrivateLinkResourceListResult", + "PrivateLinkServiceConnectionState", + "PropertyUpdatesInProgress", + "ProxyOnlyResource", + "PurchasePlan", + "Resource", + "ResourceUriList", + "ResourceWithOptionalLocation", + "ShareInfoElement", + "Snapshot", + "SnapshotList", + "SnapshotSku", + "SnapshotUpdate", + "SourceVault", + "SubResource", + "SubResourceReadOnly", + "SupportedCapabilities", + "SystemData", + "UserAssignedIdentitiesValue", + "AccessLevel", + "Architecture", + "CopyCompletionErrorReason", + "DataAccessAuthMode", + "DiskCreateOption", + "DiskEncryptionSetIdentityType", + "DiskEncryptionSetType", + "DiskSecurityTypes", + "DiskState", + "DiskStorageAccountTypes", + "EncryptionType", + "ExtendedLocationTypes", + "FileFormat", + "HyperVGeneration", + "NetworkAccessPolicy", + "OperatingSystemTypes", + "PrivateEndpointConnectionProvisioningState", + "PrivateEndpointServiceConnectionStatus", + "PublicNetworkAccess", + "SnapshotStorageAccountTypes", +] +__all__.extend([p for p in _patch_all if p not in __all__]) +_patch_sdk() diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/models/_compute_management_client_enums.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/models/_compute_management_client_enums.py new file mode 100644 index 0000000000000..601c7c2b0d72c --- /dev/null +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/models/_compute_management_client_enums.py @@ -0,0 +1,266 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from enum import Enum +from azure.core import CaseInsensitiveEnumMeta + + +class AccessLevel(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """AccessLevel.""" + + NONE = "None" + READ = "Read" + WRITE = "Write" + + +class Architecture(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """CPU architecture supported by an OS disk.""" + + X64 = "x64" + ARM64 = "Arm64" + + +class CopyCompletionErrorReason(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Indicates the error code if the background copy of a resource created via the CopyStart + operation fails. + """ + + COPY_SOURCE_NOT_FOUND = "CopySourceNotFound" + """Indicates that the source snapshot was deleted while the background copy of the resource + #: created via CopyStart operation was in progress.""" + + +class DataAccessAuthMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Additional authentication requirements when exporting or uploading to a disk or snapshot.""" + + AZURE_ACTIVE_DIRECTORY = "AzureActiveDirectory" + """When export/upload URL is used, the system checks if the user has an identity in Azure Active + #: Directory and has necessary permissions to export/upload the data. Please refer to + #: aka.ms/DisksAzureADAuth.""" + NONE = "None" + """No additional authentication would be performed when accessing export/upload URL.""" + + +class DiskCreateOption(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """This enumerates the possible sources of a disk's creation.""" + + EMPTY = "Empty" + """Create an empty data disk of a size given by diskSizeGB.""" + ATTACH = "Attach" + """Disk will be attached to a VM.""" + FROM_IMAGE = "FromImage" + """Create a new disk from a platform image specified by the given imageReference or + #: galleryImageReference.""" + IMPORT = "Import" + """Create a disk by importing from a blob specified by a sourceUri in a storage account specified + #: by storageAccountId.""" + COPY = "Copy" + """Create a new disk or snapshot by copying from a disk or snapshot specified by the given + #: sourceResourceId.""" + RESTORE = "Restore" + """Create a new disk by copying from a backup recovery point.""" + UPLOAD = "Upload" + """Create a new disk by obtaining a write token and using it to directly upload the contents of + #: the disk.""" + COPY_START = "CopyStart" + """Create a new disk by using a deep copy process, where the resource creation is considered + #: complete only after all data has been copied from the source.""" + IMPORT_SECURE = "ImportSecure" + """Similar to Import create option. Create a new Trusted Launch VM or Confidential VM supported + #: disk by importing additional blob for VM guest state specified by securityDataUri in storage + #: account specified by storageAccountId""" + UPLOAD_PREPARED_SECURE = "UploadPreparedSecure" + """Similar to Upload create option. Create a new Trusted Launch VM or Confidential VM supported + #: disk and upload using write token in both disk and VM guest state""" + IMPORT_ENUM = "Import" + """Create a disk by importing from a blob specified by a sourceUri in a storage account specified + #: by storageAccountId.""" + + +class DiskEncryptionSetIdentityType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The type of Managed Identity used by the DiskEncryptionSet. Only SystemAssigned is supported + for new creations. 
Disk Encryption Sets can be updated with Identity type None during migration + of subscription to a new Azure Active Directory tenant; it will cause the encrypted resources + to lose access to the keys. + """ + + SYSTEM_ASSIGNED = "SystemAssigned" + USER_ASSIGNED = "UserAssigned" + SYSTEM_ASSIGNED_USER_ASSIGNED = "SystemAssigned, UserAssigned" + NONE = "None" + + +class DiskEncryptionSetType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The type of key used to encrypt the data of the disk.""" + + ENCRYPTION_AT_REST_WITH_CUSTOMER_KEY = "EncryptionAtRestWithCustomerKey" + """Resource using diskEncryptionSet would be encrypted at rest with Customer managed key that can + #: be changed and revoked by a customer.""" + ENCRYPTION_AT_REST_WITH_PLATFORM_AND_CUSTOMER_KEYS = "EncryptionAtRestWithPlatformAndCustomerKeys" + """Resource using diskEncryptionSet would be encrypted at rest with two layers of encryption. One + #: of the keys is Customer managed and the other key is Platform managed.""" + CONFIDENTIAL_VM_ENCRYPTED_WITH_CUSTOMER_KEY = "ConfidentialVmEncryptedWithCustomerKey" + """Confidential VM supported disk and VM guest state would be encrypted with customer managed key.""" + + +class DiskSecurityTypes(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Specifies the SecurityType of the VM. Applicable for OS disks only.""" + + TRUSTED_LAUNCH = "TrustedLaunch" + """Trusted Launch provides security features such as secure boot and virtual Trusted Platform + #: Module (vTPM)""" + CONFIDENTIAL_VM_VMGUEST_STATE_ONLY_ENCRYPTED_WITH_PLATFORM_KEY = ( + "ConfidentialVM_VMGuestStateOnlyEncryptedWithPlatformKey" + ) + """Indicates Confidential VM disk with only VM guest state encrypted""" + CONFIDENTIAL_VM_DISK_ENCRYPTED_WITH_PLATFORM_KEY = "ConfidentialVM_DiskEncryptedWithPlatformKey" + """Indicates Confidential VM disk with both OS disk and VM guest state encrypted with a platform + #: managed key""" + CONFIDENTIAL_VM_DISK_ENCRYPTED_WITH_CUSTOMER_KEY = "ConfidentialVM_DiskEncryptedWithCustomerKey" + """Indicates Confidential VM disk with both OS disk and VM guest state encrypted with a customer + #: managed key""" + + +class DiskState(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """This enumerates the possible state of the disk.""" + + UNATTACHED = "Unattached" + """The disk is not being used and can be attached to a VM.""" + ATTACHED = "Attached" + """The disk is currently attached to a running VM.""" + RESERVED = "Reserved" + """The disk is attached to a stopped-deallocated VM.""" + FROZEN = "Frozen" + """The disk is attached to a VM which is in hibernated state.""" + ACTIVE_SAS = "ActiveSAS" + """The disk currently has an Active SAS Uri associated with it.""" + ACTIVE_SAS_FROZEN = "ActiveSASFrozen" + """The disk is attached to a VM in hibernated state and has an active SAS URI associated with it.""" + READY_TO_UPLOAD = "ReadyToUpload" + """A disk is ready to be created by upload by requesting a write token.""" + ACTIVE_UPLOAD = "ActiveUpload" + """A disk is created for upload and a write token has been issued for uploading to it.""" + + +class DiskStorageAccountTypes(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The sku name.""" + + STANDARD_LRS = "Standard_LRS" + """Standard HDD locally redundant storage. Best for backup, non-critical, and infrequent access.""" + PREMIUM_LRS = "Premium_LRS" + """Premium SSD locally redundant storage. Best for production and performance sensitive workloads.""" + STANDARD_SSD_LRS = "StandardSSD_LRS" + """Standard SSD locally redundant storage. 
Best for web servers, lightly used enterprise + #: applications and dev/test.""" + ULTRA_SSD_LRS = "UltraSSD_LRS" + """Ultra SSD locally redundant storage. Best for IO-intensive workloads such as SAP HANA, top tier + #: databases (for example, SQL, Oracle), and other transaction-heavy workloads.""" + PREMIUM_ZRS = "Premium_ZRS" + """Premium SSD zone redundant storage. Best for the production workloads that need storage + #: resiliency against zone failures.""" + STANDARD_SSD_ZRS = "StandardSSD_ZRS" + """Standard SSD zone redundant storage. Best for web servers, lightly used enterprise applications + #: and dev/test that need storage resiliency against zone failures.""" + PREMIUM_V2_LRS = "PremiumV2_LRS" + """Premium SSD v2 locally redundant storage. Best for production and performance-sensitive + #: workloads that consistently require low latency and high IOPS and throughput.""" + + +class EncryptionType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The type of key used to encrypt the data of the disk.""" + + ENCRYPTION_AT_REST_WITH_PLATFORM_KEY = "EncryptionAtRestWithPlatformKey" + """Disk is encrypted at rest with Platform managed key. It is the default encryption type. This is + #: not a valid encryption type for disk encryption sets.""" + ENCRYPTION_AT_REST_WITH_CUSTOMER_KEY = "EncryptionAtRestWithCustomerKey" + """Disk is encrypted at rest with Customer managed key that can be changed and revoked by a + #: customer.""" + ENCRYPTION_AT_REST_WITH_PLATFORM_AND_CUSTOMER_KEYS = "EncryptionAtRestWithPlatformAndCustomerKeys" + """Disk is encrypted at rest with 2 layers of encryption. One of the keys is Customer managed and + #: the other key is Platform managed.""" + + +class ExtendedLocationTypes(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The type of extendedLocation.""" + + EDGE_ZONE = "EdgeZone" + + +class FileFormat(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Used to specify the file format when making request for SAS on a VHDX file format snapshot.""" + + VHD = "VHD" + """A VHD file is a disk image file in the Virtual Hard Disk file format.""" + VHDX = "VHDX" + """A VHDX file is a disk image file in the Virtual Hard Disk v2 file format.""" + + +class HyperVGeneration(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The hypervisor generation of the Virtual Machine. 
Applicable to OS disks only.""" + + V1 = "V1" + V2 = "V2" + + +class NetworkAccessPolicy(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Policy for accessing the disk via network.""" + + ALLOW_ALL = "AllowAll" + """The disk can be exported or uploaded to from any network.""" + ALLOW_PRIVATE = "AllowPrivate" + """The disk can be exported or uploaded to using a DiskAccess resource's private endpoints.""" + DENY_ALL = "DenyAll" + """The disk cannot be exported.""" + + +class OperatingSystemTypes(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The Operating System type.""" + + WINDOWS = "Windows" + LINUX = "Linux" + + +class PrivateEndpointConnectionProvisioningState(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The current provisioning state.""" + + SUCCEEDED = "Succeeded" + CREATING = "Creating" + DELETING = "Deleting" + FAILED = "Failed" + + +class PrivateEndpointServiceConnectionStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The private endpoint connection status.""" + + PENDING = "Pending" + APPROVED = "Approved" + REJECTED = "Rejected" + + +class PublicNetworkAccess(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Policy for controlling export on the disk.""" + + ENABLED = "Enabled" + """You can generate a SAS URI to access the underlying data of the disk publicly on the internet + #: when NetworkAccessPolicy is set to AllowAll. You can access the data via the SAS URI only from + #: your trusted Azure VNET when NetworkAccessPolicy is set to AllowPrivate.""" + DISABLED = "Disabled" + """You cannot access the underlying data of the disk publicly on the internet even when + #: NetworkAccessPolicy is set to AllowAll. You can access the data via the SAS URI only from your + #: trusted Azure VNET when NetworkAccessPolicy is set to AllowPrivate.""" + + +class SnapshotStorageAccountTypes(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The sku name.""" + + STANDARD_LRS = "Standard_LRS" + """Standard HDD locally redundant storage""" + PREMIUM_LRS = "Premium_LRS" + """Premium SSD locally redundant storage""" + STANDARD_ZRS = "Standard_ZRS" + """Standard zone redundant storage""" diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/models/_models_py3.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/models/_models_py3.py new file mode 100644 index 0000000000000..02d7f00a3a37b --- /dev/null +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/models/_models_py3.py @@ -0,0 +1,3176 @@ +# coding=utf-8 +# pylint: disable=too-many-lines +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from typing import Any, Dict, List, Optional, TYPE_CHECKING, Union + +from ... import _serialization + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from .. import models as _models + + +class AccessUri(_serialization.Model): + """A disk access SAS uri. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar access_sas: A SAS uri for accessing a disk. + :vartype access_sas: str + :ivar security_data_access_sas: A SAS uri for accessing a VM guest state. 
+ :vartype security_data_access_sas: str + """ + + _validation = { + "access_sas": {"readonly": True}, + "security_data_access_sas": {"readonly": True}, + } + + _attribute_map = { + "access_sas": {"key": "accessSAS", "type": "str"}, + "security_data_access_sas": {"key": "securityDataAccessSAS", "type": "str"}, + } + + def __init__(self, **kwargs: Any) -> None: + """ """ + super().__init__(**kwargs) + self.access_sas = None + self.security_data_access_sas = None + + +class ApiError(_serialization.Model): + """Api error. + + :ivar details: The Api error details. + :vartype details: list[~azure.mgmt.compute.v2023_01_02.models.ApiErrorBase] + :ivar innererror: The Api inner error. + :vartype innererror: ~azure.mgmt.compute.v2023_01_02.models.InnerError + :ivar code: The error code. + :vartype code: str + :ivar target: The target of the particular error. + :vartype target: str + :ivar message: The error message. + :vartype message: str + """ + + _attribute_map = { + "details": {"key": "details", "type": "[ApiErrorBase]"}, + "innererror": {"key": "innererror", "type": "InnerError"}, + "code": {"key": "code", "type": "str"}, + "target": {"key": "target", "type": "str"}, + "message": {"key": "message", "type": "str"}, + } + + def __init__( + self, + *, + details: Optional[List["_models.ApiErrorBase"]] = None, + innererror: Optional["_models.InnerError"] = None, + code: Optional[str] = None, + target: Optional[str] = None, + message: Optional[str] = None, + **kwargs: Any + ) -> None: + """ + :keyword details: The Api error details. + :paramtype details: list[~azure.mgmt.compute.v2023_01_02.models.ApiErrorBase] + :keyword innererror: The Api inner error. + :paramtype innererror: ~azure.mgmt.compute.v2023_01_02.models.InnerError + :keyword code: The error code. + :paramtype code: str + :keyword target: The target of the particular error. + :paramtype target: str + :keyword message: The error message. + :paramtype message: str + """ + super().__init__(**kwargs) + self.details = details + self.innererror = innererror + self.code = code + self.target = target + self.message = message + + +class ApiErrorBase(_serialization.Model): + """Api error base. + + :ivar code: The error code. + :vartype code: str + :ivar target: The target of the particular error. + :vartype target: str + :ivar message: The error message. + :vartype message: str + """ + + _attribute_map = { + "code": {"key": "code", "type": "str"}, + "target": {"key": "target", "type": "str"}, + "message": {"key": "message", "type": "str"}, + } + + def __init__( + self, *, code: Optional[str] = None, target: Optional[str] = None, message: Optional[str] = None, **kwargs: Any + ) -> None: + """ + :keyword code: The error code. + :paramtype code: str + :keyword target: The target of the particular error. + :paramtype target: str + :keyword message: The error message. + :paramtype message: str + """ + super().__init__(**kwargs) + self.code = code + self.target = target + self.message = message + + +class CopyCompletionError(_serialization.Model): + """Indicates the error details if the background copy of a resource created via the CopyStart + operation fails. + + All required parameters must be populated in order to send to Azure. + + :ivar error_code: Indicates the error code if the background copy of a resource created via the + CopyStart operation fails. Required. 
"CopySourceNotFound" + :vartype error_code: str or ~azure.mgmt.compute.v2023_01_02.models.CopyCompletionErrorReason + :ivar error_message: Indicates the error message if the background copy of a resource created + via the CopyStart operation fails. Required. + :vartype error_message: str + """ + + _validation = { + "error_code": {"required": True}, + "error_message": {"required": True}, + } + + _attribute_map = { + "error_code": {"key": "errorCode", "type": "str"}, + "error_message": {"key": "errorMessage", "type": "str"}, + } + + def __init__( + self, *, error_code: Union[str, "_models.CopyCompletionErrorReason"], error_message: str, **kwargs: Any + ) -> None: + """ + :keyword error_code: Indicates the error code if the background copy of a resource created via + the CopyStart operation fails. Required. "CopySourceNotFound" + :paramtype error_code: str or ~azure.mgmt.compute.v2023_01_02.models.CopyCompletionErrorReason + :keyword error_message: Indicates the error message if the background copy of a resource + created via the CopyStart operation fails. Required. + :paramtype error_message: str + """ + super().__init__(**kwargs) + self.error_code = error_code + self.error_message = error_message + + +class CreationData(_serialization.Model): # pylint: disable=too-many-instance-attributes + """Data used when creating a disk. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar create_option: This enumerates the possible sources of a disk's creation. Required. Known + values are: "Empty", "Attach", "FromImage", "Import", "Copy", "Restore", "Upload", "CopyStart", + "ImportSecure", "UploadPreparedSecure", and "Import". + :vartype create_option: str or ~azure.mgmt.compute.v2023_01_02.models.DiskCreateOption + :ivar storage_account_id: Required if createOption is Import. The Azure Resource Manager + identifier of the storage account containing the blob to import as a disk. + :vartype storage_account_id: str + :ivar image_reference: Disk source information for PIR or user images. + :vartype image_reference: ~azure.mgmt.compute.v2023_01_02.models.ImageDiskReference + :ivar gallery_image_reference: Required if creating from a Gallery Image. The + id/sharedGalleryImageId/communityGalleryImageId of the ImageDiskReference will be the ARM id of + the shared galley image version from which to create a disk. + :vartype gallery_image_reference: ~azure.mgmt.compute.v2023_01_02.models.ImageDiskReference + :ivar source_uri: If createOption is Import, this is the URI of a blob to be imported into a + managed disk. + :vartype source_uri: str + :ivar source_resource_id: If createOption is Copy, this is the ARM id of the source snapshot or + disk. + :vartype source_resource_id: str + :ivar source_unique_id: If this field is set, this is the unique id identifying the source of + this resource. + :vartype source_unique_id: str + :ivar upload_size_bytes: If createOption is Upload, this is the size of the contents of the + upload including the VHD footer. This value should be between 20972032 (20 MiB + 512 bytes for + the VHD footer) and 35183298347520 bytes (32 TiB + 512 bytes for the VHD footer). + :vartype upload_size_bytes: int + :ivar logical_sector_size: Logical sector size in bytes for Ultra disks. Supported values are + 512 ad 4096. 4096 is the default. 
+ :vartype logical_sector_size: int + :ivar security_data_uri: If createOption is ImportSecure, this is the URI of a blob to be + imported into VM guest state. + :vartype security_data_uri: str + :ivar performance_plus: Set this flag to true to get a boost on the performance target of the + disk deployed, see here on the respective performance target. This flag can only be set on disk + creation time and cannot be disabled after enabled. + :vartype performance_plus: bool + """ + + _validation = { + "create_option": {"required": True}, + "source_unique_id": {"readonly": True}, + } + + _attribute_map = { + "create_option": {"key": "createOption", "type": "str"}, + "storage_account_id": {"key": "storageAccountId", "type": "str"}, + "image_reference": {"key": "imageReference", "type": "ImageDiskReference"}, + "gallery_image_reference": {"key": "galleryImageReference", "type": "ImageDiskReference"}, + "source_uri": {"key": "sourceUri", "type": "str"}, + "source_resource_id": {"key": "sourceResourceId", "type": "str"}, + "source_unique_id": {"key": "sourceUniqueId", "type": "str"}, + "upload_size_bytes": {"key": "uploadSizeBytes", "type": "int"}, + "logical_sector_size": {"key": "logicalSectorSize", "type": "int"}, + "security_data_uri": {"key": "securityDataUri", "type": "str"}, + "performance_plus": {"key": "performancePlus", "type": "bool"}, + } + + def __init__( + self, + *, + create_option: Union[str, "_models.DiskCreateOption"], + storage_account_id: Optional[str] = None, + image_reference: Optional["_models.ImageDiskReference"] = None, + gallery_image_reference: Optional["_models.ImageDiskReference"] = None, + source_uri: Optional[str] = None, + source_resource_id: Optional[str] = None, + upload_size_bytes: Optional[int] = None, + logical_sector_size: Optional[int] = None, + security_data_uri: Optional[str] = None, + performance_plus: Optional[bool] = None, + **kwargs: Any + ) -> None: + """ + :keyword create_option: This enumerates the possible sources of a disk's creation. Required. + Known values are: "Empty", "Attach", "FromImage", "Import", "Copy", "Restore", "Upload", + "CopyStart", "ImportSecure", "UploadPreparedSecure", and "Import". + :paramtype create_option: str or ~azure.mgmt.compute.v2023_01_02.models.DiskCreateOption + :keyword storage_account_id: Required if createOption is Import. The Azure Resource Manager + identifier of the storage account containing the blob to import as a disk. + :paramtype storage_account_id: str + :keyword image_reference: Disk source information for PIR or user images. + :paramtype image_reference: ~azure.mgmt.compute.v2023_01_02.models.ImageDiskReference + :keyword gallery_image_reference: Required if creating from a Gallery Image. The + id/sharedGalleryImageId/communityGalleryImageId of the ImageDiskReference will be the ARM id of + the shared gallery image version from which to create a disk. + :paramtype gallery_image_reference: ~azure.mgmt.compute.v2023_01_02.models.ImageDiskReference + :keyword source_uri: If createOption is Import, this is the URI of a blob to be imported into a + managed disk. + :paramtype source_uri: str + :keyword source_resource_id: If createOption is Copy, this is the ARM id of the source snapshot + or disk. + :paramtype source_resource_id: str + :keyword upload_size_bytes: If createOption is Upload, this is the size of the contents of the + upload including the VHD footer. This value should be between 20972032 (20 MiB + 512 bytes for + the VHD footer) and 35183298347520 bytes (32 TiB + 512 bytes for the VHD footer).
+ :paramtype upload_size_bytes: int + :keyword logical_sector_size: Logical sector size in bytes for Ultra disks. Supported values + are 512 and 4096. 4096 is the default. + :paramtype logical_sector_size: int + :keyword security_data_uri: If createOption is ImportSecure, this is the URI of a blob to be + imported into VM guest state. + :paramtype security_data_uri: str + :keyword performance_plus: Set this flag to true to get a boost on the performance target of + the disk deployed, see here on the respective performance target. This flag can only be set on + disk creation time and cannot be disabled after enabled. + :paramtype performance_plus: bool + """ + super().__init__(**kwargs) + self.create_option = create_option + self.storage_account_id = storage_account_id + self.image_reference = image_reference + self.gallery_image_reference = gallery_image_reference + self.source_uri = source_uri + self.source_resource_id = source_resource_id + self.source_unique_id = None + self.upload_size_bytes = upload_size_bytes + self.logical_sector_size = logical_sector_size + self.security_data_uri = security_data_uri + self.performance_plus = performance_plus + + +class Resource(_serialization.Model): + """The Resource model definition. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar id: Resource Id. + :vartype id: str + :ivar name: Resource name. + :vartype name: str + :ivar type: Resource type. + :vartype type: str + :ivar location: Resource location. Required. + :vartype location: str + :ivar tags: Resource tags. + :vartype tags: dict[str, str] + """ + + _validation = { + "id": {"readonly": True}, + "name": {"readonly": True}, + "type": {"readonly": True}, + "location": {"required": True}, + } + + _attribute_map = { + "id": {"key": "id", "type": "str"}, + "name": {"key": "name", "type": "str"}, + "type": {"key": "type", "type": "str"}, + "location": {"key": "location", "type": "str"}, + "tags": {"key": "tags", "type": "{str}"}, + } + + def __init__(self, *, location: str, tags: Optional[Dict[str, str]] = None, **kwargs: Any) -> None: + """ + :keyword location: Resource location. Required. + :paramtype location: str + :keyword tags: Resource tags. + :paramtype tags: dict[str, str] + """ + super().__init__(**kwargs) + self.id = None + self.name = None + self.type = None + self.location = location + self.tags = tags + + +class Disk(Resource): # pylint: disable=too-many-instance-attributes + """Disk resource. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar id: Resource Id. + :vartype id: str + :ivar name: Resource name. + :vartype name: str + :ivar type: Resource type. + :vartype type: str + :ivar location: Resource location. Required. + :vartype location: str + :ivar tags: Resource tags. + :vartype tags: dict[str, str] + :ivar managed_by: A relative URI containing the ID of the VM that has the disk attached. + :vartype managed_by: str + :ivar managed_by_extended: List of relative URIs containing the IDs of the VMs that have the + disk attached. maxShares should be set to a value greater than one for disks to allow attaching + them to multiple VMs. + :vartype managed_by_extended: list[str] + :ivar sku: The disks sku name. Can be Standard_LRS, Premium_LRS, StandardSSD_LRS, UltraSSD_LRS, + Premium_ZRS, StandardSSD_ZRS, or PremiumV2_LRS.
+ :vartype sku: ~azure.mgmt.compute.v2023_01_02.models.DiskSku + :ivar zones: The Logical zone list for Disk. + :vartype zones: list[str] + :ivar extended_location: The extended location where the disk will be created. Extended + location cannot be changed. + :vartype extended_location: ~azure.mgmt.compute.v2023_01_02.models.ExtendedLocation + :ivar time_created: The time when the disk was created. + :vartype time_created: ~datetime.datetime + :ivar os_type: The Operating System type. Known values are: "Windows" and "Linux". + :vartype os_type: str or ~azure.mgmt.compute.v2023_01_02.models.OperatingSystemTypes + :ivar hyper_v_generation: The hypervisor generation of the Virtual Machine. Applicable to OS + disks only. Known values are: "V1" and "V2". + :vartype hyper_v_generation: str or ~azure.mgmt.compute.v2023_01_02.models.HyperVGeneration + :ivar purchase_plan: Purchase plan information for the image from which the OS disk was + created. E.g. - {name: 2019-Datacenter, publisher: MicrosoftWindowsServer, product: + WindowsServer}. + :vartype purchase_plan: ~azure.mgmt.compute.v2023_01_02.models.PurchasePlan + :ivar supported_capabilities: List of supported capabilities for the image from which the OS + disk was created. + :vartype supported_capabilities: ~azure.mgmt.compute.v2023_01_02.models.SupportedCapabilities + :ivar creation_data: Disk source information. CreationData information cannot be changed after + the disk has been created. + :vartype creation_data: ~azure.mgmt.compute.v2023_01_02.models.CreationData + :ivar disk_size_gb: If creationData.createOption is Empty, this field is mandatory and it + indicates the size of the disk to create. If this field is present for updates or creation with + other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a + running VM, and can only increase the disk's size. + :vartype disk_size_gb: int + :ivar disk_size_bytes: The size of the disk in bytes. This field is read only. + :vartype disk_size_bytes: int + :ivar unique_id: Unique Guid identifying the resource. + :vartype unique_id: str + :ivar encryption_settings_collection: Encryption settings collection used for Azure Disk + Encryption, can contain multiple encryption settings per disk or snapshot. + :vartype encryption_settings_collection: + ~azure.mgmt.compute.v2023_01_02.models.EncryptionSettingsCollection + :ivar provisioning_state: The disk provisioning state. + :vartype provisioning_state: str + :ivar disk_iops_read_write: The number of IOPS allowed for this disk; only settable for + UltraSSD disks. One operation can transfer between 4k and 256k bytes. + :vartype disk_iops_read_write: int + :ivar disk_m_bps_read_write: The bandwidth allowed for this disk; only settable for UltraSSD + disks. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of + 10. + :vartype disk_m_bps_read_write: int + :ivar disk_iops_read_only: The total number of IOPS that will be allowed across all VMs + mounting the shared disk as ReadOnly. One operation can transfer between 4k and 256k bytes. + :vartype disk_iops_read_only: int + :ivar disk_m_bps_read_only: The total throughput (MBps) that will be allowed across all VMs + mounting the shared disk as ReadOnly. MBps means millions of bytes per second - MB here uses + the ISO notation, of powers of 10. + :vartype disk_m_bps_read_only: int + :ivar disk_state: The state of the disk.
Known values are: "Unattached", "Attached", + "Reserved", "Frozen", "ActiveSAS", "ActiveSASFrozen", "ReadyToUpload", and "ActiveUpload". + :vartype disk_state: str or ~azure.mgmt.compute.v2023_01_02.models.DiskState + :ivar encryption: Encryption property can be used to encrypt data at rest with customer managed + keys or platform managed keys. + :vartype encryption: ~azure.mgmt.compute.v2023_01_02.models.Encryption + :ivar max_shares: The maximum number of VMs that can attach to the disk at the same time. Value + greater than one indicates a disk that can be mounted on multiple VMs at the same time. + :vartype max_shares: int + :ivar share_info: Details of the list of all VMs that have the disk attached. maxShares should + be set to a value greater than one for disks to allow attaching them to multiple VMs. + :vartype share_info: list[~azure.mgmt.compute.v2023_01_02.models.ShareInfoElement] + :ivar network_access_policy: Policy for accessing the disk via network. Known values are: + "AllowAll", "AllowPrivate", and "DenyAll". + :vartype network_access_policy: str or + ~azure.mgmt.compute.v2023_01_02.models.NetworkAccessPolicy + :ivar disk_access_id: ARM id of the DiskAccess resource for using private endpoints on disks. + :vartype disk_access_id: str + :ivar bursting_enabled_time: Latest time when bursting was last enabled on a disk. + :vartype bursting_enabled_time: ~datetime.datetime + :ivar tier: Performance tier of the disk (e.g, P4, S10) as described here: + https://azure.microsoft.com/en-us/pricing/details/managed-disks/. Does not apply to Ultra + disks. + :vartype tier: str + :ivar bursting_enabled: Set to true to enable bursting beyond the provisioned performance + target of the disk. Bursting is disabled by default. Does not apply to Ultra disks. + :vartype bursting_enabled: bool + :ivar property_updates_in_progress: Properties of the disk for which update is pending. + :vartype property_updates_in_progress: + ~azure.mgmt.compute.v2023_01_02.models.PropertyUpdatesInProgress + :ivar supports_hibernation: Indicates the OS on a disk supports hibernation. + :vartype supports_hibernation: bool + :ivar security_profile: Contains the security related information for the resource. + :vartype security_profile: ~azure.mgmt.compute.v2023_01_02.models.DiskSecurityProfile + :ivar completion_percent: Percentage complete for the background copy when a resource is + created via the CopyStart operation. + :vartype completion_percent: float + :ivar public_network_access: Policy for controlling export on the disk. Known values are: + "Enabled" and "Disabled". + :vartype public_network_access: str or + ~azure.mgmt.compute.v2023_01_02.models.PublicNetworkAccess + :ivar data_access_auth_mode: Additional authentication requirements when exporting or uploading + to a disk or snapshot. Known values are: "AzureActiveDirectory" and "None". + :vartype data_access_auth_mode: str or + ~azure.mgmt.compute.v2023_01_02.models.DataAccessAuthMode + :ivar optimized_for_frequent_attach: Setting this property to true improves reliability and + performance of data disks that are frequently (more than 5 times a day) detached from one + virtual machine and attached to another. This property should not be set for disks that are not + detached and attached frequently as it causes the disks to not align with the fault domain of + the virtual machine.
+ :vartype optimized_for_frequent_attach: bool + """ + + _validation = { + "id": {"readonly": True}, + "name": {"readonly": True}, + "type": {"readonly": True}, + "location": {"required": True}, + "managed_by": {"readonly": True}, + "managed_by_extended": {"readonly": True}, + "time_created": {"readonly": True}, + "disk_size_bytes": {"readonly": True}, + "unique_id": {"readonly": True}, + "provisioning_state": {"readonly": True}, + "disk_state": {"readonly": True}, + "share_info": {"readonly": True}, + "bursting_enabled_time": {"readonly": True}, + "property_updates_in_progress": {"readonly": True}, + } + + _attribute_map = { + "id": {"key": "id", "type": "str"}, + "name": {"key": "name", "type": "str"}, + "type": {"key": "type", "type": "str"}, + "location": {"key": "location", "type": "str"}, + "tags": {"key": "tags", "type": "{str}"}, + "managed_by": {"key": "managedBy", "type": "str"}, + "managed_by_extended": {"key": "managedByExtended", "type": "[str]"}, + "sku": {"key": "sku", "type": "DiskSku"}, + "zones": {"key": "zones", "type": "[str]"}, + "extended_location": {"key": "extendedLocation", "type": "ExtendedLocation"}, + "time_created": {"key": "properties.timeCreated", "type": "iso-8601"}, + "os_type": {"key": "properties.osType", "type": "str"}, + "hyper_v_generation": {"key": "properties.hyperVGeneration", "type": "str"}, + "purchase_plan": {"key": "properties.purchasePlan", "type": "PurchasePlan"}, + "supported_capabilities": {"key": "properties.supportedCapabilities", "type": "SupportedCapabilities"}, + "creation_data": {"key": "properties.creationData", "type": "CreationData"}, + "disk_size_gb": {"key": "properties.diskSizeGB", "type": "int"}, + "disk_size_bytes": {"key": "properties.diskSizeBytes", "type": "int"}, + "unique_id": {"key": "properties.uniqueId", "type": "str"}, + "encryption_settings_collection": { + "key": "properties.encryptionSettingsCollection", + "type": "EncryptionSettingsCollection", + }, + "provisioning_state": {"key": "properties.provisioningState", "type": "str"}, + "disk_iops_read_write": {"key": "properties.diskIOPSReadWrite", "type": "int"}, + "disk_m_bps_read_write": {"key": "properties.diskMBpsReadWrite", "type": "int"}, + "disk_iops_read_only": {"key": "properties.diskIOPSReadOnly", "type": "int"}, + "disk_m_bps_read_only": {"key": "properties.diskMBpsReadOnly", "type": "int"}, + "disk_state": {"key": "properties.diskState", "type": "str"}, + "encryption": {"key": "properties.encryption", "type": "Encryption"}, + "max_shares": {"key": "properties.maxShares", "type": "int"}, + "share_info": {"key": "properties.shareInfo", "type": "[ShareInfoElement]"}, + "network_access_policy": {"key": "properties.networkAccessPolicy", "type": "str"}, + "disk_access_id": {"key": "properties.diskAccessId", "type": "str"}, + "bursting_enabled_time": {"key": "properties.burstingEnabledTime", "type": "iso-8601"}, + "tier": {"key": "properties.tier", "type": "str"}, + "bursting_enabled": {"key": "properties.burstingEnabled", "type": "bool"}, + "property_updates_in_progress": { + "key": "properties.propertyUpdatesInProgress", + "type": "PropertyUpdatesInProgress", + }, + "supports_hibernation": {"key": "properties.supportsHibernation", "type": "bool"}, + "security_profile": {"key": "properties.securityProfile", "type": "DiskSecurityProfile"}, + "completion_percent": {"key": "properties.completionPercent", "type": "float"}, + "public_network_access": {"key": "properties.publicNetworkAccess", "type": "str"}, + "data_access_auth_mode": {"key": 
"properties.dataAccessAuthMode", "type": "str"}, + "optimized_for_frequent_attach": {"key": "properties.optimizedForFrequentAttach", "type": "bool"}, + } + + def __init__( # pylint: disable=too-many-locals + self, + *, + location: str, + tags: Optional[Dict[str, str]] = None, + sku: Optional["_models.DiskSku"] = None, + zones: Optional[List[str]] = None, + extended_location: Optional["_models.ExtendedLocation"] = None, + os_type: Optional[Union[str, "_models.OperatingSystemTypes"]] = None, + hyper_v_generation: Optional[Union[str, "_models.HyperVGeneration"]] = None, + purchase_plan: Optional["_models.PurchasePlan"] = None, + supported_capabilities: Optional["_models.SupportedCapabilities"] = None, + creation_data: Optional["_models.CreationData"] = None, + disk_size_gb: Optional[int] = None, + encryption_settings_collection: Optional["_models.EncryptionSettingsCollection"] = None, + disk_iops_read_write: Optional[int] = None, + disk_m_bps_read_write: Optional[int] = None, + disk_iops_read_only: Optional[int] = None, + disk_m_bps_read_only: Optional[int] = None, + encryption: Optional["_models.Encryption"] = None, + max_shares: Optional[int] = None, + network_access_policy: Optional[Union[str, "_models.NetworkAccessPolicy"]] = None, + disk_access_id: Optional[str] = None, + tier: Optional[str] = None, + bursting_enabled: Optional[bool] = None, + supports_hibernation: Optional[bool] = None, + security_profile: Optional["_models.DiskSecurityProfile"] = None, + completion_percent: Optional[float] = None, + public_network_access: Optional[Union[str, "_models.PublicNetworkAccess"]] = None, + data_access_auth_mode: Optional[Union[str, "_models.DataAccessAuthMode"]] = None, + optimized_for_frequent_attach: Optional[bool] = None, + **kwargs: Any + ) -> None: + """ + :keyword location: Resource location. Required. + :paramtype location: str + :keyword tags: Resource tags. + :paramtype tags: dict[str, str] + :keyword sku: The disks sku name. Can be Standard_LRS, Premium_LRS, StandardSSD_LRS, + UltraSSD_LRS, Premium_ZRS, StandardSSD_ZRS, or PremiumV2_LRS. + :paramtype sku: ~azure.mgmt.compute.v2023_01_02.models.DiskSku + :keyword zones: The Logical zone list for Disk. + :paramtype zones: list[str] + :keyword extended_location: The extended location where the disk will be created. Extended + location cannot be changed. + :paramtype extended_location: ~azure.mgmt.compute.v2023_01_02.models.ExtendedLocation + :keyword os_type: The Operating System type. Known values are: "Windows" and "Linux". + :paramtype os_type: str or ~azure.mgmt.compute.v2023_01_02.models.OperatingSystemTypes + :keyword hyper_v_generation: The hypervisor generation of the Virtual Machine. Applicable to OS + disks only. Known values are: "V1" and "V2". + :paramtype hyper_v_generation: str or ~azure.mgmt.compute.v2023_01_02.models.HyperVGeneration + :keyword purchase_plan: Purchase plan information for the the image from which the OS disk was + created. E.g. - {name: 2019-Datacenter, publisher: MicrosoftWindowsServer, product: + WindowsServer}. + :paramtype purchase_plan: ~azure.mgmt.compute.v2023_01_02.models.PurchasePlan + :keyword supported_capabilities: List of supported capabilities for the image from which the OS + disk was created. + :paramtype supported_capabilities: ~azure.mgmt.compute.v2023_01_02.models.SupportedCapabilities + :keyword creation_data: Disk source information. CreationData information cannot be changed + after the disk has been created. 
+ :paramtype creation_data: ~azure.mgmt.compute.v2023_01_02.models.CreationData + :keyword disk_size_gb: If creationData.createOption is Empty, this field is mandatory and it + indicates the size of the disk to create. If this field is present for updates or creation with + other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a + running VM, and can only increase the disk's size. + :paramtype disk_size_gb: int + :keyword encryption_settings_collection: Encryption settings collection used for Azure Disk + Encryption, can contain multiple encryption settings per disk or snapshot. + :paramtype encryption_settings_collection: + ~azure.mgmt.compute.v2023_01_02.models.EncryptionSettingsCollection + :keyword disk_iops_read_write: The number of IOPS allowed for this disk; only settable for + UltraSSD disks. One operation can transfer between 4k and 256k bytes. + :paramtype disk_iops_read_write: int + :keyword disk_m_bps_read_write: The bandwidth allowed for this disk; only settable for UltraSSD + disks. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of + 10. + :paramtype disk_m_bps_read_write: int + :keyword disk_iops_read_only: The total number of IOPS that will be allowed across all VMs + mounting the shared disk as ReadOnly. One operation can transfer between 4k and 256k bytes. + :paramtype disk_iops_read_only: int + :keyword disk_m_bps_read_only: The total throughput (MBps) that will be allowed across all VMs + mounting the shared disk as ReadOnly. MBps means millions of bytes per second - MB here uses + the ISO notation, of powers of 10. + :paramtype disk_m_bps_read_only: int + :keyword encryption: Encryption property can be used to encrypt data at rest with customer + managed keys or platform managed keys. + :paramtype encryption: ~azure.mgmt.compute.v2023_01_02.models.Encryption + :keyword max_shares: The maximum number of VMs that can attach to the disk at the same time. + Value greater than one indicates a disk that can be mounted on multiple VMs at the same time. + :paramtype max_shares: int + :keyword network_access_policy: Policy for accessing the disk via network. Known values are: + "AllowAll", "AllowPrivate", and "DenyAll". + :paramtype network_access_policy: str or + ~azure.mgmt.compute.v2023_01_02.models.NetworkAccessPolicy + :keyword disk_access_id: ARM id of the DiskAccess resource for using private endpoints on + disks. + :paramtype disk_access_id: str + :keyword tier: Performance tier of the disk (e.g, P4, S10) as described here: + https://azure.microsoft.com/en-us/pricing/details/managed-disks/. Does not apply to Ultra + disks. + :paramtype tier: str + :keyword bursting_enabled: Set to true to enable bursting beyond the provisioned performance + target of the disk. Bursting is disabled by default. Does not apply to Ultra disks. + :paramtype bursting_enabled: bool + :keyword supports_hibernation: Indicates the OS on a disk supports hibernation. + :paramtype supports_hibernation: bool + :keyword security_profile: Contains the security related information for the resource. + :paramtype security_profile: ~azure.mgmt.compute.v2023_01_02.models.DiskSecurityProfile + :keyword completion_percent: Percentage complete for the background copy when a resource is + created via the CopyStart operation. + :paramtype completion_percent: float + :keyword public_network_access: Policy for controlling export on the disk. Known values are: + "Enabled" and "Disabled". 
+ :paramtype public_network_access: str or + ~azure.mgmt.compute.v2023_01_02.models.PublicNetworkAccess + :keyword data_access_auth_mode: Additional authentication requirements when exporting or + uploading to a disk or snapshot. Known values are: "AzureActiveDirectory" and "None". + :paramtype data_access_auth_mode: str or + ~azure.mgmt.compute.v2023_01_02.models.DataAccessAuthMode + :keyword optimized_for_frequent_attach: Setting this property to true improves reliability and + performance of data disks that are frequently (more than 5 times a day) detached from one + virtual machine and attached to another. This property should not be set for disks that are not + detached and attached frequently as it causes the disks to not align with the fault domain of + the virtual machine. + :paramtype optimized_for_frequent_attach: bool + """ + super().__init__(location=location, tags=tags, **kwargs) + self.managed_by = None + self.managed_by_extended = None + self.sku = sku + self.zones = zones + self.extended_location = extended_location + self.time_created = None + self.os_type = os_type + self.hyper_v_generation = hyper_v_generation + self.purchase_plan = purchase_plan + self.supported_capabilities = supported_capabilities + self.creation_data = creation_data + self.disk_size_gb = disk_size_gb + self.disk_size_bytes = None + self.unique_id = None + self.encryption_settings_collection = encryption_settings_collection + self.provisioning_state = None + self.disk_iops_read_write = disk_iops_read_write + self.disk_m_bps_read_write = disk_m_bps_read_write + self.disk_iops_read_only = disk_iops_read_only + self.disk_m_bps_read_only = disk_m_bps_read_only + self.disk_state = None + self.encryption = encryption + self.max_shares = max_shares + self.share_info = None + self.network_access_policy = network_access_policy + self.disk_access_id = disk_access_id + self.bursting_enabled_time = None + self.tier = tier + self.bursting_enabled = bursting_enabled + self.property_updates_in_progress = None + self.supports_hibernation = supports_hibernation + self.security_profile = security_profile + self.completion_percent = completion_percent + self.public_network_access = public_network_access + self.data_access_auth_mode = data_access_auth_mode + self.optimized_for_frequent_attach = optimized_for_frequent_attach + + +class DiskAccess(Resource): + """disk access resource. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar id: Resource Id. + :vartype id: str + :ivar name: Resource name. + :vartype name: str + :ivar type: Resource type. + :vartype type: str + :ivar location: Resource location. Required. + :vartype location: str + :ivar tags: Resource tags. + :vartype tags: dict[str, str] + :ivar extended_location: The extended location where the disk access will be created. Extended + location cannot be changed. + :vartype extended_location: ~azure.mgmt.compute.v2023_01_02.models.ExtendedLocation + :ivar private_endpoint_connections: A readonly collection of private endpoint connections + created on the disk. Currently only one endpoint connection is supported. + :vartype private_endpoint_connections: + list[~azure.mgmt.compute.v2023_01_02.models.PrivateEndpointConnection] + :ivar provisioning_state: The disk access resource provisioning state. + :vartype provisioning_state: str + :ivar time_created: The time when the disk access was created.
+ :vartype time_created: ~datetime.datetime + """ + + _validation = { + "id": {"readonly": True}, + "name": {"readonly": True}, + "type": {"readonly": True}, + "location": {"required": True}, + "private_endpoint_connections": {"readonly": True}, + "provisioning_state": {"readonly": True}, + "time_created": {"readonly": True}, + } + + _attribute_map = { + "id": {"key": "id", "type": "str"}, + "name": {"key": "name", "type": "str"}, + "type": {"key": "type", "type": "str"}, + "location": {"key": "location", "type": "str"}, + "tags": {"key": "tags", "type": "{str}"}, + "extended_location": {"key": "extendedLocation", "type": "ExtendedLocation"}, + "private_endpoint_connections": { + "key": "properties.privateEndpointConnections", + "type": "[PrivateEndpointConnection]", + }, + "provisioning_state": {"key": "properties.provisioningState", "type": "str"}, + "time_created": {"key": "properties.timeCreated", "type": "iso-8601"}, + } + + def __init__( + self, + *, + location: str, + tags: Optional[Dict[str, str]] = None, + extended_location: Optional["_models.ExtendedLocation"] = None, + **kwargs: Any + ) -> None: + """ + :keyword location: Resource location. Required. + :paramtype location: str + :keyword tags: Resource tags. + :paramtype tags: dict[str, str] + :keyword extended_location: The extended location where the disk access will be created. + Extended location cannot be changed. + :paramtype extended_location: ~azure.mgmt.compute.v2023_01_02.models.ExtendedLocation + """ + super().__init__(location=location, tags=tags, **kwargs) + self.extended_location = extended_location + self.private_endpoint_connections = None + self.provisioning_state = None + self.time_created = None + + +class DiskAccessList(_serialization.Model): + """The List disk access operation response. + + All required parameters must be populated in order to send to Azure. + + :ivar value: A list of disk access resources. Required. + :vartype value: list[~azure.mgmt.compute.v2023_01_02.models.DiskAccess] + :ivar next_link: The uri to fetch the next page of disk access resources. Call ListNext() with + this to fetch the next page of disk access resources. + :vartype next_link: str + """ + + _validation = { + "value": {"required": True}, + } + + _attribute_map = { + "value": {"key": "value", "type": "[DiskAccess]"}, + "next_link": {"key": "nextLink", "type": "str"}, + } + + def __init__(self, *, value: List["_models.DiskAccess"], next_link: Optional[str] = None, **kwargs: Any) -> None: + """ + :keyword value: A list of disk access resources. Required. + :paramtype value: list[~azure.mgmt.compute.v2023_01_02.models.DiskAccess] + :keyword next_link: The uri to fetch the next page of disk access resources. Call ListNext() + with this to fetch the next page of disk access resources. + :paramtype next_link: str + """ + super().__init__(**kwargs) + self.value = value + self.next_link = next_link + + +class DiskAccessUpdate(_serialization.Model): + """Used for updating a disk access resource. + + :ivar tags: Resource tags. + :vartype tags: dict[str, str] + """ + + _attribute_map = { + "tags": {"key": "tags", "type": "{str}"}, + } + + def __init__(self, *, tags: Optional[Dict[str, str]] = None, **kwargs: Any) -> None: + """ + :keyword tags: Resource tags. + :paramtype tags: dict[str, str] + """ + super().__init__(**kwargs) + self.tags = tags + + +class DiskEncryptionSet(Resource): # pylint: disable=too-many-instance-attributes + """disk encryption set resource. 
+ + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar id: Resource Id. + :vartype id: str + :ivar name: Resource name. + :vartype name: str + :ivar type: Resource type. + :vartype type: str + :ivar location: Resource location. Required. + :vartype location: str + :ivar tags: Resource tags. + :vartype tags: dict[str, str] + :ivar identity: The managed identity for the disk encryption set. It should be given permission + on the key vault before it can be used to encrypt disks. + :vartype identity: ~azure.mgmt.compute.v2023_01_02.models.EncryptionSetIdentity + :ivar encryption_type: The type of key used to encrypt the data of the disk. Known values are: + "EncryptionAtRestWithCustomerKey", "EncryptionAtRestWithPlatformAndCustomerKeys", and + "ConfidentialVmEncryptedWithCustomerKey". + :vartype encryption_type: str or ~azure.mgmt.compute.v2023_01_02.models.DiskEncryptionSetType + :ivar active_key: The key vault key which is currently used by this disk encryption set. + :vartype active_key: ~azure.mgmt.compute.v2023_01_02.models.KeyForDiskEncryptionSet + :ivar previous_keys: A readonly collection of key vault keys previously used by this disk + encryption set while a key rotation is in progress. It will be empty if there is no ongoing key + rotation. + :vartype previous_keys: list[~azure.mgmt.compute.v2023_01_02.models.KeyForDiskEncryptionSet] + :ivar provisioning_state: The disk encryption set provisioning state. + :vartype provisioning_state: str + :ivar rotation_to_latest_key_version_enabled: Set this flag to true to enable auto-updating of + this disk encryption set to the latest key version. + :vartype rotation_to_latest_key_version_enabled: bool + :ivar last_key_rotation_timestamp: The time when the active key of this disk encryption set was + updated. + :vartype last_key_rotation_timestamp: ~datetime.datetime + :ivar auto_key_rotation_error: The error that was encountered during auto-key rotation. If an + error is present, then auto-key rotation will not be attempted until the error on this disk + encryption set is fixed. + :vartype auto_key_rotation_error: ~azure.mgmt.compute.v2023_01_02.models.ApiError + :ivar federated_client_id: Multi-tenant application client id to access key vault in a + different tenant. Setting the value to 'None' will clear the property. 
+ :vartype federated_client_id: str + """ + + _validation = { + "id": {"readonly": True}, + "name": {"readonly": True}, + "type": {"readonly": True}, + "location": {"required": True}, + "previous_keys": {"readonly": True}, + "provisioning_state": {"readonly": True}, + "last_key_rotation_timestamp": {"readonly": True}, + "auto_key_rotation_error": {"readonly": True}, + } + + _attribute_map = { + "id": {"key": "id", "type": "str"}, + "name": {"key": "name", "type": "str"}, + "type": {"key": "type", "type": "str"}, + "location": {"key": "location", "type": "str"}, + "tags": {"key": "tags", "type": "{str}"}, + "identity": {"key": "identity", "type": "EncryptionSetIdentity"}, + "encryption_type": {"key": "properties.encryptionType", "type": "str"}, + "active_key": {"key": "properties.activeKey", "type": "KeyForDiskEncryptionSet"}, + "previous_keys": {"key": "properties.previousKeys", "type": "[KeyForDiskEncryptionSet]"}, + "provisioning_state": {"key": "properties.provisioningState", "type": "str"}, + "rotation_to_latest_key_version_enabled": { + "key": "properties.rotationToLatestKeyVersionEnabled", + "type": "bool", + }, + "last_key_rotation_timestamp": {"key": "properties.lastKeyRotationTimestamp", "type": "iso-8601"}, + "auto_key_rotation_error": {"key": "properties.autoKeyRotationError", "type": "ApiError"}, + "federated_client_id": {"key": "properties.federatedClientId", "type": "str"}, + } + + def __init__( + self, + *, + location: str, + tags: Optional[Dict[str, str]] = None, + identity: Optional["_models.EncryptionSetIdentity"] = None, + encryption_type: Optional[Union[str, "_models.DiskEncryptionSetType"]] = None, + active_key: Optional["_models.KeyForDiskEncryptionSet"] = None, + rotation_to_latest_key_version_enabled: Optional[bool] = None, + federated_client_id: Optional[str] = None, + **kwargs: Any + ) -> None: + """ + :keyword location: Resource location. Required. + :paramtype location: str + :keyword tags: Resource tags. + :paramtype tags: dict[str, str] + :keyword identity: The managed identity for the disk encryption set. It should be given + permission on the key vault before it can be used to encrypt disks. + :paramtype identity: ~azure.mgmt.compute.v2023_01_02.models.EncryptionSetIdentity + :keyword encryption_type: The type of key used to encrypt the data of the disk. Known values + are: "EncryptionAtRestWithCustomerKey", "EncryptionAtRestWithPlatformAndCustomerKeys", and + "ConfidentialVmEncryptedWithCustomerKey". + :paramtype encryption_type: str or ~azure.mgmt.compute.v2023_01_02.models.DiskEncryptionSetType + :keyword active_key: The key vault key which is currently used by this disk encryption set. + :paramtype active_key: ~azure.mgmt.compute.v2023_01_02.models.KeyForDiskEncryptionSet + :keyword rotation_to_latest_key_version_enabled: Set this flag to true to enable auto-updating + of this disk encryption set to the latest key version. + :paramtype rotation_to_latest_key_version_enabled: bool + :keyword federated_client_id: Multi-tenant application client id to access key vault in a + different tenant. Setting the value to 'None' will clear the property. 
+ :paramtype federated_client_id: str + """ + super().__init__(location=location, tags=tags, **kwargs) + self.identity = identity + self.encryption_type = encryption_type + self.active_key = active_key + self.previous_keys = None + self.provisioning_state = None + self.rotation_to_latest_key_version_enabled = rotation_to_latest_key_version_enabled + self.last_key_rotation_timestamp = None + self.auto_key_rotation_error = None + self.federated_client_id = federated_client_id + + +class DiskEncryptionSetList(_serialization.Model): + """The List disk encryption set operation response. + + All required parameters must be populated in order to send to Azure. + + :ivar value: A list of disk encryption sets. Required. + :vartype value: list[~azure.mgmt.compute.v2023_01_02.models.DiskEncryptionSet] + :ivar next_link: The uri to fetch the next page of disk encryption sets. Call ListNext() with + this to fetch the next page of disk encryption sets. + :vartype next_link: str + """ + + _validation = { + "value": {"required": True}, + } + + _attribute_map = { + "value": {"key": "value", "type": "[DiskEncryptionSet]"}, + "next_link": {"key": "nextLink", "type": "str"}, + } + + def __init__( + self, *, value: List["_models.DiskEncryptionSet"], next_link: Optional[str] = None, **kwargs: Any + ) -> None: + """ + :keyword value: A list of disk encryption sets. Required. + :paramtype value: list[~azure.mgmt.compute.v2023_01_02.models.DiskEncryptionSet] + :keyword next_link: The uri to fetch the next page of disk encryption sets. Call ListNext() + with this to fetch the next page of disk encryption sets. + :paramtype next_link: str + """ + super().__init__(**kwargs) + self.value = value + self.next_link = next_link + + +class DiskEncryptionSetUpdate(_serialization.Model): + """disk encryption set update resource. + + :ivar tags: Resource tags. + :vartype tags: dict[str, str] + :ivar identity: The managed identity for the disk encryption set. It should be given permission + on the key vault before it can be used to encrypt disks. + :vartype identity: ~azure.mgmt.compute.v2023_01_02.models.EncryptionSetIdentity + :ivar encryption_type: The type of key used to encrypt the data of the disk. Known values are: + "EncryptionAtRestWithCustomerKey", "EncryptionAtRestWithPlatformAndCustomerKeys", and + "ConfidentialVmEncryptedWithCustomerKey". + :vartype encryption_type: str or ~azure.mgmt.compute.v2023_01_02.models.DiskEncryptionSetType + :ivar active_key: Key Vault Key Url to be used for server side encryption of Managed Disks and + Snapshots. + :vartype active_key: ~azure.mgmt.compute.v2023_01_02.models.KeyForDiskEncryptionSet + :ivar rotation_to_latest_key_version_enabled: Set this flag to true to enable auto-updating of + this disk encryption set to the latest key version. + :vartype rotation_to_latest_key_version_enabled: bool + :ivar federated_client_id: Multi-tenant application client id to access key vault in a + different tenant. Setting the value to 'None' will clear the property. 
+ :vartype federated_client_id: str + """ + + _attribute_map = { + "tags": {"key": "tags", "type": "{str}"}, + "identity": {"key": "identity", "type": "EncryptionSetIdentity"}, + "encryption_type": {"key": "properties.encryptionType", "type": "str"}, + "active_key": {"key": "properties.activeKey", "type": "KeyForDiskEncryptionSet"}, + "rotation_to_latest_key_version_enabled": { + "key": "properties.rotationToLatestKeyVersionEnabled", + "type": "bool", + }, + "federated_client_id": {"key": "properties.federatedClientId", "type": "str"}, + } + + def __init__( + self, + *, + tags: Optional[Dict[str, str]] = None, + identity: Optional["_models.EncryptionSetIdentity"] = None, + encryption_type: Optional[Union[str, "_models.DiskEncryptionSetType"]] = None, + active_key: Optional["_models.KeyForDiskEncryptionSet"] = None, + rotation_to_latest_key_version_enabled: Optional[bool] = None, + federated_client_id: Optional[str] = None, + **kwargs: Any + ) -> None: + """ + :keyword tags: Resource tags. + :paramtype tags: dict[str, str] + :keyword identity: The managed identity for the disk encryption set. It should be given + permission on the key vault before it can be used to encrypt disks. + :paramtype identity: ~azure.mgmt.compute.v2023_01_02.models.EncryptionSetIdentity + :keyword encryption_type: The type of key used to encrypt the data of the disk. Known values + are: "EncryptionAtRestWithCustomerKey", "EncryptionAtRestWithPlatformAndCustomerKeys", and + "ConfidentialVmEncryptedWithCustomerKey". + :paramtype encryption_type: str or ~azure.mgmt.compute.v2023_01_02.models.DiskEncryptionSetType + :keyword active_key: Key Vault Key Url to be used for server side encryption of Managed Disks + and Snapshots. + :paramtype active_key: ~azure.mgmt.compute.v2023_01_02.models.KeyForDiskEncryptionSet + :keyword rotation_to_latest_key_version_enabled: Set this flag to true to enable auto-updating + of this disk encryption set to the latest key version. + :paramtype rotation_to_latest_key_version_enabled: bool + :keyword federated_client_id: Multi-tenant application client id to access key vault in a + different tenant. Setting the value to 'None' will clear the property. + :paramtype federated_client_id: str + """ + super().__init__(**kwargs) + self.tags = tags + self.identity = identity + self.encryption_type = encryption_type + self.active_key = active_key + self.rotation_to_latest_key_version_enabled = rotation_to_latest_key_version_enabled + self.federated_client_id = federated_client_id + + +class DiskList(_serialization.Model): + """The List Disks operation response. + + All required parameters must be populated in order to send to Azure. + + :ivar value: A list of disks. Required. + :vartype value: list[~azure.mgmt.compute.v2023_01_02.models.Disk] + :ivar next_link: The uri to fetch the next page of disks. Call ListNext() with this to fetch + the next page of disks. + :vartype next_link: str + """ + + _validation = { + "value": {"required": True}, + } + + _attribute_map = { + "value": {"key": "value", "type": "[Disk]"}, + "next_link": {"key": "nextLink", "type": "str"}, + } + + def __init__(self, *, value: List["_models.Disk"], next_link: Optional[str] = None, **kwargs: Any) -> None: + """ + :keyword value: A list of disks. Required. + :paramtype value: list[~azure.mgmt.compute.v2023_01_02.models.Disk] + :keyword next_link: The uri to fetch the next page of disks. Call ListNext() with this to fetch + the next page of disks. 
+ :paramtype next_link: str
+ """
+ super().__init__(**kwargs)
+ self.value = value
+ self.next_link = next_link
+
+
+class ProxyOnlyResource(_serialization.Model):
+ """The ProxyOnly Resource model definition.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar id: Resource Id.
+ :vartype id: str
+ :ivar name: Resource name.
+ :vartype name: str
+ :ivar type: Resource type.
+ :vartype type: str
+ """
+
+ _validation = {
+ "id": {"readonly": True},
+ "name": {"readonly": True},
+ "type": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "id": {"key": "id", "type": "str"},
+ "name": {"key": "name", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ }
+
+ def __init__(self, **kwargs: Any) -> None:
+ """ """
+ super().__init__(**kwargs)
+ self.id = None
+ self.name = None
+ self.type = None
+
+
+class DiskRestorePoint(ProxyOnlyResource): # pylint: disable=too-many-instance-attributes
+ """Properties of disk restore point.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar id: Resource Id.
+ :vartype id: str
+ :ivar name: Resource name.
+ :vartype name: str
+ :ivar type: Resource type.
+ :vartype type: str
+ :ivar time_created: The timestamp of restorePoint creation.
+ :vartype time_created: ~datetime.datetime
+ :ivar source_resource_id: arm id of source disk or source disk restore point.
+ :vartype source_resource_id: str
+ :ivar os_type: The Operating System type. Known values are: "Windows" and "Linux".
+ :vartype os_type: str or ~azure.mgmt.compute.v2023_01_02.models.OperatingSystemTypes
+ :ivar hyper_v_generation: The hypervisor generation of the Virtual Machine. Applicable to OS
+ disks only. Known values are: "V1" and "V2".
+ :vartype hyper_v_generation: str or ~azure.mgmt.compute.v2023_01_02.models.HyperVGeneration
+ :ivar purchase_plan: Purchase plan information for the image from which the OS disk was
+ created.
+ :vartype purchase_plan: ~azure.mgmt.compute.v2023_01_02.models.PurchasePlan
+ :ivar supported_capabilities: List of supported capabilities for the image from which the OS
+ disk was created.
+ :vartype supported_capabilities: ~azure.mgmt.compute.v2023_01_02.models.SupportedCapabilities
+ :ivar family_id: id of the backing snapshot's MIS family.
+ :vartype family_id: str
+ :ivar source_unique_id: unique incarnation id of the source disk.
+ :vartype source_unique_id: str
+ :ivar encryption: Encryption property can be used to encrypt data at rest with customer managed
+ keys or platform managed keys.
+ :vartype encryption: ~azure.mgmt.compute.v2023_01_02.models.Encryption
+ :ivar supports_hibernation: Indicates the OS on a disk supports hibernation.
+ :vartype supports_hibernation: bool
+ :ivar network_access_policy: Policy for accessing the disk via network. Known values are:
+ "AllowAll", "AllowPrivate", and "DenyAll".
+ :vartype network_access_policy: str or
+ ~azure.mgmt.compute.v2023_01_02.models.NetworkAccessPolicy
+ :ivar public_network_access: Policy for controlling export on the disk. Known values are:
+ "Enabled" and "Disabled".
+ :vartype public_network_access: str or
+ ~azure.mgmt.compute.v2023_01_02.models.PublicNetworkAccess
+ :ivar disk_access_id: ARM id of the DiskAccess resource for using private endpoints on disks.
+ :vartype disk_access_id: str
+ :ivar completion_percent: Percentage complete for the background copy of disk restore point
+ when source resource is from a different region.
+ :vartype completion_percent: float
+ :ivar replication_state: Replication state of disk restore point when source resource is from a
+ different region.
+ :vartype replication_state: str
+ :ivar source_resource_location: Location of source disk or source disk restore point when
+ source resource is from a different region.
+ :vartype source_resource_location: str
+ :ivar security_profile: Contains the security related information for the resource.
+ :vartype security_profile: ~azure.mgmt.compute.v2023_01_02.models.DiskSecurityProfile
+ """
+
+ _validation = {
+ "id": {"readonly": True},
+ "name": {"readonly": True},
+ "type": {"readonly": True},
+ "time_created": {"readonly": True},
+ "source_resource_id": {"readonly": True},
+ "os_type": {"readonly": True},
+ "family_id": {"readonly": True},
+ "source_unique_id": {"readonly": True},
+ "encryption": {"readonly": True},
+ "replication_state": {"readonly": True},
+ "source_resource_location": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "id": {"key": "id", "type": "str"},
+ "name": {"key": "name", "type": "str"},
+ "type": {"key": "type", "type": "str"},
+ "time_created": {"key": "properties.timeCreated", "type": "iso-8601"},
+ "source_resource_id": {"key": "properties.sourceResourceId", "type": "str"},
+ "os_type": {"key": "properties.osType", "type": "str"},
+ "hyper_v_generation": {"key": "properties.hyperVGeneration", "type": "str"},
+ "purchase_plan": {"key": "properties.purchasePlan", "type": "PurchasePlan"},
+ "supported_capabilities": {"key": "properties.supportedCapabilities", "type": "SupportedCapabilities"},
+ "family_id": {"key": "properties.familyId", "type": "str"},
+ "source_unique_id": {"key": "properties.sourceUniqueId", "type": "str"},
+ "encryption": {"key": "properties.encryption", "type": "Encryption"},
+ "supports_hibernation": {"key": "properties.supportsHibernation", "type": "bool"},
+ "network_access_policy": {"key": "properties.networkAccessPolicy", "type": "str"},
+ "public_network_access": {"key": "properties.publicNetworkAccess", "type": "str"},
+ "disk_access_id": {"key": "properties.diskAccessId", "type": "str"},
+ "completion_percent": {"key": "properties.completionPercent", "type": "float"},
+ "replication_state": {"key": "properties.replicationState", "type": "str"},
+ "source_resource_location": {"key": "properties.sourceResourceLocation", "type": "str"},
+ "security_profile": {"key": "properties.securityProfile", "type": "DiskSecurityProfile"},
+ }
+
+ def __init__(
+ self,
+ *,
+ hyper_v_generation: Optional[Union[str, "_models.HyperVGeneration"]] = None,
+ purchase_plan: Optional["_models.PurchasePlan"] = None,
+ supported_capabilities: Optional["_models.SupportedCapabilities"] = None,
+ supports_hibernation: Optional[bool] = None,
+ network_access_policy: Optional[Union[str, "_models.NetworkAccessPolicy"]] = None,
+ public_network_access: Optional[Union[str, "_models.PublicNetworkAccess"]] = None,
+ disk_access_id: Optional[str] = None,
+ completion_percent: Optional[float] = None,
+ security_profile: Optional["_models.DiskSecurityProfile"] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword hyper_v_generation: The hypervisor generation of the Virtual Machine. Applicable to OS
+ disks only. Known values are: "V1" and "V2".
+ :paramtype hyper_v_generation: str or ~azure.mgmt.compute.v2023_01_02.models.HyperVGeneration
+ :keyword purchase_plan: Purchase plan information for the image from which the OS disk was
+ created.
+ :paramtype purchase_plan: ~azure.mgmt.compute.v2023_01_02.models.PurchasePlan + :keyword supported_capabilities: List of supported capabilities for the image from which the OS + disk was created. + :paramtype supported_capabilities: ~azure.mgmt.compute.v2023_01_02.models.SupportedCapabilities + :keyword supports_hibernation: Indicates the OS on a disk supports hibernation. + :paramtype supports_hibernation: bool + :keyword network_access_policy: Policy for accessing the disk via network. Known values are: + "AllowAll", "AllowPrivate", and "DenyAll". + :paramtype network_access_policy: str or + ~azure.mgmt.compute.v2023_01_02.models.NetworkAccessPolicy + :keyword public_network_access: Policy for controlling export on the disk. Known values are: + "Enabled" and "Disabled". + :paramtype public_network_access: str or + ~azure.mgmt.compute.v2023_01_02.models.PublicNetworkAccess + :keyword disk_access_id: ARM id of the DiskAccess resource for using private endpoints on + disks. + :paramtype disk_access_id: str + :keyword completion_percent: Percentage complete for the background copy of disk restore point + when source resource is from a different region. + :paramtype completion_percent: float + :keyword security_profile: Contains the security related information for the resource. + :paramtype security_profile: ~azure.mgmt.compute.v2023_01_02.models.DiskSecurityProfile + """ + super().__init__(**kwargs) + self.time_created = None + self.source_resource_id = None + self.os_type = None + self.hyper_v_generation = hyper_v_generation + self.purchase_plan = purchase_plan + self.supported_capabilities = supported_capabilities + self.family_id = None + self.source_unique_id = None + self.encryption = None + self.supports_hibernation = supports_hibernation + self.network_access_policy = network_access_policy + self.public_network_access = public_network_access + self.disk_access_id = disk_access_id + self.completion_percent = completion_percent + self.replication_state = None + self.source_resource_location = None + self.security_profile = security_profile + + +class DiskRestorePointList(_serialization.Model): + """The List Disk Restore Points operation response. + + All required parameters must be populated in order to send to Azure. + + :ivar value: A list of disk restore points. Required. + :vartype value: list[~azure.mgmt.compute.v2023_01_02.models.DiskRestorePoint] + :ivar next_link: The uri to fetch the next page of disk restore points. Call ListNext() with + this to fetch the next page of disk restore points. + :vartype next_link: str + """ + + _validation = { + "value": {"required": True}, + } + + _attribute_map = { + "value": {"key": "value", "type": "[DiskRestorePoint]"}, + "next_link": {"key": "nextLink", "type": "str"}, + } + + def __init__( + self, *, value: List["_models.DiskRestorePoint"], next_link: Optional[str] = None, **kwargs: Any + ) -> None: + """ + :keyword value: A list of disk restore points. Required. + :paramtype value: list[~azure.mgmt.compute.v2023_01_02.models.DiskRestorePoint] + :keyword next_link: The uri to fetch the next page of disk restore points. Call ListNext() with + this to fetch the next page of disk restore points. + :paramtype next_link: str + """ + super().__init__(**kwargs) + self.value = value + self.next_link = next_link + + +class DiskSecurityProfile(_serialization.Model): + """Contains the security related information for the resource. + + :ivar security_type: Specifies the SecurityType of the VM. Applicable for OS disks only. 
Known + values are: "TrustedLaunch", "ConfidentialVM_VMGuestStateOnlyEncryptedWithPlatformKey", + "ConfidentialVM_DiskEncryptedWithPlatformKey", and + "ConfidentialVM_DiskEncryptedWithCustomerKey". + :vartype security_type: str or ~azure.mgmt.compute.v2023_01_02.models.DiskSecurityTypes + :ivar secure_vm_disk_encryption_set_id: ResourceId of the disk encryption set associated to + Confidential VM supported disk encrypted with customer managed key. + :vartype secure_vm_disk_encryption_set_id: str + """ + + _attribute_map = { + "security_type": {"key": "securityType", "type": "str"}, + "secure_vm_disk_encryption_set_id": {"key": "secureVMDiskEncryptionSetId", "type": "str"}, + } + + def __init__( + self, + *, + security_type: Optional[Union[str, "_models.DiskSecurityTypes"]] = None, + secure_vm_disk_encryption_set_id: Optional[str] = None, + **kwargs: Any + ) -> None: + """ + :keyword security_type: Specifies the SecurityType of the VM. Applicable for OS disks only. + Known values are: "TrustedLaunch", "ConfidentialVM_VMGuestStateOnlyEncryptedWithPlatformKey", + "ConfidentialVM_DiskEncryptedWithPlatformKey", and + "ConfidentialVM_DiskEncryptedWithCustomerKey". + :paramtype security_type: str or ~azure.mgmt.compute.v2023_01_02.models.DiskSecurityTypes + :keyword secure_vm_disk_encryption_set_id: ResourceId of the disk encryption set associated to + Confidential VM supported disk encrypted with customer managed key. + :paramtype secure_vm_disk_encryption_set_id: str + """ + super().__init__(**kwargs) + self.security_type = security_type + self.secure_vm_disk_encryption_set_id = secure_vm_disk_encryption_set_id + + +class DiskSku(_serialization.Model): + """The disks sku name. Can be Standard_LRS, Premium_LRS, StandardSSD_LRS, UltraSSD_LRS, + Premium_ZRS, StandardSSD_ZRS, or PremiumV2_LRS. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar name: The sku name. Known values are: "Standard_LRS", "Premium_LRS", "StandardSSD_LRS", + "UltraSSD_LRS", "Premium_ZRS", "StandardSSD_ZRS", and "PremiumV2_LRS". + :vartype name: str or ~azure.mgmt.compute.v2023_01_02.models.DiskStorageAccountTypes + :ivar tier: The sku tier. + :vartype tier: str + """ + + _validation = { + "tier": {"readonly": True}, + } + + _attribute_map = { + "name": {"key": "name", "type": "str"}, + "tier": {"key": "tier", "type": "str"}, + } + + def __init__(self, *, name: Optional[Union[str, "_models.DiskStorageAccountTypes"]] = None, **kwargs: Any) -> None: + """ + :keyword name: The sku name. Known values are: "Standard_LRS", "Premium_LRS", + "StandardSSD_LRS", "UltraSSD_LRS", "Premium_ZRS", "StandardSSD_ZRS", and "PremiumV2_LRS". + :paramtype name: str or ~azure.mgmt.compute.v2023_01_02.models.DiskStorageAccountTypes + """ + super().__init__(**kwargs) + self.name = name + self.tier = None + + +class DiskUpdate(_serialization.Model): # pylint: disable=too-many-instance-attributes + """Disk update resource. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar tags: Resource tags. + :vartype tags: dict[str, str] + :ivar sku: The disks sku name. Can be Standard_LRS, Premium_LRS, StandardSSD_LRS, UltraSSD_LRS, + Premium_ZRS, StandardSSD_ZRS, or PremiumV2_LRS. + :vartype sku: ~azure.mgmt.compute.v2023_01_02.models.DiskSku + :ivar os_type: the Operating System type. Known values are: "Windows" and "Linux". 
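# --- Illustrative usage sketch (editorial note, not part of the generated patch) ---
# Shows how the DiskSecurityProfile and DiskSku models above are typically populated when
# describing a Confidential VM OS disk. The disk encryption set resource id is a
# hypothetical placeholder.
from azure.mgmt.compute.v2023_01_02 import models

sku = models.DiskSku(name="Premium_LRS")  # `tier` is read-only and returned by the service

security_profile = models.DiskSecurityProfile(
    security_type="ConfidentialVM_DiskEncryptedWithCustomerKey",
    secure_vm_disk_encryption_set_id=(
        "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/my-rg"
        "/providers/Microsoft.Compute/diskEncryptionSets/my-cvm-des"
    ),
)
# --- end of sketch ---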
+ :vartype os_type: str or ~azure.mgmt.compute.v2023_01_02.models.OperatingSystemTypes
+ :ivar disk_size_gb: If creationData.createOption is Empty, this field is mandatory and it
+ indicates the size of the disk to create. If this field is present for updates or creation with
+ other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a
+ running VM, and can only increase the disk's size.
+ :vartype disk_size_gb: int
+ :ivar encryption_settings_collection: Encryption settings collection used by Azure Disk
+ Encryption, can contain multiple encryption settings per disk or snapshot.
+ :vartype encryption_settings_collection:
+ ~azure.mgmt.compute.v2023_01_02.models.EncryptionSettingsCollection
+ :ivar disk_iops_read_write: The number of IOPS allowed for this disk; only settable for
+ UltraSSD disks. One operation can transfer between 4k and 256k bytes.
+ :vartype disk_iops_read_write: int
+ :ivar disk_m_bps_read_write: The bandwidth allowed for this disk; only settable for UltraSSD
+ disks. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of
+ 10.
+ :vartype disk_m_bps_read_write: int
+ :ivar disk_iops_read_only: The total number of IOPS that will be allowed across all VMs
+ mounting the shared disk as ReadOnly. One operation can transfer between 4k and 256k bytes.
+ :vartype disk_iops_read_only: int
+ :ivar disk_m_bps_read_only: The total throughput (MBps) that will be allowed across all VMs
+ mounting the shared disk as ReadOnly. MBps means millions of bytes per second - MB here uses
+ the ISO notation, of powers of 10.
+ :vartype disk_m_bps_read_only: int
+ :ivar max_shares: The maximum number of VMs that can attach to the disk at the same time. Value
+ greater than one indicates a disk that can be mounted on multiple VMs at the same time.
+ :vartype max_shares: int
+ :ivar encryption: Encryption property can be used to encrypt data at rest with customer managed
+ keys or platform managed keys.
+ :vartype encryption: ~azure.mgmt.compute.v2023_01_02.models.Encryption
+ :ivar network_access_policy: Policy for accessing the disk via network. Known values are:
+ "AllowAll", "AllowPrivate", and "DenyAll".
+ :vartype network_access_policy: str or
+ ~azure.mgmt.compute.v2023_01_02.models.NetworkAccessPolicy
+ :ivar disk_access_id: ARM id of the DiskAccess resource for using private endpoints on disks.
+ :vartype disk_access_id: str
+ :ivar tier: Performance tier of the disk (e.g., P4, S10) as described here:
+ https://azure.microsoft.com/en-us/pricing/details/managed-disks/. Does not apply to Ultra
+ disks.
+ :vartype tier: str
+ :ivar bursting_enabled: Set to true to enable bursting beyond the provisioned performance
+ target of the disk. Bursting is disabled by default. Does not apply to Ultra disks.
+ :vartype bursting_enabled: bool
+ :ivar purchase_plan: Purchase plan information to be added on the OS disk.
+ :vartype purchase_plan: ~azure.mgmt.compute.v2023_01_02.models.PurchasePlan
+ :ivar supported_capabilities: List of supported capabilities to be added on the OS disk.
+ :vartype supported_capabilities: ~azure.mgmt.compute.v2023_01_02.models.SupportedCapabilities
+ :ivar property_updates_in_progress: Properties of the disk for which update is pending.
+ :vartype property_updates_in_progress:
+ ~azure.mgmt.compute.v2023_01_02.models.PropertyUpdatesInProgress
+ :ivar supports_hibernation: Indicates the OS on a disk supports hibernation.
+ :vartype supports_hibernation: bool
+ :ivar public_network_access: Policy for controlling export on the disk. Known values are:
+ "Enabled" and "Disabled".
+ :vartype public_network_access: str or
+ ~azure.mgmt.compute.v2023_01_02.models.PublicNetworkAccess
+ :ivar data_access_auth_mode: Additional authentication requirements when exporting or uploading
+ to a disk or snapshot. Known values are: "AzureActiveDirectory" and "None".
+ :vartype data_access_auth_mode: str or
+ ~azure.mgmt.compute.v2023_01_02.models.DataAccessAuthMode
+ :ivar optimized_for_frequent_attach: Setting this property to true improves reliability and
+ performance of data disks that are frequently (more than 5 times a day) detached from one
+ virtual machine and attached to another. This property should not be set for disks that are not
+ detached and attached frequently as it causes the disks to not align with the fault domain of
+ the virtual machine.
+ :vartype optimized_for_frequent_attach: bool
+ """
+
+ _validation = {
+ "property_updates_in_progress": {"readonly": True},
+ }
+
+ _attribute_map = {
+ "tags": {"key": "tags", "type": "{str}"},
+ "sku": {"key": "sku", "type": "DiskSku"},
+ "os_type": {"key": "properties.osType", "type": "str"},
+ "disk_size_gb": {"key": "properties.diskSizeGB", "type": "int"},
+ "encryption_settings_collection": {
+ "key": "properties.encryptionSettingsCollection",
+ "type": "EncryptionSettingsCollection",
+ },
+ "disk_iops_read_write": {"key": "properties.diskIOPSReadWrite", "type": "int"},
+ "disk_m_bps_read_write": {"key": "properties.diskMBpsReadWrite", "type": "int"},
+ "disk_iops_read_only": {"key": "properties.diskIOPSReadOnly", "type": "int"},
+ "disk_m_bps_read_only": {"key": "properties.diskMBpsReadOnly", "type": "int"},
+ "max_shares": {"key": "properties.maxShares", "type": "int"},
+ "encryption": {"key": "properties.encryption", "type": "Encryption"},
+ "network_access_policy": {"key": "properties.networkAccessPolicy", "type": "str"},
+ "disk_access_id": {"key": "properties.diskAccessId", "type": "str"},
+ "tier": {"key": "properties.tier", "type": "str"},
+ "bursting_enabled": {"key": "properties.burstingEnabled", "type": "bool"},
+ "purchase_plan": {"key": "properties.purchasePlan", "type": "PurchasePlan"},
+ "supported_capabilities": {"key": "properties.supportedCapabilities", "type": "SupportedCapabilities"},
+ "property_updates_in_progress": {
+ "key": "properties.propertyUpdatesInProgress",
+ "type": "PropertyUpdatesInProgress",
+ },
+ "supports_hibernation": {"key": "properties.supportsHibernation", "type": "bool"},
+ "public_network_access": {"key": "properties.publicNetworkAccess", "type": "str"},
+ "data_access_auth_mode": {"key": "properties.dataAccessAuthMode", "type": "str"},
+ "optimized_for_frequent_attach": {"key": "properties.optimizedForFrequentAttach", "type": "bool"},
+ }
+
+ def __init__(
+ self,
+ *,
+ tags: Optional[Dict[str, str]] = None,
+ sku: Optional["_models.DiskSku"] = None,
+ os_type: Optional[Union[str, "_models.OperatingSystemTypes"]] = None,
+ disk_size_gb: Optional[int] = None,
+ encryption_settings_collection: Optional["_models.EncryptionSettingsCollection"] = None,
+ disk_iops_read_write: Optional[int] = None,
+ disk_m_bps_read_write: Optional[int] = None,
+ disk_iops_read_only: Optional[int] = None,
+ disk_m_bps_read_only: Optional[int] = None,
+ max_shares: Optional[int] = None,
+ encryption: Optional["_models.Encryption"] = None,
+ network_access_policy: Optional[Union[str, "_models.NetworkAccessPolicy"]] = None,
+ disk_access_id: Optional[str] = None,
+ tier: Optional[str] = None,
+ bursting_enabled: Optional[bool] = None,
+ purchase_plan: Optional["_models.PurchasePlan"] = None,
+ supported_capabilities: Optional["_models.SupportedCapabilities"] = None,
+ supports_hibernation: Optional[bool] = None,
+ public_network_access: Optional[Union[str, "_models.PublicNetworkAccess"]] = None,
+ data_access_auth_mode: Optional[Union[str, "_models.DataAccessAuthMode"]] = None,
+ optimized_for_frequent_attach: Optional[bool] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword tags: Resource tags.
+ :paramtype tags: dict[str, str]
+ :keyword sku: The disks sku name. Can be Standard_LRS, Premium_LRS, StandardSSD_LRS,
+ UltraSSD_LRS, Premium_ZRS, StandardSSD_ZRS, or PremiumV2_LRS.
+ :paramtype sku: ~azure.mgmt.compute.v2023_01_02.models.DiskSku
+ :keyword os_type: the Operating System type. Known values are: "Windows" and "Linux".
+ :paramtype os_type: str or ~azure.mgmt.compute.v2023_01_02.models.OperatingSystemTypes
+ :keyword disk_size_gb: If creationData.createOption is Empty, this field is mandatory and it
+ indicates the size of the disk to create. If this field is present for updates or creation with
+ other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a
+ running VM, and can only increase the disk's size.
+ :paramtype disk_size_gb: int
+ :keyword encryption_settings_collection: Encryption settings collection used by Azure Disk
+ Encryption, can contain multiple encryption settings per disk or snapshot.
+ :paramtype encryption_settings_collection:
+ ~azure.mgmt.compute.v2023_01_02.models.EncryptionSettingsCollection
+ :keyword disk_iops_read_write: The number of IOPS allowed for this disk; only settable for
+ UltraSSD disks. One operation can transfer between 4k and 256k bytes.
+ :paramtype disk_iops_read_write: int
+ :keyword disk_m_bps_read_write: The bandwidth allowed for this disk; only settable for UltraSSD
+ disks. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of
+ 10.
+ :paramtype disk_m_bps_read_write: int
+ :keyword disk_iops_read_only: The total number of IOPS that will be allowed across all VMs
+ mounting the shared disk as ReadOnly. One operation can transfer between 4k and 256k bytes.
+ :paramtype disk_iops_read_only: int
+ :keyword disk_m_bps_read_only: The total throughput (MBps) that will be allowed across all VMs
+ mounting the shared disk as ReadOnly. MBps means millions of bytes per second - MB here uses
+ the ISO notation, of powers of 10.
+ :paramtype disk_m_bps_read_only: int
+ :keyword max_shares: The maximum number of VMs that can attach to the disk at the same time.
+ Value greater than one indicates a disk that can be mounted on multiple VMs at the same time.
+ :paramtype max_shares: int
+ :keyword encryption: Encryption property can be used to encrypt data at rest with customer
+ managed keys or platform managed keys.
+ :paramtype encryption: ~azure.mgmt.compute.v2023_01_02.models.Encryption
+ :keyword network_access_policy: Policy for accessing the disk via network. Known values are:
+ "AllowAll", "AllowPrivate", and "DenyAll".
+ :paramtype network_access_policy: str or
+ ~azure.mgmt.compute.v2023_01_02.models.NetworkAccessPolicy
+ :keyword disk_access_id: ARM id of the DiskAccess resource for using private endpoints on
+ disks.
+ :paramtype disk_access_id: str
+ :keyword tier: Performance tier of the disk (e.g., P4, S10) as described here:
+ https://azure.microsoft.com/en-us/pricing/details/managed-disks/. Does not apply to Ultra
+ disks.
+ :paramtype tier: str
+ :keyword bursting_enabled: Set to true to enable bursting beyond the provisioned performance
+ target of the disk. Bursting is disabled by default. Does not apply to Ultra disks.
+ :paramtype bursting_enabled: bool
+ :keyword purchase_plan: Purchase plan information to be added on the OS disk.
+ :paramtype purchase_plan: ~azure.mgmt.compute.v2023_01_02.models.PurchasePlan
+ :keyword supported_capabilities: List of supported capabilities to be added on the OS disk.
+ :paramtype supported_capabilities: ~azure.mgmt.compute.v2023_01_02.models.SupportedCapabilities
+ :keyword supports_hibernation: Indicates the OS on a disk supports hibernation.
+ :paramtype supports_hibernation: bool
+ :keyword public_network_access: Policy for controlling export on the disk. Known values are:
+ "Enabled" and "Disabled".
+ :paramtype public_network_access: str or
+ ~azure.mgmt.compute.v2023_01_02.models.PublicNetworkAccess
+ :keyword data_access_auth_mode: Additional authentication requirements when exporting or
+ uploading to a disk or snapshot. Known values are: "AzureActiveDirectory" and "None".
+ :paramtype data_access_auth_mode: str or
+ ~azure.mgmt.compute.v2023_01_02.models.DataAccessAuthMode
+ :keyword optimized_for_frequent_attach: Setting this property to true improves reliability and
+ performance of data disks that are frequently (more than 5 times a day) detached from one
+ virtual machine and attached to another. This property should not be set for disks that are not
+ detached and attached frequently as it causes the disks to not align with the fault domain of
+ the virtual machine.
+ :paramtype optimized_for_frequent_attach: bool
+ """
+ super().__init__(**kwargs)
+ self.tags = tags
+ self.sku = sku
+ self.os_type = os_type
+ self.disk_size_gb = disk_size_gb
+ self.encryption_settings_collection = encryption_settings_collection
+ self.disk_iops_read_write = disk_iops_read_write
+ self.disk_m_bps_read_write = disk_m_bps_read_write
+ self.disk_iops_read_only = disk_iops_read_only
+ self.disk_m_bps_read_only = disk_m_bps_read_only
+ self.max_shares = max_shares
+ self.encryption = encryption
+ self.network_access_policy = network_access_policy
+ self.disk_access_id = disk_access_id
+ self.tier = tier
+ self.bursting_enabled = bursting_enabled
+ self.purchase_plan = purchase_plan
+ self.supported_capabilities = supported_capabilities
+ self.property_updates_in_progress = None
+ self.supports_hibernation = supports_hibernation
+ self.public_network_access = public_network_access
+ self.data_access_auth_mode = data_access_auth_mode
+ self.optimized_for_frequent_attach = optimized_for_frequent_attach
+
+
+class Encryption(_serialization.Model):
+ """Encryption at rest settings for disk or snapshot.
+
+ :ivar disk_encryption_set_id: ResourceId of the disk encryption set to use for enabling
+ encryption at rest.
+ :vartype disk_encryption_set_id: str
+ :ivar type: The type of key used to encrypt the data of the disk. Known values are:
+ "EncryptionAtRestWithPlatformKey", "EncryptionAtRestWithCustomerKey", and
+ "EncryptionAtRestWithPlatformAndCustomerKeys".
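# --- Illustrative usage sketch (editorial note, not part of the generated patch) ---
# A minimal DiskUpdate payload using the model defined above: grow the disk, move it to a
# higher performance tier and enable bursting. Resource names are hypothetical, and the
# `disks.begin_update` call is assumed from this package's management client.
from azure.mgmt.compute.v2023_01_02 import models

disk_update = models.DiskUpdate(
    disk_size_gb=256,                       # resizes may only increase the disk's size
    tier="P20",                             # does not apply to Ultra disks
    bursting_enabled=True,
    sku=models.DiskSku(name="Premium_LRS"),
)
# Example (assumed operations-group call):
# client.disks.begin_update("my-rg", "my-disk", disk_update).result()
# --- end of sketch ---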
+ :vartype type: str or ~azure.mgmt.compute.v2023_01_02.models.EncryptionType + """ + + _attribute_map = { + "disk_encryption_set_id": {"key": "diskEncryptionSetId", "type": "str"}, + "type": {"key": "type", "type": "str"}, + } + + def __init__( + self, + *, + disk_encryption_set_id: Optional[str] = None, + type: Optional[Union[str, "_models.EncryptionType"]] = None, + **kwargs: Any + ) -> None: + """ + :keyword disk_encryption_set_id: ResourceId of the disk encryption set to use for enabling + encryption at rest. + :paramtype disk_encryption_set_id: str + :keyword type: The type of key used to encrypt the data of the disk. Known values are: + "EncryptionAtRestWithPlatformKey", "EncryptionAtRestWithCustomerKey", and + "EncryptionAtRestWithPlatformAndCustomerKeys". + :paramtype type: str or ~azure.mgmt.compute.v2023_01_02.models.EncryptionType + """ + super().__init__(**kwargs) + self.disk_encryption_set_id = disk_encryption_set_id + self.type = type + + +class EncryptionSetIdentity(_serialization.Model): + """The managed identity for the disk encryption set. It should be given permission on the key + vault before it can be used to encrypt disks. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar type: The type of Managed Identity used by the DiskEncryptionSet. Only SystemAssigned is + supported for new creations. Disk Encryption Sets can be updated with Identity type None during + migration of subscription to a new Azure Active Directory tenant; it will cause the encrypted + resources to lose access to the keys. Known values are: "SystemAssigned", "UserAssigned", + "SystemAssigned, UserAssigned", and "None". + :vartype type: str or ~azure.mgmt.compute.v2023_01_02.models.DiskEncryptionSetIdentityType + :ivar principal_id: The object id of the Managed Identity Resource. This will be sent to the RP + from ARM via the x-ms-identity-principal-id header in the PUT request if the resource has a + systemAssigned(implicit) identity. + :vartype principal_id: str + :ivar tenant_id: The tenant id of the Managed Identity Resource. This will be sent to the RP + from ARM via the x-ms-client-tenant-id header in the PUT request if the resource has a + systemAssigned(implicit) identity. + :vartype tenant_id: str + :ivar user_assigned_identities: The list of user identities associated with the disk encryption + set. The user identity dictionary key references will be ARM resource ids in the form: + '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. + :vartype user_assigned_identities: dict[str, + ~azure.mgmt.compute.v2023_01_02.models.UserAssignedIdentitiesValue] + """ + + _validation = { + "principal_id": {"readonly": True}, + "tenant_id": {"readonly": True}, + } + + _attribute_map = { + "type": {"key": "type", "type": "str"}, + "principal_id": {"key": "principalId", "type": "str"}, + "tenant_id": {"key": "tenantId", "type": "str"}, + "user_assigned_identities": {"key": "userAssignedIdentities", "type": "{UserAssignedIdentitiesValue}"}, + } + + def __init__( + self, + *, + type: Optional[Union[str, "_models.DiskEncryptionSetIdentityType"]] = None, + user_assigned_identities: Optional[Dict[str, "_models.UserAssignedIdentitiesValue"]] = None, + **kwargs: Any + ) -> None: + """ + :keyword type: The type of Managed Identity used by the DiskEncryptionSet. Only SystemAssigned + is supported for new creations. 
Disk Encryption Sets can be updated with Identity type None
+ during migration of subscription to a new Azure Active Directory tenant; it will cause the
+ encrypted resources to lose access to the keys. Known values are: "SystemAssigned",
+ "UserAssigned", "SystemAssigned, UserAssigned", and "None".
+ :paramtype type: str or ~azure.mgmt.compute.v2023_01_02.models.DiskEncryptionSetIdentityType
+ :keyword user_assigned_identities: The list of user identities associated with the disk
+ encryption set. The user identity dictionary key references will be ARM resource ids in the
+ form:
+ '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
+ :paramtype user_assigned_identities: dict[str,
+ ~azure.mgmt.compute.v2023_01_02.models.UserAssignedIdentitiesValue]
+ """
+ super().__init__(**kwargs)
+ self.type = type
+ self.principal_id = None
+ self.tenant_id = None
+ self.user_assigned_identities = user_assigned_identities
+
+
+class EncryptionSettingsCollection(_serialization.Model):
+ """Encryption settings for disk or snapshot.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar enabled: Set this flag to true and provide DiskEncryptionKey and optional
+ KeyEncryptionKey to enable encryption. Set this flag to false and remove DiskEncryptionKey and
+ KeyEncryptionKey to disable encryption. If EncryptionSettings is null in the request object,
+ the existing settings remain unchanged. Required.
+ :vartype enabled: bool
+ :ivar encryption_settings: A collection of encryption settings, one for each disk volume.
+ :vartype encryption_settings:
+ list[~azure.mgmt.compute.v2023_01_02.models.EncryptionSettingsElement]
+ :ivar encryption_settings_version: Describes what type of encryption is used for the disks.
+ Once this field is set, it cannot be overwritten. '1.0' corresponds to Azure Disk Encryption
+ with AAD app. '1.1' corresponds to Azure Disk Encryption.
+ :vartype encryption_settings_version: str
+ """
+
+ _validation = {
+ "enabled": {"required": True},
+ }
+
+ _attribute_map = {
+ "enabled": {"key": "enabled", "type": "bool"},
+ "encryption_settings": {"key": "encryptionSettings", "type": "[EncryptionSettingsElement]"},
+ "encryption_settings_version": {"key": "encryptionSettingsVersion", "type": "str"},
+ }
+
+ def __init__(
+ self,
+ *,
+ enabled: bool,
+ encryption_settings: Optional[List["_models.EncryptionSettingsElement"]] = None,
+ encryption_settings_version: Optional[str] = None,
+ **kwargs: Any
+ ) -> None:
+ """
+ :keyword enabled: Set this flag to true and provide DiskEncryptionKey and optional
+ KeyEncryptionKey to enable encryption. Set this flag to false and remove DiskEncryptionKey and
+ KeyEncryptionKey to disable encryption. If EncryptionSettings is null in the request object,
+ the existing settings remain unchanged. Required.
+ :paramtype enabled: bool
+ :keyword encryption_settings: A collection of encryption settings, one for each disk volume.
+ :paramtype encryption_settings:
+ list[~azure.mgmt.compute.v2023_01_02.models.EncryptionSettingsElement]
+ :keyword encryption_settings_version: Describes what type of encryption is used for the disks.
+ Once this field is set, it cannot be overwritten. '1.0' corresponds to Azure Disk Encryption
+ with AAD app. '1.1' corresponds to Azure Disk Encryption.
+ :paramtype encryption_settings_version: str + """ + super().__init__(**kwargs) + self.enabled = enabled + self.encryption_settings = encryption_settings + self.encryption_settings_version = encryption_settings_version + + +class EncryptionSettingsElement(_serialization.Model): + """Encryption settings for one disk volume. + + :ivar disk_encryption_key: Key Vault Secret Url and vault id of the disk encryption key. + :vartype disk_encryption_key: ~azure.mgmt.compute.v2023_01_02.models.KeyVaultAndSecretReference + :ivar key_encryption_key: Key Vault Key Url and vault id of the key encryption key. + KeyEncryptionKey is optional and when provided is used to unwrap the disk encryption key. + :vartype key_encryption_key: ~azure.mgmt.compute.v2023_01_02.models.KeyVaultAndKeyReference + """ + + _attribute_map = { + "disk_encryption_key": {"key": "diskEncryptionKey", "type": "KeyVaultAndSecretReference"}, + "key_encryption_key": {"key": "keyEncryptionKey", "type": "KeyVaultAndKeyReference"}, + } + + def __init__( + self, + *, + disk_encryption_key: Optional["_models.KeyVaultAndSecretReference"] = None, + key_encryption_key: Optional["_models.KeyVaultAndKeyReference"] = None, + **kwargs: Any + ) -> None: + """ + :keyword disk_encryption_key: Key Vault Secret Url and vault id of the disk encryption key. + :paramtype disk_encryption_key: + ~azure.mgmt.compute.v2023_01_02.models.KeyVaultAndSecretReference + :keyword key_encryption_key: Key Vault Key Url and vault id of the key encryption key. + KeyEncryptionKey is optional and when provided is used to unwrap the disk encryption key. + :paramtype key_encryption_key: ~azure.mgmt.compute.v2023_01_02.models.KeyVaultAndKeyReference + """ + super().__init__(**kwargs) + self.disk_encryption_key = disk_encryption_key + self.key_encryption_key = key_encryption_key + + +class ExtendedLocation(_serialization.Model): + """The complex type of the extended location. + + :ivar name: The name of the extended location. + :vartype name: str + :ivar type: The type of the extended location. "EdgeZone" + :vartype type: str or ~azure.mgmt.compute.v2023_01_02.models.ExtendedLocationTypes + """ + + _attribute_map = { + "name": {"key": "name", "type": "str"}, + "type": {"key": "type", "type": "str"}, + } + + def __init__( + self, + *, + name: Optional[str] = None, + type: Optional[Union[str, "_models.ExtendedLocationTypes"]] = None, + **kwargs: Any + ) -> None: + """ + :keyword name: The name of the extended location. + :paramtype name: str + :keyword type: The type of the extended location. "EdgeZone" + :paramtype type: str or ~azure.mgmt.compute.v2023_01_02.models.ExtendedLocationTypes + """ + super().__init__(**kwargs) + self.name = name + self.type = type + + +class GrantAccessData(_serialization.Model): + """Data used for requesting a SAS. + + All required parameters must be populated in order to send to Azure. + + :ivar access: Required. Known values are: "None", "Read", and "Write". + :vartype access: str or ~azure.mgmt.compute.v2023_01_02.models.AccessLevel + :ivar duration_in_seconds: Time duration in seconds until the SAS access expires. Required. + :vartype duration_in_seconds: int + :ivar get_secure_vm_guest_state_sas: Set this flag to true to get additional SAS for VM guest + state. + :vartype get_secure_vm_guest_state_sas: bool + :ivar file_format: Used to specify the file format when making request for SAS on a VHDX file + format snapshot. Known values are: "VHD" and "VHDX". 
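# --- Illustrative usage sketch (editorial note, not part of the generated patch) ---
# Shows how the Azure Disk Encryption settings models above fit together. The vault resource
# id and secret/key URLs are hypothetical placeholders, and SourceVault is assumed to take
# the vault resource id via its `id` keyword, as in neighbouring API versions of this package.
from azure.mgmt.compute.v2023_01_02 import models

vault = models.SourceVault(
    id="/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/my-rg"
    "/providers/Microsoft.KeyVault/vaults/my-vault"
)

ade_settings = models.EncryptionSettingsCollection(
    enabled=True,
    encryption_settings=[
        models.EncryptionSettingsElement(
            disk_encryption_key=models.KeyVaultAndSecretReference(
                source_vault=vault,
                secret_url="https://my-vault.vault.azure.net/secrets/disk-secret/<version>",
            ),
            key_encryption_key=models.KeyVaultAndKeyReference(
                source_vault=vault,
                key_url="https://my-vault.vault.azure.net/keys/kek/<version>",
            ),
        )
    ],
)
# --- end of sketch ---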
+ :vartype file_format: str or ~azure.mgmt.compute.v2023_01_02.models.FileFormat + """ + + _validation = { + "access": {"required": True}, + "duration_in_seconds": {"required": True}, + } + + _attribute_map = { + "access": {"key": "access", "type": "str"}, + "duration_in_seconds": {"key": "durationInSeconds", "type": "int"}, + "get_secure_vm_guest_state_sas": {"key": "getSecureVMGuestStateSAS", "type": "bool"}, + "file_format": {"key": "fileFormat", "type": "str"}, + } + + def __init__( + self, + *, + access: Union[str, "_models.AccessLevel"], + duration_in_seconds: int, + get_secure_vm_guest_state_sas: Optional[bool] = None, + file_format: Optional[Union[str, "_models.FileFormat"]] = None, + **kwargs: Any + ) -> None: + """ + :keyword access: Required. Known values are: "None", "Read", and "Write". + :paramtype access: str or ~azure.mgmt.compute.v2023_01_02.models.AccessLevel + :keyword duration_in_seconds: Time duration in seconds until the SAS access expires. Required. + :paramtype duration_in_seconds: int + :keyword get_secure_vm_guest_state_sas: Set this flag to true to get additional SAS for VM + guest state. + :paramtype get_secure_vm_guest_state_sas: bool + :keyword file_format: Used to specify the file format when making request for SAS on a VHDX + file format snapshot. Known values are: "VHD" and "VHDX". + :paramtype file_format: str or ~azure.mgmt.compute.v2023_01_02.models.FileFormat + """ + super().__init__(**kwargs) + self.access = access + self.duration_in_seconds = duration_in_seconds + self.get_secure_vm_guest_state_sas = get_secure_vm_guest_state_sas + self.file_format = file_format + + +class ImageDiskReference(_serialization.Model): + """The source image used for creating the disk. + + :ivar id: A relative uri containing either a Platform Image Repository, user image, or Azure + Compute Gallery image reference. + :vartype id: str + :ivar shared_gallery_image_id: A relative uri containing a direct shared Azure Compute Gallery + image reference. + :vartype shared_gallery_image_id: str + :ivar community_gallery_image_id: A relative uri containing a community Azure Compute Gallery + image reference. + :vartype community_gallery_image_id: str + :ivar lun: If the disk is created from an image's data disk, this is an index that indicates + which of the data disks in the image to use. For OS disks, this field is null. + :vartype lun: int + """ + + _attribute_map = { + "id": {"key": "id", "type": "str"}, + "shared_gallery_image_id": {"key": "sharedGalleryImageId", "type": "str"}, + "community_gallery_image_id": {"key": "communityGalleryImageId", "type": "str"}, + "lun": {"key": "lun", "type": "int"}, + } + + def __init__( + self, + *, + id: Optional[str] = None, # pylint: disable=redefined-builtin + shared_gallery_image_id: Optional[str] = None, + community_gallery_image_id: Optional[str] = None, + lun: Optional[int] = None, + **kwargs: Any + ) -> None: + """ + :keyword id: A relative uri containing either a Platform Image Repository, user image, or Azure + Compute Gallery image reference. + :paramtype id: str + :keyword shared_gallery_image_id: A relative uri containing a direct shared Azure Compute + Gallery image reference. + :paramtype shared_gallery_image_id: str + :keyword community_gallery_image_id: A relative uri containing a community Azure Compute + Gallery image reference. + :paramtype community_gallery_image_id: str + :keyword lun: If the disk is created from an image's data disk, this is an index that indicates + which of the data disks in the image to use. 
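# --- Illustrative usage sketch (editorial note, not part of the generated patch) ---
# A minimal GrantAccessData payload, as defined above, requesting a read-only SAS that
# expires after one hour. Resource names are hypothetical, and the `disks.begin_grant_access`
# call and the `access_sas` attribute on its result are assumed from this package's client.
from azure.mgmt.compute.v2023_01_02 import models

grant_access_data = models.GrantAccessData(
    access="Read",
    duration_in_seconds=3600,
    get_secure_vm_guest_state_sas=False,
)
# Example (assumed operations-group call):
# access_uri = client.disks.begin_grant_access("my-rg", "my-disk", grant_access_data).result()
# print(access_uri.access_sas)
# --- end of sketch ---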
For OS disks, this field is null. + :paramtype lun: int + """ + super().__init__(**kwargs) + self.id = id + self.shared_gallery_image_id = shared_gallery_image_id + self.community_gallery_image_id = community_gallery_image_id + self.lun = lun + + +class InnerError(_serialization.Model): + """Inner error details. + + :ivar exceptiontype: The exception type. + :vartype exceptiontype: str + :ivar errordetail: The internal error message or exception dump. + :vartype errordetail: str + """ + + _attribute_map = { + "exceptiontype": {"key": "exceptiontype", "type": "str"}, + "errordetail": {"key": "errordetail", "type": "str"}, + } + + def __init__( + self, *, exceptiontype: Optional[str] = None, errordetail: Optional[str] = None, **kwargs: Any + ) -> None: + """ + :keyword exceptiontype: The exception type. + :paramtype exceptiontype: str + :keyword errordetail: The internal error message or exception dump. + :paramtype errordetail: str + """ + super().__init__(**kwargs) + self.exceptiontype = exceptiontype + self.errordetail = errordetail + + +class KeyForDiskEncryptionSet(_serialization.Model): + """Key Vault Key Url to be used for server side encryption of Managed Disks and Snapshots. + + All required parameters must be populated in order to send to Azure. + + :ivar source_vault: Resource id of the KeyVault containing the key or secret. This property is + optional and cannot be used if the KeyVault subscription is not the same as the Disk Encryption + Set subscription. + :vartype source_vault: ~azure.mgmt.compute.v2023_01_02.models.SourceVault + :ivar key_url: Fully versioned Key Url pointing to a key in KeyVault. Version segment of the + Url is required regardless of rotationToLatestKeyVersionEnabled value. Required. + :vartype key_url: str + """ + + _validation = { + "key_url": {"required": True}, + } + + _attribute_map = { + "source_vault": {"key": "sourceVault", "type": "SourceVault"}, + "key_url": {"key": "keyUrl", "type": "str"}, + } + + def __init__(self, *, key_url: str, source_vault: Optional["_models.SourceVault"] = None, **kwargs: Any) -> None: + """ + :keyword source_vault: Resource id of the KeyVault containing the key or secret. This property + is optional and cannot be used if the KeyVault subscription is not the same as the Disk + Encryption Set subscription. + :paramtype source_vault: ~azure.mgmt.compute.v2023_01_02.models.SourceVault + :keyword key_url: Fully versioned Key Url pointing to a key in KeyVault. Version segment of the + Url is required regardless of rotationToLatestKeyVersionEnabled value. Required. + :paramtype key_url: str + """ + super().__init__(**kwargs) + self.source_vault = source_vault + self.key_url = key_url + + +class KeyVaultAndKeyReference(_serialization.Model): + """Key Vault Key Url and vault id of KeK, KeK is optional and when provided is used to unwrap the + encryptionKey. + + All required parameters must be populated in order to send to Azure. + + :ivar source_vault: Resource id of the KeyVault containing the key or secret. Required. + :vartype source_vault: ~azure.mgmt.compute.v2023_01_02.models.SourceVault + :ivar key_url: Url pointing to a key or secret in KeyVault. Required. 
+ :vartype key_url: str + """ + + _validation = { + "source_vault": {"required": True}, + "key_url": {"required": True}, + } + + _attribute_map = { + "source_vault": {"key": "sourceVault", "type": "SourceVault"}, + "key_url": {"key": "keyUrl", "type": "str"}, + } + + def __init__(self, *, source_vault: "_models.SourceVault", key_url: str, **kwargs: Any) -> None: + """ + :keyword source_vault: Resource id of the KeyVault containing the key or secret. Required. + :paramtype source_vault: ~azure.mgmt.compute.v2023_01_02.models.SourceVault + :keyword key_url: Url pointing to a key or secret in KeyVault. Required. + :paramtype key_url: str + """ + super().__init__(**kwargs) + self.source_vault = source_vault + self.key_url = key_url + + +class KeyVaultAndSecretReference(_serialization.Model): + """Key Vault Secret Url and vault id of the encryption key. + + All required parameters must be populated in order to send to Azure. + + :ivar source_vault: Resource id of the KeyVault containing the key or secret. Required. + :vartype source_vault: ~azure.mgmt.compute.v2023_01_02.models.SourceVault + :ivar secret_url: Url pointing to a key or secret in KeyVault. Required. + :vartype secret_url: str + """ + + _validation = { + "source_vault": {"required": True}, + "secret_url": {"required": True}, + } + + _attribute_map = { + "source_vault": {"key": "sourceVault", "type": "SourceVault"}, + "secret_url": {"key": "secretUrl", "type": "str"}, + } + + def __init__(self, *, source_vault: "_models.SourceVault", secret_url: str, **kwargs: Any) -> None: + """ + :keyword source_vault: Resource id of the KeyVault containing the key or secret. Required. + :paramtype source_vault: ~azure.mgmt.compute.v2023_01_02.models.SourceVault + :keyword secret_url: Url pointing to a key or secret in KeyVault. Required. + :paramtype secret_url: str + """ + super().__init__(**kwargs) + self.source_vault = source_vault + self.secret_url = secret_url + + +class PrivateEndpoint(_serialization.Model): + """The Private Endpoint resource. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar id: The ARM identifier for Private Endpoint. + :vartype id: str + """ + + _validation = { + "id": {"readonly": True}, + } + + _attribute_map = { + "id": {"key": "id", "type": "str"}, + } + + def __init__(self, **kwargs: Any) -> None: + """ """ + super().__init__(**kwargs) + self.id = None + + +class PrivateEndpointConnection(_serialization.Model): + """The Private Endpoint Connection resource. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar id: private endpoint connection Id. + :vartype id: str + :ivar name: private endpoint connection name. + :vartype name: str + :ivar type: private endpoint connection type. + :vartype type: str + :ivar private_endpoint: The resource of private end point. + :vartype private_endpoint: ~azure.mgmt.compute.v2023_01_02.models.PrivateEndpoint + :ivar private_link_service_connection_state: A collection of information about the state of the + connection between DiskAccess and Virtual Network. + :vartype private_link_service_connection_state: + ~azure.mgmt.compute.v2023_01_02.models.PrivateLinkServiceConnectionState + :ivar provisioning_state: The provisioning state of the private endpoint connection resource. + Known values are: "Succeeded", "Creating", "Deleting", and "Failed". 
+ :vartype provisioning_state: str or + ~azure.mgmt.compute.v2023_01_02.models.PrivateEndpointConnectionProvisioningState + """ + + _validation = { + "id": {"readonly": True}, + "name": {"readonly": True}, + "type": {"readonly": True}, + "private_endpoint": {"readonly": True}, + "provisioning_state": {"readonly": True}, + } + + _attribute_map = { + "id": {"key": "id", "type": "str"}, + "name": {"key": "name", "type": "str"}, + "type": {"key": "type", "type": "str"}, + "private_endpoint": {"key": "properties.privateEndpoint", "type": "PrivateEndpoint"}, + "private_link_service_connection_state": { + "key": "properties.privateLinkServiceConnectionState", + "type": "PrivateLinkServiceConnectionState", + }, + "provisioning_state": {"key": "properties.provisioningState", "type": "str"}, + } + + def __init__( + self, + *, + private_link_service_connection_state: Optional["_models.PrivateLinkServiceConnectionState"] = None, + **kwargs: Any + ) -> None: + """ + :keyword private_link_service_connection_state: A collection of information about the state of + the connection between DiskAccess and Virtual Network. + :paramtype private_link_service_connection_state: + ~azure.mgmt.compute.v2023_01_02.models.PrivateLinkServiceConnectionState + """ + super().__init__(**kwargs) + self.id = None + self.name = None + self.type = None + self.private_endpoint = None + self.private_link_service_connection_state = private_link_service_connection_state + self.provisioning_state = None + + +class PrivateEndpointConnectionListResult(_serialization.Model): + """A list of private link resources. + + :ivar value: Array of private endpoint connections. + :vartype value: list[~azure.mgmt.compute.v2023_01_02.models.PrivateEndpointConnection] + :ivar next_link: The uri to fetch the next page of snapshots. Call ListNext() with this to + fetch the next page of snapshots. + :vartype next_link: str + """ + + _attribute_map = { + "value": {"key": "value", "type": "[PrivateEndpointConnection]"}, + "next_link": {"key": "nextLink", "type": "str"}, + } + + def __init__( + self, + *, + value: Optional[List["_models.PrivateEndpointConnection"]] = None, + next_link: Optional[str] = None, + **kwargs: Any + ) -> None: + """ + :keyword value: Array of private endpoint connections. + :paramtype value: list[~azure.mgmt.compute.v2023_01_02.models.PrivateEndpointConnection] + :keyword next_link: The uri to fetch the next page of snapshots. Call ListNext() with this to + fetch the next page of snapshots. + :paramtype next_link: str + """ + super().__init__(**kwargs) + self.value = value + self.next_link = next_link + + +class PrivateLinkResource(_serialization.Model): + """A private link resource. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar id: private link resource Id. + :vartype id: str + :ivar name: private link resource name. + :vartype name: str + :ivar type: private link resource type. + :vartype type: str + :ivar group_id: The private link resource group id. + :vartype group_id: str + :ivar required_members: The private link resource required member names. + :vartype required_members: list[str] + :ivar required_zone_names: The private link resource DNS zone name. 
+ :vartype required_zone_names: list[str] + """ + + _validation = { + "id": {"readonly": True}, + "name": {"readonly": True}, + "type": {"readonly": True}, + "group_id": {"readonly": True}, + "required_members": {"readonly": True}, + } + + _attribute_map = { + "id": {"key": "id", "type": "str"}, + "name": {"key": "name", "type": "str"}, + "type": {"key": "type", "type": "str"}, + "group_id": {"key": "properties.groupId", "type": "str"}, + "required_members": {"key": "properties.requiredMembers", "type": "[str]"}, + "required_zone_names": {"key": "properties.requiredZoneNames", "type": "[str]"}, + } + + def __init__(self, *, required_zone_names: Optional[List[str]] = None, **kwargs: Any) -> None: + """ + :keyword required_zone_names: The private link resource DNS zone name. + :paramtype required_zone_names: list[str] + """ + super().__init__(**kwargs) + self.id = None + self.name = None + self.type = None + self.group_id = None + self.required_members = None + self.required_zone_names = required_zone_names + + +class PrivateLinkResourceListResult(_serialization.Model): + """A list of private link resources. + + :ivar value: Array of private link resources. + :vartype value: list[~azure.mgmt.compute.v2023_01_02.models.PrivateLinkResource] + """ + + _attribute_map = { + "value": {"key": "value", "type": "[PrivateLinkResource]"}, + } + + def __init__(self, *, value: Optional[List["_models.PrivateLinkResource"]] = None, **kwargs: Any) -> None: + """ + :keyword value: Array of private link resources. + :paramtype value: list[~azure.mgmt.compute.v2023_01_02.models.PrivateLinkResource] + """ + super().__init__(**kwargs) + self.value = value + + +class PrivateLinkServiceConnectionState(_serialization.Model): + """A collection of information about the state of the connection between service consumer and + provider. + + :ivar status: Indicates whether the connection has been Approved/Rejected/Removed by the owner + of the service. Known values are: "Pending", "Approved", and "Rejected". + :vartype status: str or + ~azure.mgmt.compute.v2023_01_02.models.PrivateEndpointServiceConnectionStatus + :ivar description: The reason for approval/rejection of the connection. + :vartype description: str + :ivar actions_required: A message indicating if changes on the service provider require any + updates on the consumer. + :vartype actions_required: str + """ + + _attribute_map = { + "status": {"key": "status", "type": "str"}, + "description": {"key": "description", "type": "str"}, + "actions_required": {"key": "actionsRequired", "type": "str"}, + } + + def __init__( + self, + *, + status: Optional[Union[str, "_models.PrivateEndpointServiceConnectionStatus"]] = None, + description: Optional[str] = None, + actions_required: Optional[str] = None, + **kwargs: Any + ) -> None: + """ + :keyword status: Indicates whether the connection has been Approved/Rejected/Removed by the + owner of the service. Known values are: "Pending", "Approved", and "Rejected". + :paramtype status: str or + ~azure.mgmt.compute.v2023_01_02.models.PrivateEndpointServiceConnectionStatus + :keyword description: The reason for approval/rejection of the connection. + :paramtype description: str + :keyword actions_required: A message indicating if changes on the service provider require any + updates on the consumer. 
+ :paramtype actions_required: str + """ + super().__init__(**kwargs) + self.status = status + self.description = description + self.actions_required = actions_required + + +class PropertyUpdatesInProgress(_serialization.Model): + """Properties of the disk for which update is pending. + + :ivar target_tier: The target performance tier of the disk if a tier change operation is in + progress. + :vartype target_tier: str + """ + + _attribute_map = { + "target_tier": {"key": "targetTier", "type": "str"}, + } + + def __init__(self, *, target_tier: Optional[str] = None, **kwargs: Any) -> None: + """ + :keyword target_tier: The target performance tier of the disk if a tier change operation is in + progress. + :paramtype target_tier: str + """ + super().__init__(**kwargs) + self.target_tier = target_tier + + +class PurchasePlan(_serialization.Model): + """Used for establishing the purchase context of any 3rd Party artifact through MarketPlace. + + All required parameters must be populated in order to send to Azure. + + :ivar name: The plan ID. Required. + :vartype name: str + :ivar publisher: The publisher ID. Required. + :vartype publisher: str + :ivar product: Specifies the product of the image from the marketplace. This is the same value + as Offer under the imageReference element. Required. + :vartype product: str + :ivar promotion_code: The Offer Promotion Code. + :vartype promotion_code: str + """ + + _validation = { + "name": {"required": True}, + "publisher": {"required": True}, + "product": {"required": True}, + } + + _attribute_map = { + "name": {"key": "name", "type": "str"}, + "publisher": {"key": "publisher", "type": "str"}, + "product": {"key": "product", "type": "str"}, + "promotion_code": {"key": "promotionCode", "type": "str"}, + } + + def __init__( + self, *, name: str, publisher: str, product: str, promotion_code: Optional[str] = None, **kwargs: Any + ) -> None: + """ + :keyword name: The plan ID. Required. + :paramtype name: str + :keyword publisher: The publisher ID. Required. + :paramtype publisher: str + :keyword product: Specifies the product of the image from the marketplace. This is the same + value as Offer under the imageReference element. Required. + :paramtype product: str + :keyword promotion_code: The Offer Promotion Code. + :paramtype promotion_code: str + """ + super().__init__(**kwargs) + self.name = name + self.publisher = publisher + self.product = product + self.promotion_code = promotion_code + + +class ResourceUriList(_serialization.Model): + """The List resources which are encrypted with the disk encryption set. + + All required parameters must be populated in order to send to Azure. + + :ivar value: A list of IDs or Owner IDs of resources which are encrypted with the disk + encryption set. Required. + :vartype value: list[str] + :ivar next_link: The uri to fetch the next page of encrypted resources. Call ListNext() with + this to fetch the next page of encrypted resources. + :vartype next_link: str + """ + + _validation = { + "value": {"required": True}, + } + + _attribute_map = { + "value": {"key": "value", "type": "[str]"}, + "next_link": {"key": "nextLink", "type": "str"}, + } + + def __init__(self, *, value: List[str], next_link: Optional[str] = None, **kwargs: Any) -> None: + """ + :keyword value: A list of IDs or Owner IDs of resources which are encrypted with the disk + encryption set. Required. + :paramtype value: list[str] + :keyword next_link: The uri to fetch the next page of encrypted resources. 
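# --- Illustrative usage sketch (editorial note, not part of the generated patch) ---
# Shows how the private endpoint connection models above are typically combined to approve a
# pending connection on a DiskAccess resource. Resource names are hypothetical, and the
# `disk_accesses.begin_update_a_private_endpoint_connection` call is assumed from this
# package's management client.
from azure.mgmt.compute.v2023_01_02 import models

approval = models.PrivateEndpointConnection(
    private_link_service_connection_state=models.PrivateLinkServiceConnectionState(
        status="Approved",
        description="Approved by the storage admin.",
    )
)
# Example (assumed operations-group call):
# client.disk_accesses.begin_update_a_private_endpoint_connection(
#     "my-rg", "my-disk-access", "my-pe-connection", approval
# ).result()
# --- end of sketch ---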
Call ListNext() with + this to fetch the next page of encrypted resources. + :paramtype next_link: str + """ + super().__init__(**kwargs) + self.value = value + self.next_link = next_link + + +class ResourceWithOptionalLocation(_serialization.Model): + """The Resource model definition with location property as optional. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar location: Resource location. + :vartype location: str + :ivar id: Resource Id. + :vartype id: str + :ivar name: Resource name. + :vartype name: str + :ivar type: Resource type. + :vartype type: str + :ivar tags: Resource tags. + :vartype tags: dict[str, str] + """ + + _validation = { + "id": {"readonly": True}, + "name": {"readonly": True}, + "type": {"readonly": True}, + } + + _attribute_map = { + "location": {"key": "location", "type": "str"}, + "id": {"key": "id", "type": "str"}, + "name": {"key": "name", "type": "str"}, + "type": {"key": "type", "type": "str"}, + "tags": {"key": "tags", "type": "{str}"}, + } + + def __init__(self, *, location: Optional[str] = None, tags: Optional[Dict[str, str]] = None, **kwargs: Any) -> None: + """ + :keyword location: Resource location. + :paramtype location: str + :keyword tags: Resource tags. + :paramtype tags: dict[str, str] + """ + super().__init__(**kwargs) + self.location = location + self.id = None + self.name = None + self.type = None + self.tags = tags + + +class ShareInfoElement(_serialization.Model): + """ShareInfoElement. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar vm_uri: A relative URI containing the ID of the VM that has the disk attached. + :vartype vm_uri: str + """ + + _validation = { + "vm_uri": {"readonly": True}, + } + + _attribute_map = { + "vm_uri": {"key": "vmUri", "type": "str"}, + } + + def __init__(self, **kwargs: Any) -> None: + """ """ + super().__init__(**kwargs) + self.vm_uri = None + + +class Snapshot(Resource): # pylint: disable=too-many-instance-attributes + """Snapshot resource. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar id: Resource Id. + :vartype id: str + :ivar name: Resource name. + :vartype name: str + :ivar type: Resource type. + :vartype type: str + :ivar location: Resource location. Required. + :vartype location: str + :ivar tags: Resource tags. + :vartype tags: dict[str, str] + :ivar managed_by: Unused. Always Null. + :vartype managed_by: str + :ivar sku: The snapshots sku name. Can be Standard_LRS, Premium_LRS, or Standard_ZRS. This is + an optional parameter for incremental snapshot and the default behavior is the SKU will be set + to the same sku as the previous snapshot. + :vartype sku: ~azure.mgmt.compute.v2023_01_02.models.SnapshotSku + :ivar extended_location: The extended location where the snapshot will be created. Extended + location cannot be changed. + :vartype extended_location: ~azure.mgmt.compute.v2023_01_02.models.ExtendedLocation + :ivar time_created: The time when the snapshot was created. + :vartype time_created: ~datetime.datetime + :ivar os_type: The Operating System type. Known values are: "Windows" and "Linux". + :vartype os_type: str or ~azure.mgmt.compute.v2023_01_02.models.OperatingSystemTypes + :ivar hyper_v_generation: The hypervisor generation of the Virtual Machine. Applicable to OS + disks only. Known values are: "V1" and "V2". 
+ :vartype hyper_v_generation: str or ~azure.mgmt.compute.v2023_01_02.models.HyperVGeneration + :ivar purchase_plan: Purchase plan information for the image from which the source disk for the + snapshot was originally created. + :vartype purchase_plan: ~azure.mgmt.compute.v2023_01_02.models.PurchasePlan + :ivar supported_capabilities: List of supported capabilities for the image from which the + source disk from the snapshot was originally created. + :vartype supported_capabilities: ~azure.mgmt.compute.v2023_01_02.models.SupportedCapabilities + :ivar creation_data: Disk source information. CreationData information cannot be changed after + the disk has been created. + :vartype creation_data: ~azure.mgmt.compute.v2023_01_02.models.CreationData + :ivar disk_size_gb: If creationData.createOption is Empty, this field is mandatory and it + indicates the size of the disk to create. If this field is present for updates or creation with + other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a + running VM, and can only increase the disk's size. + :vartype disk_size_gb: int + :ivar disk_size_bytes: The size of the disk in bytes. This field is read only. + :vartype disk_size_bytes: int + :ivar disk_state: The state of the snapshot. Known values are: "Unattached", "Attached", + "Reserved", "Frozen", "ActiveSAS", "ActiveSASFrozen", "ReadyToUpload", and "ActiveUpload". + :vartype disk_state: str or ~azure.mgmt.compute.v2023_01_02.models.DiskState + :ivar unique_id: Unique Guid identifying the resource. + :vartype unique_id: str + :ivar encryption_settings_collection: Encryption settings collection used be Azure Disk + Encryption, can contain multiple encryption settings per disk or snapshot. + :vartype encryption_settings_collection: + ~azure.mgmt.compute.v2023_01_02.models.EncryptionSettingsCollection + :ivar provisioning_state: The disk provisioning state. + :vartype provisioning_state: str + :ivar incremental: Whether a snapshot is incremental. Incremental snapshots on the same disk + occupy less space than full snapshots and can be diffed. + :vartype incremental: bool + :ivar incremental_snapshot_family_id: Incremental snapshots for a disk share an incremental + snapshot family id. The Get Page Range Diff API can only be called on incremental snapshots + with the same family id. + :vartype incremental_snapshot_family_id: str + :ivar encryption: Encryption property can be used to encrypt data at rest with customer managed + keys or platform managed keys. + :vartype encryption: ~azure.mgmt.compute.v2023_01_02.models.Encryption + :ivar network_access_policy: Policy for accessing the disk via network. Known values are: + "AllowAll", "AllowPrivate", and "DenyAll". + :vartype network_access_policy: str or + ~azure.mgmt.compute.v2023_01_02.models.NetworkAccessPolicy + :ivar disk_access_id: ARM id of the DiskAccess resource for using private endpoints on disks. + :vartype disk_access_id: str + :ivar security_profile: Contains the security related information for the resource. + :vartype security_profile: ~azure.mgmt.compute.v2023_01_02.models.DiskSecurityProfile + :ivar supports_hibernation: Indicates the OS on a snapshot supports hibernation. + :vartype supports_hibernation: bool + :ivar public_network_access: Policy for controlling export on the disk. Known values are: + "Enabled" and "Disabled". 
+ :vartype public_network_access: str or + ~azure.mgmt.compute.v2023_01_02.models.PublicNetworkAccess + :ivar completion_percent: Percentage complete for the background copy when a resource is + created via the CopyStart operation. + :vartype completion_percent: float + :ivar copy_completion_error: Indicates the error details if the background copy of a resource + created via the CopyStart operation fails. + :vartype copy_completion_error: ~azure.mgmt.compute.v2023_01_02.models.CopyCompletionError + :ivar data_access_auth_mode: Additional authentication requirements when exporting or uploading + to a disk or snapshot. Known values are: "AzureActiveDirectory" and "None". + :vartype data_access_auth_mode: str or + ~azure.mgmt.compute.v2023_01_02.models.DataAccessAuthMode + """ + + _validation = { + "id": {"readonly": True}, + "name": {"readonly": True}, + "type": {"readonly": True}, + "location": {"required": True}, + "managed_by": {"readonly": True}, + "time_created": {"readonly": True}, + "disk_size_bytes": {"readonly": True}, + "disk_state": {"readonly": True}, + "unique_id": {"readonly": True}, + "provisioning_state": {"readonly": True}, + "incremental_snapshot_family_id": {"readonly": True}, + } + + _attribute_map = { + "id": {"key": "id", "type": "str"}, + "name": {"key": "name", "type": "str"}, + "type": {"key": "type", "type": "str"}, + "location": {"key": "location", "type": "str"}, + "tags": {"key": "tags", "type": "{str}"}, + "managed_by": {"key": "managedBy", "type": "str"}, + "sku": {"key": "sku", "type": "SnapshotSku"}, + "extended_location": {"key": "extendedLocation", "type": "ExtendedLocation"}, + "time_created": {"key": "properties.timeCreated", "type": "iso-8601"}, + "os_type": {"key": "properties.osType", "type": "str"}, + "hyper_v_generation": {"key": "properties.hyperVGeneration", "type": "str"}, + "purchase_plan": {"key": "properties.purchasePlan", "type": "PurchasePlan"}, + "supported_capabilities": {"key": "properties.supportedCapabilities", "type": "SupportedCapabilities"}, + "creation_data": {"key": "properties.creationData", "type": "CreationData"}, + "disk_size_gb": {"key": "properties.diskSizeGB", "type": "int"}, + "disk_size_bytes": {"key": "properties.diskSizeBytes", "type": "int"}, + "disk_state": {"key": "properties.diskState", "type": "str"}, + "unique_id": {"key": "properties.uniqueId", "type": "str"}, + "encryption_settings_collection": { + "key": "properties.encryptionSettingsCollection", + "type": "EncryptionSettingsCollection", + }, + "provisioning_state": {"key": "properties.provisioningState", "type": "str"}, + "incremental": {"key": "properties.incremental", "type": "bool"}, + "incremental_snapshot_family_id": {"key": "properties.incrementalSnapshotFamilyId", "type": "str"}, + "encryption": {"key": "properties.encryption", "type": "Encryption"}, + "network_access_policy": {"key": "properties.networkAccessPolicy", "type": "str"}, + "disk_access_id": {"key": "properties.diskAccessId", "type": "str"}, + "security_profile": {"key": "properties.securityProfile", "type": "DiskSecurityProfile"}, + "supports_hibernation": {"key": "properties.supportsHibernation", "type": "bool"}, + "public_network_access": {"key": "properties.publicNetworkAccess", "type": "str"}, + "completion_percent": {"key": "properties.completionPercent", "type": "float"}, + "copy_completion_error": {"key": "properties.copyCompletionError", "type": "CopyCompletionError"}, + "data_access_auth_mode": {"key": "properties.dataAccessAuthMode", "type": "str"}, + } + + def __init__( # pylint: 
disable=too-many-locals + self, + *, + location: str, + tags: Optional[Dict[str, str]] = None, + sku: Optional["_models.SnapshotSku"] = None, + extended_location: Optional["_models.ExtendedLocation"] = None, + os_type: Optional[Union[str, "_models.OperatingSystemTypes"]] = None, + hyper_v_generation: Optional[Union[str, "_models.HyperVGeneration"]] = None, + purchase_plan: Optional["_models.PurchasePlan"] = None, + supported_capabilities: Optional["_models.SupportedCapabilities"] = None, + creation_data: Optional["_models.CreationData"] = None, + disk_size_gb: Optional[int] = None, + encryption_settings_collection: Optional["_models.EncryptionSettingsCollection"] = None, + incremental: Optional[bool] = None, + encryption: Optional["_models.Encryption"] = None, + network_access_policy: Optional[Union[str, "_models.NetworkAccessPolicy"]] = None, + disk_access_id: Optional[str] = None, + security_profile: Optional["_models.DiskSecurityProfile"] = None, + supports_hibernation: Optional[bool] = None, + public_network_access: Optional[Union[str, "_models.PublicNetworkAccess"]] = None, + completion_percent: Optional[float] = None, + copy_completion_error: Optional["_models.CopyCompletionError"] = None, + data_access_auth_mode: Optional[Union[str, "_models.DataAccessAuthMode"]] = None, + **kwargs: Any + ) -> None: + """ + :keyword location: Resource location. Required. + :paramtype location: str + :keyword tags: Resource tags. + :paramtype tags: dict[str, str] + :keyword sku: The snapshots sku name. Can be Standard_LRS, Premium_LRS, or Standard_ZRS. This + is an optional parameter for incremental snapshot and the default behavior is the SKU will be + set to the same sku as the previous snapshot. + :paramtype sku: ~azure.mgmt.compute.v2023_01_02.models.SnapshotSku + :keyword extended_location: The extended location where the snapshot will be created. Extended + location cannot be changed. + :paramtype extended_location: ~azure.mgmt.compute.v2023_01_02.models.ExtendedLocation + :keyword os_type: The Operating System type. Known values are: "Windows" and "Linux". + :paramtype os_type: str or ~azure.mgmt.compute.v2023_01_02.models.OperatingSystemTypes + :keyword hyper_v_generation: The hypervisor generation of the Virtual Machine. Applicable to OS + disks only. Known values are: "V1" and "V2". + :paramtype hyper_v_generation: str or ~azure.mgmt.compute.v2023_01_02.models.HyperVGeneration + :keyword purchase_plan: Purchase plan information for the image from which the source disk for + the snapshot was originally created. + :paramtype purchase_plan: ~azure.mgmt.compute.v2023_01_02.models.PurchasePlan + :keyword supported_capabilities: List of supported capabilities for the image from which the + source disk from the snapshot was originally created. + :paramtype supported_capabilities: ~azure.mgmt.compute.v2023_01_02.models.SupportedCapabilities + :keyword creation_data: Disk source information. CreationData information cannot be changed + after the disk has been created. + :paramtype creation_data: ~azure.mgmt.compute.v2023_01_02.models.CreationData + :keyword disk_size_gb: If creationData.createOption is Empty, this field is mandatory and it + indicates the size of the disk to create. If this field is present for updates or creation with + other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a + running VM, and can only increase the disk's size. 
+ :paramtype disk_size_gb: int + :keyword encryption_settings_collection: Encryption settings collection used be Azure Disk + Encryption, can contain multiple encryption settings per disk or snapshot. + :paramtype encryption_settings_collection: + ~azure.mgmt.compute.v2023_01_02.models.EncryptionSettingsCollection + :keyword incremental: Whether a snapshot is incremental. Incremental snapshots on the same disk + occupy less space than full snapshots and can be diffed. + :paramtype incremental: bool + :keyword encryption: Encryption property can be used to encrypt data at rest with customer + managed keys or platform managed keys. + :paramtype encryption: ~azure.mgmt.compute.v2023_01_02.models.Encryption + :keyword network_access_policy: Policy for accessing the disk via network. Known values are: + "AllowAll", "AllowPrivate", and "DenyAll". + :paramtype network_access_policy: str or + ~azure.mgmt.compute.v2023_01_02.models.NetworkAccessPolicy + :keyword disk_access_id: ARM id of the DiskAccess resource for using private endpoints on + disks. + :paramtype disk_access_id: str + :keyword security_profile: Contains the security related information for the resource. + :paramtype security_profile: ~azure.mgmt.compute.v2023_01_02.models.DiskSecurityProfile + :keyword supports_hibernation: Indicates the OS on a snapshot supports hibernation. + :paramtype supports_hibernation: bool + :keyword public_network_access: Policy for controlling export on the disk. Known values are: + "Enabled" and "Disabled". + :paramtype public_network_access: str or + ~azure.mgmt.compute.v2023_01_02.models.PublicNetworkAccess + :keyword completion_percent: Percentage complete for the background copy when a resource is + created via the CopyStart operation. + :paramtype completion_percent: float + :keyword copy_completion_error: Indicates the error details if the background copy of a + resource created via the CopyStart operation fails. + :paramtype copy_completion_error: ~azure.mgmt.compute.v2023_01_02.models.CopyCompletionError + :keyword data_access_auth_mode: Additional authentication requirements when exporting or + uploading to a disk or snapshot. Known values are: "AzureActiveDirectory" and "None". + :paramtype data_access_auth_mode: str or + ~azure.mgmt.compute.v2023_01_02.models.DataAccessAuthMode + """ + super().__init__(location=location, tags=tags, **kwargs) + self.managed_by = None + self.sku = sku + self.extended_location = extended_location + self.time_created = None + self.os_type = os_type + self.hyper_v_generation = hyper_v_generation + self.purchase_plan = purchase_plan + self.supported_capabilities = supported_capabilities + self.creation_data = creation_data + self.disk_size_gb = disk_size_gb + self.disk_size_bytes = None + self.disk_state = None + self.unique_id = None + self.encryption_settings_collection = encryption_settings_collection + self.provisioning_state = None + self.incremental = incremental + self.incremental_snapshot_family_id = None + self.encryption = encryption + self.network_access_policy = network_access_policy + self.disk_access_id = disk_access_id + self.security_profile = security_profile + self.supports_hibernation = supports_hibernation + self.public_network_access = public_network_access + self.completion_percent = completion_percent + self.copy_completion_error = copy_completion_error + self.data_access_auth_mode = data_access_auth_mode + + +class SnapshotList(_serialization.Model): + """The List Snapshots operation response. 
+ + All required parameters must be populated in order to send to Azure. + + :ivar value: A list of snapshots. Required. + :vartype value: list[~azure.mgmt.compute.v2023_01_02.models.Snapshot] + :ivar next_link: The uri to fetch the next page of snapshots. Call ListNext() with this to + fetch the next page of snapshots. + :vartype next_link: str + """ + + _validation = { + "value": {"required": True}, + } + + _attribute_map = { + "value": {"key": "value", "type": "[Snapshot]"}, + "next_link": {"key": "nextLink", "type": "str"}, + } + + def __init__(self, *, value: List["_models.Snapshot"], next_link: Optional[str] = None, **kwargs: Any) -> None: + """ + :keyword value: A list of snapshots. Required. + :paramtype value: list[~azure.mgmt.compute.v2023_01_02.models.Snapshot] + :keyword next_link: The uri to fetch the next page of snapshots. Call ListNext() with this to + fetch the next page of snapshots. + :paramtype next_link: str + """ + super().__init__(**kwargs) + self.value = value + self.next_link = next_link + + +class SnapshotSku(_serialization.Model): + """The snapshots sku name. Can be Standard_LRS, Premium_LRS, or Standard_ZRS. This is an optional + parameter for incremental snapshot and the default behavior is the SKU will be set to the same + sku as the previous snapshot. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar name: The sku name. Known values are: "Standard_LRS", "Premium_LRS", and "Standard_ZRS". + :vartype name: str or ~azure.mgmt.compute.v2023_01_02.models.SnapshotStorageAccountTypes + :ivar tier: The sku tier. + :vartype tier: str + """ + + _validation = { + "tier": {"readonly": True}, + } + + _attribute_map = { + "name": {"key": "name", "type": "str"}, + "tier": {"key": "tier", "type": "str"}, + } + + def __init__( + self, *, name: Optional[Union[str, "_models.SnapshotStorageAccountTypes"]] = None, **kwargs: Any + ) -> None: + """ + :keyword name: The sku name. Known values are: "Standard_LRS", "Premium_LRS", and + "Standard_ZRS". + :paramtype name: str or ~azure.mgmt.compute.v2023_01_02.models.SnapshotStorageAccountTypes + """ + super().__init__(**kwargs) + self.name = name + self.tier = None + + +class SnapshotUpdate(_serialization.Model): # pylint: disable=too-many-instance-attributes + """Snapshot update resource. + + :ivar tags: Resource tags. + :vartype tags: dict[str, str] + :ivar sku: The snapshots sku name. Can be Standard_LRS, Premium_LRS, or Standard_ZRS. This is + an optional parameter for incremental snapshot and the default behavior is the SKU will be set + to the same sku as the previous snapshot. + :vartype sku: ~azure.mgmt.compute.v2023_01_02.models.SnapshotSku + :ivar os_type: the Operating System type. Known values are: "Windows" and "Linux". + :vartype os_type: str or ~azure.mgmt.compute.v2023_01_02.models.OperatingSystemTypes + :ivar disk_size_gb: If creationData.createOption is Empty, this field is mandatory and it + indicates the size of the disk to create. If this field is present for updates or creation with + other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a + running VM, and can only increase the disk's size. + :vartype disk_size_gb: int + :ivar encryption_settings_collection: Encryption settings collection used be Azure Disk + Encryption, can contain multiple encryption settings per disk or snapshot. 
+ :vartype encryption_settings_collection: + ~azure.mgmt.compute.v2023_01_02.models.EncryptionSettingsCollection + :ivar encryption: Encryption property can be used to encrypt data at rest with customer managed + keys or platform managed keys. + :vartype encryption: ~azure.mgmt.compute.v2023_01_02.models.Encryption + :ivar network_access_policy: Policy for accessing the disk via network. Known values are: + "AllowAll", "AllowPrivate", and "DenyAll". + :vartype network_access_policy: str or + ~azure.mgmt.compute.v2023_01_02.models.NetworkAccessPolicy + :ivar disk_access_id: ARM id of the DiskAccess resource for using private endpoints on disks. + :vartype disk_access_id: str + :ivar supports_hibernation: Indicates the OS on a snapshot supports hibernation. + :vartype supports_hibernation: bool + :ivar public_network_access: Policy for controlling export on the disk. Known values are: + "Enabled" and "Disabled". + :vartype public_network_access: str or + ~azure.mgmt.compute.v2023_01_02.models.PublicNetworkAccess + :ivar data_access_auth_mode: Additional authentication requirements when exporting or uploading + to a disk or snapshot. Known values are: "AzureActiveDirectory" and "None". + :vartype data_access_auth_mode: str or + ~azure.mgmt.compute.v2023_01_02.models.DataAccessAuthMode + :ivar supported_capabilities: List of supported capabilities for the image from which the OS + disk was created. + :vartype supported_capabilities: ~azure.mgmt.compute.v2023_01_02.models.SupportedCapabilities + """ + + _attribute_map = { + "tags": {"key": "tags", "type": "{str}"}, + "sku": {"key": "sku", "type": "SnapshotSku"}, + "os_type": {"key": "properties.osType", "type": "str"}, + "disk_size_gb": {"key": "properties.diskSizeGB", "type": "int"}, + "encryption_settings_collection": { + "key": "properties.encryptionSettingsCollection", + "type": "EncryptionSettingsCollection", + }, + "encryption": {"key": "properties.encryption", "type": "Encryption"}, + "network_access_policy": {"key": "properties.networkAccessPolicy", "type": "str"}, + "disk_access_id": {"key": "properties.diskAccessId", "type": "str"}, + "supports_hibernation": {"key": "properties.supportsHibernation", "type": "bool"}, + "public_network_access": {"key": "properties.publicNetworkAccess", "type": "str"}, + "data_access_auth_mode": {"key": "properties.dataAccessAuthMode", "type": "str"}, + "supported_capabilities": {"key": "properties.supportedCapabilities", "type": "SupportedCapabilities"}, + } + + def __init__( + self, + *, + tags: Optional[Dict[str, str]] = None, + sku: Optional["_models.SnapshotSku"] = None, + os_type: Optional[Union[str, "_models.OperatingSystemTypes"]] = None, + disk_size_gb: Optional[int] = None, + encryption_settings_collection: Optional["_models.EncryptionSettingsCollection"] = None, + encryption: Optional["_models.Encryption"] = None, + network_access_policy: Optional[Union[str, "_models.NetworkAccessPolicy"]] = None, + disk_access_id: Optional[str] = None, + supports_hibernation: Optional[bool] = None, + public_network_access: Optional[Union[str, "_models.PublicNetworkAccess"]] = None, + data_access_auth_mode: Optional[Union[str, "_models.DataAccessAuthMode"]] = None, + supported_capabilities: Optional["_models.SupportedCapabilities"] = None, + **kwargs: Any + ) -> None: + """ + :keyword tags: Resource tags. + :paramtype tags: dict[str, str] + :keyword sku: The snapshots sku name. Can be Standard_LRS, Premium_LRS, or Standard_ZRS. 
This + is an optional parameter for incremental snapshot and the default behavior is the SKU will be + set to the same sku as the previous snapshot. + :paramtype sku: ~azure.mgmt.compute.v2023_01_02.models.SnapshotSku + :keyword os_type: the Operating System type. Known values are: "Windows" and "Linux". + :paramtype os_type: str or ~azure.mgmt.compute.v2023_01_02.models.OperatingSystemTypes + :keyword disk_size_gb: If creationData.createOption is Empty, this field is mandatory and it + indicates the size of the disk to create. If this field is present for updates or creation with + other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a + running VM, and can only increase the disk's size. + :paramtype disk_size_gb: int + :keyword encryption_settings_collection: Encryption settings collection used be Azure Disk + Encryption, can contain multiple encryption settings per disk or snapshot. + :paramtype encryption_settings_collection: + ~azure.mgmt.compute.v2023_01_02.models.EncryptionSettingsCollection + :keyword encryption: Encryption property can be used to encrypt data at rest with customer + managed keys or platform managed keys. + :paramtype encryption: ~azure.mgmt.compute.v2023_01_02.models.Encryption + :keyword network_access_policy: Policy for accessing the disk via network. Known values are: + "AllowAll", "AllowPrivate", and "DenyAll". + :paramtype network_access_policy: str or + ~azure.mgmt.compute.v2023_01_02.models.NetworkAccessPolicy + :keyword disk_access_id: ARM id of the DiskAccess resource for using private endpoints on + disks. + :paramtype disk_access_id: str + :keyword supports_hibernation: Indicates the OS on a snapshot supports hibernation. + :paramtype supports_hibernation: bool + :keyword public_network_access: Policy for controlling export on the disk. Known values are: + "Enabled" and "Disabled". + :paramtype public_network_access: str or + ~azure.mgmt.compute.v2023_01_02.models.PublicNetworkAccess + :keyword data_access_auth_mode: Additional authentication requirements when exporting or + uploading to a disk or snapshot. Known values are: "AzureActiveDirectory" and "None". + :paramtype data_access_auth_mode: str or + ~azure.mgmt.compute.v2023_01_02.models.DataAccessAuthMode + :keyword supported_capabilities: List of supported capabilities for the image from which the OS + disk was created. + :paramtype supported_capabilities: ~azure.mgmt.compute.v2023_01_02.models.SupportedCapabilities + """ + super().__init__(**kwargs) + self.tags = tags + self.sku = sku + self.os_type = os_type + self.disk_size_gb = disk_size_gb + self.encryption_settings_collection = encryption_settings_collection + self.encryption = encryption + self.network_access_policy = network_access_policy + self.disk_access_id = disk_access_id + self.supports_hibernation = supports_hibernation + self.public_network_access = public_network_access + self.data_access_auth_mode = data_access_auth_mode + self.supported_capabilities = supported_capabilities + + +class SourceVault(_serialization.Model): + """The vault id is an Azure Resource Manager Resource id in the form + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}. + + :ivar id: Resource Id. + :vartype id: str + """ + + _attribute_map = { + "id": {"key": "id", "type": "str"}, + } + + def __init__(self, *, id: Optional[str] = None, **kwargs: Any) -> None: # pylint: disable=redefined-builtin + """ + :keyword id: Resource Id. 
+ :paramtype id: str + """ + super().__init__(**kwargs) + self.id = id + + +class SubResource(_serialization.Model): + """SubResource. + + :ivar id: Resource Id. + :vartype id: str + """ + + _attribute_map = { + "id": {"key": "id", "type": "str"}, + } + + def __init__(self, *, id: Optional[str] = None, **kwargs: Any) -> None: # pylint: disable=redefined-builtin + """ + :keyword id: Resource Id. + :paramtype id: str + """ + super().__init__(**kwargs) + self.id = id + + +class SubResourceReadOnly(_serialization.Model): + """SubResourceReadOnly. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar id: Resource Id. + :vartype id: str + """ + + _validation = { + "id": {"readonly": True}, + } + + _attribute_map = { + "id": {"key": "id", "type": "str"}, + } + + def __init__(self, **kwargs: Any) -> None: + """ """ + super().__init__(**kwargs) + self.id = None + + +class SupportedCapabilities(_serialization.Model): + """List of supported capabilities persisted on the disk resource for VM use. + + :ivar disk_controller_types: The disk controllers that an OS disk supports. If set it can be + SCSI or SCSI, NVME or NVME, SCSI. + :vartype disk_controller_types: str + :ivar accelerated_network: True if the image from which the OS disk is created supports + accelerated networking. + :vartype accelerated_network: bool + :ivar architecture: CPU architecture supported by an OS disk. Known values are: "x64" and + "Arm64". + :vartype architecture: str or ~azure.mgmt.compute.v2023_01_02.models.Architecture + """ + + _attribute_map = { + "disk_controller_types": {"key": "diskControllerTypes", "type": "str"}, + "accelerated_network": {"key": "acceleratedNetwork", "type": "bool"}, + "architecture": {"key": "architecture", "type": "str"}, + } + + def __init__( + self, + *, + disk_controller_types: Optional[str] = None, + accelerated_network: Optional[bool] = None, + architecture: Optional[Union[str, "_models.Architecture"]] = None, + **kwargs: Any + ) -> None: + """ + :keyword disk_controller_types: The disk controllers that an OS disk supports. If set it can be + SCSI or SCSI, NVME or NVME, SCSI. + :paramtype disk_controller_types: str + :keyword accelerated_network: True if the image from which the OS disk is created supports + accelerated networking. + :paramtype accelerated_network: bool + :keyword architecture: CPU architecture supported by an OS disk. Known values are: "x64" and + "Arm64". + :paramtype architecture: str or ~azure.mgmt.compute.v2023_01_02.models.Architecture + """ + super().__init__(**kwargs) + self.disk_controller_types = disk_controller_types + self.accelerated_network = accelerated_network + self.architecture = architecture + + +class SystemData(_serialization.Model): + """The system meta data relating to this resource. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar created_at: Specifies the time in UTC at which the Cloud Service (extended support) + resource was created. :code:`
<br>`Minimum api-version: 2022-04-04.
+    :vartype created_at: ~datetime.datetime
+    :ivar last_modified_at: Specifies the time in UTC at which the Cloud Service (extended support)
+     resource was last modified. :code:`<br>
`Minimum api-version: 2022-04-04. + :vartype last_modified_at: ~datetime.datetime + """ + + _validation = { + "created_at": {"readonly": True}, + "last_modified_at": {"readonly": True}, + } + + _attribute_map = { + "created_at": {"key": "createdAt", "type": "iso-8601"}, + "last_modified_at": {"key": "lastModifiedAt", "type": "iso-8601"}, + } + + def __init__(self, **kwargs: Any) -> None: + """ """ + super().__init__(**kwargs) + self.created_at = None + self.last_modified_at = None + + +class UserAssignedIdentitiesValue(_serialization.Model): + """UserAssignedIdentitiesValue. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar principal_id: The principal id of user assigned identity. + :vartype principal_id: str + :ivar client_id: The client id of user assigned identity. + :vartype client_id: str + """ + + _validation = { + "principal_id": {"readonly": True}, + "client_id": {"readonly": True}, + } + + _attribute_map = { + "principal_id": {"key": "principalId", "type": "str"}, + "client_id": {"key": "clientId", "type": "str"}, + } + + def __init__(self, **kwargs: Any) -> None: + """ """ + super().__init__(**kwargs) + self.principal_id = None + self.client_id = None diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/models/_patch.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/models/_patch.py new file mode 100644 index 0000000000000..f7dd32510333d --- /dev/null +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/models/_patch.py @@ -0,0 +1,20 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List + +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. + + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/operations/__init__.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/operations/__init__.py new file mode 100644 index 0000000000000..8ca1cc2598c3b --- /dev/null +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/operations/__init__.py @@ -0,0 +1,27 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
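For reviewers, here is a minimal consumption sketch of the operation groups exported below. It assumes azure-identity is installed for DefaultAzureCredential; the subscription ID, resource group, disk, and snapshot names are placeholders rather than values taken from this change.

# Hedged sketch: create an incremental snapshot of an existing managed disk with
# the v2023_01_02 client surface added in this PR. All names/IDs are placeholders.
from azure.identity import DefaultAzureCredential
from azure.mgmt.compute.v2023_01_02 import ComputeManagementClient
from azure.mgmt.compute.v2023_01_02.models import CreationData, Snapshot

subscription_id = "00000000-0000-0000-0000-000000000000"  # placeholder
client = ComputeManagementClient(DefaultAzureCredential(), subscription_id)

snapshot = Snapshot(
    location="eastus",
    incremental=True,  # incremental snapshots occupy less space and can be diffed
    creation_data=CreationData(
        create_option="Copy",
        source_resource_id=(
            f"/subscriptions/{subscription_id}/resourceGroups/example-rg"
            "/providers/Microsoft.Compute/disks/example-disk"
        ),
    ),
)

# Long-running operations return an LROPoller; result() blocks until the
# snapshot reaches a terminal provisioning state.
poller = client.snapshots.begin_create_or_update("example-rg", "example-snapshot", snapshot)
print(poller.result().provisioning_state)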
+# -------------------------------------------------------------------------- + +from ._operations import DisksOperations +from ._operations import DiskAccessesOperations +from ._operations import DiskEncryptionSetsOperations +from ._operations import DiskRestorePointOperations +from ._operations import SnapshotsOperations + +from ._patch import __all__ as _patch_all +from ._patch import * # pylint: disable=unused-wildcard-import +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "DisksOperations", + "DiskAccessesOperations", + "DiskEncryptionSetsOperations", + "DiskRestorePointOperations", + "SnapshotsOperations", +] +__all__.extend([p for p in _patch_all if p not in __all__]) +_patch_sdk() diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/operations/_operations.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/operations/_operations.py new file mode 100644 index 0000000000000..ebd58d4e283e8 --- /dev/null +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/operations/_operations.py @@ -0,0 +1,6419 @@ +# pylint: disable=too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from io import IOBase +from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload +import urllib.parse + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + map_error, +) +from azure.core.paging import ItemPaged +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpResponse +from azure.core.polling import LROPoller, NoPolling, PollingMethod +from azure.core.rest import HttpRequest +from azure.core.tracing.decorator import distributed_trace +from azure.core.utils import case_insensitive_dict +from azure.mgmt.core.exceptions import ARMErrorFormat +from azure.mgmt.core.polling.arm_polling import ARMPolling + +from .. 
import models as _models +from ..._serialization import Serializer +from .._vendor import _convert_request, _format_url_section + +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +_SERIALIZER = Serializer() +_SERIALIZER.client_side_validation = False + + +def build_disks_create_or_update_request( + resource_group_name: str, disk_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "diskName": _SERIALIZER.url("disk_name", disk_name, "str"), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_disks_update_request( + resource_group_name: str, disk_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "diskName": _SERIALIZER.url("disk_name", disk_name, "str"), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_disks_get_request( + resource_group_name: str, disk_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + 
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-01-02")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "diskName": _SERIALIZER.url("disk_name", disk_name, "str"), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_disks_delete_request( + resource_group_name: str, disk_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-01-02")) + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "diskName": _SERIALIZER.url("disk_name", disk_name, "str"), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, **kwargs) + + +def build_disks_list_by_resource_group_request( + resource_group_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-01-02")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_disks_list_request(subscription_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-01-02")) + accept = 
_headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/disks") + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_disks_grant_access_request( + resource_group_name: str, disk_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/beginGetAccess", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "diskName": _SERIALIZER.url("disk_name", disk_name, "str"), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_disks_revoke_access_request( + resource_group_name: str, disk_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-01-02")) + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/endGetAccess", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "diskName": _SERIALIZER.url("disk_name", disk_name, "str"), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + return HttpRequest(method="POST", url=_url, params=_params, **kwargs) + + +def build_disk_accesses_create_or_update_request( + resource_group_name: str, disk_access_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", 
_params.pop("api-version", "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "diskAccessName": _SERIALIZER.url("disk_access_name", disk_access_name, "str"), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_disk_accesses_update_request( + resource_group_name: str, disk_access_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "diskAccessName": _SERIALIZER.url("disk_access_name", disk_access_name, "str"), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_disk_accesses_get_request( + resource_group_name: str, disk_access_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-01-02")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 
"str"), + "diskAccessName": _SERIALIZER.url("disk_access_name", disk_access_name, "str"), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_disk_accesses_delete_request( + resource_group_name: str, disk_access_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-01-02")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "diskAccessName": _SERIALIZER.url("disk_access_name", disk_access_name, "str"), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_disk_accesses_list_by_resource_group_request( + resource_group_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-01-02")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_disk_accesses_list_request(subscription_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-01-02")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/diskAccesses") + path_format_arguments = { + "subscriptionId": 
_SERIALIZER.url("subscription_id", subscription_id, "str"), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_disk_accesses_get_private_link_resources_request( + resource_group_name: str, disk_access_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-01-02")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}/privateLinkResources", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "diskAccessName": _SERIALIZER.url("disk_access_name", disk_access_name, "str"), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_disk_accesses_update_a_private_endpoint_connection_request( + resource_group_name: str, + disk_access_name: str, + private_endpoint_connection_name: str, + subscription_id: str, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}/privateEndpointConnections/{privateEndpointConnectionName}", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "diskAccessName": _SERIALIZER.url("disk_access_name", disk_access_name, "str"), + "privateEndpointConnectionName": _SERIALIZER.url( + "private_endpoint_connection_name", private_endpoint_connection_name, "str" + ), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, 
headers=_headers, **kwargs) + + +def build_disk_accesses_get_a_private_endpoint_connection_request( + resource_group_name: str, + disk_access_name: str, + private_endpoint_connection_name: str, + subscription_id: str, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-01-02")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}/privateEndpointConnections/{privateEndpointConnectionName}", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "diskAccessName": _SERIALIZER.url("disk_access_name", disk_access_name, "str"), + "privateEndpointConnectionName": _SERIALIZER.url( + "private_endpoint_connection_name", private_endpoint_connection_name, "str" + ), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_disk_accesses_delete_a_private_endpoint_connection_request( + resource_group_name: str, + disk_access_name: str, + private_endpoint_connection_name: str, + subscription_id: str, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-01-02")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}/privateEndpointConnections/{privateEndpointConnectionName}", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "diskAccessName": _SERIALIZER.url("disk_access_name", disk_access_name, "str"), + "privateEndpointConnectionName": _SERIALIZER.url( + "private_endpoint_connection_name", private_endpoint_connection_name, "str" + ), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_disk_accesses_list_private_endpoint_connections_request( + resource_group_name: str, disk_access_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", 
_params.pop("api-version", "2023-01-02")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}/privateEndpointConnections", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "diskAccessName": _SERIALIZER.url("disk_access_name", disk_access_name, "str"), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_disk_encryption_sets_create_or_update_request( + resource_group_name: str, disk_encryption_set_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "diskEncryptionSetName": _SERIALIZER.url("disk_encryption_set_name", disk_encryption_set_name, "str"), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_disk_encryption_sets_update_request( + resource_group_name: str, disk_encryption_set_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", 
resource_group_name, "str"), + "diskEncryptionSetName": _SERIALIZER.url("disk_encryption_set_name", disk_encryption_set_name, "str"), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_disk_encryption_sets_get_request( + resource_group_name: str, disk_encryption_set_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-01-02")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "diskEncryptionSetName": _SERIALIZER.url("disk_encryption_set_name", disk_encryption_set_name, "str"), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_disk_encryption_sets_delete_request( + resource_group_name: str, disk_encryption_set_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-01-02")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "diskEncryptionSetName": _SERIALIZER.url("disk_encryption_set_name", disk_encryption_set_name, "str"), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_disk_encryption_sets_list_by_resource_group_request( + resource_group_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = 
case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-01-02")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_disk_encryption_sets_list_request(subscription_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-01-02")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/diskEncryptionSets") + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_disk_encryption_sets_list_associated_resources_request( + resource_group_name: str, disk_encryption_set_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-01-02")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}/associatedResources", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "diskEncryptionSetName": _SERIALIZER.url("disk_encryption_set_name", disk_encryption_set_name, "str"), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_disk_restore_point_get_request( + resource_group_name: str, + 
restore_point_collection_name: str, + vm_restore_point_name: str, + disk_restore_point_name: str, + subscription_id: str, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-01-02")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}/restorePoints/{vmRestorePointName}/diskRestorePoints/{diskRestorePointName}", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "restorePointCollectionName": _SERIALIZER.url( + "restore_point_collection_name", restore_point_collection_name, "str" + ), + "vmRestorePointName": _SERIALIZER.url("vm_restore_point_name", vm_restore_point_name, "str"), + "diskRestorePointName": _SERIALIZER.url("disk_restore_point_name", disk_restore_point_name, "str"), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_disk_restore_point_list_by_restore_point_request( + resource_group_name: str, + restore_point_collection_name: str, + vm_restore_point_name: str, + subscription_id: str, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-01-02")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}/restorePoints/{vmRestorePointName}/diskRestorePoints", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "restorePointCollectionName": _SERIALIZER.url( + "restore_point_collection_name", restore_point_collection_name, "str" + ), + "vmRestorePointName": _SERIALIZER.url("vm_restore_point_name", vm_restore_point_name, "str"), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_disk_restore_point_grant_access_request( + resource_group_name: str, + restore_point_collection_name: str, + vm_restore_point_name: str, + disk_restore_point_name: str, + subscription_id: str, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", 
{}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}/restorePoints/{vmRestorePointName}/diskRestorePoints/{diskRestorePointName}/beginGetAccess", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "restorePointCollectionName": _SERIALIZER.url( + "restore_point_collection_name", restore_point_collection_name, "str" + ), + "vmRestorePointName": _SERIALIZER.url("vm_restore_point_name", vm_restore_point_name, "str"), + "diskRestorePointName": _SERIALIZER.url("disk_restore_point_name", disk_restore_point_name, "str"), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_disk_restore_point_revoke_access_request( + resource_group_name: str, + restore_point_collection_name: str, + vm_restore_point_name: str, + disk_restore_point_name: str, + subscription_id: str, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-01-02")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}/restorePoints/{vmRestorePointName}/diskRestorePoints/{diskRestorePointName}/endGetAccess", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "restorePointCollectionName": _SERIALIZER.url( + "restore_point_collection_name", restore_point_collection_name, "str" + ), + "vmRestorePointName": _SERIALIZER.url("vm_restore_point_name", vm_restore_point_name, "str"), + "diskRestorePointName": _SERIALIZER.url("disk_restore_point_name", disk_restore_point_name, "str"), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_snapshots_create_or_update_request( + resource_group_name: str, snapshot_name: str, subscription_id: str, 
**kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "snapshotName": _SERIALIZER.url("snapshot_name", snapshot_name, "str"), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_snapshots_update_request( + resource_group_name: str, snapshot_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "snapshotName": _SERIALIZER.url("snapshot_name", snapshot_name, "str"), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_snapshots_get_request( + resource_group_name: str, snapshot_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-01-02")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}", + ) # pylint: disable=line-too-long + 
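+    # Map each {placeholder} in the URL template to its serialized path value; every
+    # build_*_request helper in this module repeats this same URL/params/headers pattern.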
path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "snapshotName": _SERIALIZER.url("snapshot_name", snapshot_name, "str"), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_snapshots_delete_request( + resource_group_name: str, snapshot_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-01-02")) + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "snapshotName": _SERIALIZER.url("snapshot_name", snapshot_name, "str"), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, **kwargs) + + +def build_snapshots_list_by_resource_group_request( + resource_group_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-01-02")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_snapshots_list_request(subscription_id: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-01-02")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/snapshots") + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + } + + _url: str = 
_format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_snapshots_grant_access_request( + resource_group_name: str, snapshot_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/beginGetAccess", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "snapshotName": _SERIALIZER.url("snapshot_name", snapshot_name, "str"), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_snapshots_revoke_access_request( + resource_group_name: str, snapshot_name: str, subscription_id: str, **kwargs: Any +) -> HttpRequest: + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-01-02")) + # Construct URL + _url = kwargs.pop( + "template_url", + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/endGetAccess", + ) # pylint: disable=line-too-long + path_format_arguments = { + "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"), + "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"), + "snapshotName": _SERIALIZER.url("snapshot_name", snapshot_name, "str"), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + return HttpRequest(method="POST", url=_url, params=_params, **kwargs) + + +class DisksOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.compute.v2023_01_02.ComputeManagementClient`'s + :attr:`disks` attribute. 
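+
+        A minimal usage sketch (illustrative only; assumes an authenticated
+        ``ComputeManagementClient`` named ``client``; the resource names and the
+        ``disk_model`` payload below are hypothetical placeholders)::
+
+            poller = client.disks.begin_create_or_update(
+                resource_group_name="example-rg",
+                disk_name="example-disk",
+                disk=disk_model,  # a Disk model (or an IO/bytes JSON payload)
+            )
+            created_disk = poller.result()  # blocks until the long-running operation completes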
+ """ + + models = _models + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + + def _create_or_update_initial( + self, resource_group_name: str, disk_name: str, disk: Union[_models.Disk, IO], **kwargs: Any + ) -> _models.Disk: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Disk] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(disk, (IOBase, bytes)): + _content = disk + else: + _json = self._serialize.body(disk, "Disk") + + request = build_disks_create_or_update_request( + resource_group_name=resource_group_name, + disk_name=disk_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + template_url=self._create_or_update_initial.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + if response.status_code == 200: + deserialized = self._deserialize("Disk", pipeline_response) + + if response.status_code == 202: + deserialized = self._deserialize("Disk", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + _create_or_update_initial.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}" + } + + @overload + def begin_create_or_update( + self, + resource_group_name: str, + disk_name: str, + disk: _models.Disk, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.Disk]: + """Creates or updates a disk. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_name: The name of the managed disk that is being created. The name can't be changed + after the disk is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The + maximum name length is 80 characters. Required. + :type disk_name: str + :param disk: Disk object supplied in the body of the Put disk operation. Required. 
+ :type disk: ~azure.mgmt.compute.v2023_01_02.models.Disk + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of LROPoller that returns either Disk or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2023_01_02.models.Disk] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_create_or_update( + self, + resource_group_name: str, + disk_name: str, + disk: IO, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.Disk]: + """Creates or updates a disk. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_name: The name of the managed disk that is being created. The name can't be changed + after the disk is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The + maximum name length is 80 characters. Required. + :type disk_name: str + :param disk: Disk object supplied in the body of the Put disk operation. Required. + :type disk: IO + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of LROPoller that returns either Disk or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2023_01_02.models.Disk] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def begin_create_or_update( + self, resource_group_name: str, disk_name: str, disk: Union[_models.Disk, IO], **kwargs: Any + ) -> LROPoller[_models.Disk]: + """Creates or updates a disk. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_name: The name of the managed disk that is being created. The name can't be changed + after the disk is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The + maximum name length is 80 characters. Required. + :type disk_name: str + :param disk: Disk object supplied in the body of the Put disk operation. Is either a Disk type + or a IO type. Required. 
+ :type disk: ~azure.mgmt.compute.v2023_01_02.models.Disk or IO + :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. + Default value is None. + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of LROPoller that returns either Disk or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2023_01_02.models.Disk] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Disk] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._create_or_update_initial( + resource_group_name=resource_group_name, + disk_name=disk_name, + disk=disk, + api_version=api_version, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize("Disk", pipeline_response) + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + if polling is True: + polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs)) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + begin_create_or_update.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}" + } + + def _update_initial( + self, resource_group_name: str, disk_name: str, disk: Union[_models.DiskUpdate, IO], **kwargs: Any + ) -> _models.Disk: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: 
ClsType[_models.Disk] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(disk, (IOBase, bytes)): + _content = disk + else: + _json = self._serialize.body(disk, "DiskUpdate") + + request = build_disks_update_request( + resource_group_name=resource_group_name, + disk_name=disk_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + template_url=self._update_initial.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + if response.status_code == 200: + deserialized = self._deserialize("Disk", pipeline_response) + + if response.status_code == 202: + deserialized = self._deserialize("Disk", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + _update_initial.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}" + } + + @overload + def begin_update( + self, + resource_group_name: str, + disk_name: str, + disk: _models.DiskUpdate, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.Disk]: + """Updates (patches) a disk. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_name: The name of the managed disk that is being created. The name can't be changed + after the disk is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The + maximum name length is 80 characters. Required. + :type disk_name: str + :param disk: Disk object supplied in the body of the Patch disk operation. Required. + :type disk: ~azure.mgmt.compute.v2023_01_02.models.DiskUpdate + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of LROPoller that returns either Disk or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2023_01_02.models.Disk] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_update( + self, + resource_group_name: str, + disk_name: str, + disk: IO, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.Disk]: + """Updates (patches) a disk. 
+ + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_name: The name of the managed disk that is being created. The name can't be changed + after the disk is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The + maximum name length is 80 characters. Required. + :type disk_name: str + :param disk: Disk object supplied in the body of the Patch disk operation. Required. + :type disk: IO + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of LROPoller that returns either Disk or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2023_01_02.models.Disk] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def begin_update( + self, resource_group_name: str, disk_name: str, disk: Union[_models.DiskUpdate, IO], **kwargs: Any + ) -> LROPoller[_models.Disk]: + """Updates (patches) a disk. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_name: The name of the managed disk that is being created. The name can't be changed + after the disk is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The + maximum name length is 80 characters. Required. + :type disk_name: str + :param disk: Disk object supplied in the body of the Patch disk operation. Is either a + DiskUpdate type or a IO type. Required. + :type disk: ~azure.mgmt.compute.v2023_01_02.models.DiskUpdate or IO + :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. + Default value is None. + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. 
+ :return: An instance of LROPoller that returns either Disk or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2023_01_02.models.Disk] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Disk] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._update_initial( + resource_group_name=resource_group_name, + disk_name=disk_name, + disk=disk, + api_version=api_version, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize("Disk", pipeline_response) + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + if polling is True: + polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs)) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + begin_update.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}" + } + + @distributed_trace + def get(self, resource_group_name: str, disk_name: str, **kwargs: Any) -> _models.Disk: + """Gets information about a disk. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_name: The name of the managed disk that is being created. The name can't be changed + after the disk is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The + maximum name length is 80 characters. Required. 
+ :type disk_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: Disk or the result of cls(response) + :rtype: ~azure.mgmt.compute.v2023_01_02.models.Disk + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[_models.Disk] = kwargs.pop("cls", None) + + request = build_disks_get_request( + resource_group_name=resource_group_name, + disk_name=disk_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self.get.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize("Disk", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + + get.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}" + } + + def _delete_initial( # pylint: disable=inconsistent-return-statements + self, resource_group_name: str, disk_name: str, **kwargs: Any + ) -> None: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_disks_delete_request( + resource_group_name=resource_group_name, + disk_name=disk_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self._delete_initial.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + if cls: + return cls(pipeline_response, None, {}) + + _delete_initial.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}" + } + + @distributed_trace + def begin_delete(self, resource_group_name: str, disk_name: str, **kwargs: Any) -> 
LROPoller[None]: + """Deletes a disk. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_name: The name of the managed disk that is being created. The name can't be changed + after the disk is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The + maximum name length is 80 characters. Required. + :type disk_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of LROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._delete_initial( # type: ignore + resource_group_name=resource_group_name, + disk_name=disk_name, + api_version=api_version, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) + + if polling is True: + polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs)) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + begin_delete.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}" + } + + @distributed_trace + def list_by_resource_group(self, resource_group_name: str, **kwargs: Any) -> Iterable["_models.Disk"]: + """Lists all the disks under a resource group. + + :param resource_group_name: The name of the resource group. Required. 
+ :type resource_group_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either Disk or the result of cls(response) + :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2023_01_02.models.Disk] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[_models.DiskList] = kwargs.pop("cls", None) + + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + request = build_disks_list_by_resource_group_request( + resource_group_name=resource_group_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self.list_by_resource_group.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + request.method = "GET" + return request + + def extract_data(pipeline_response): + deserialized = self._deserialize("DiskList", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.next_link or None, iter(list_of_elem) + + def get_next(next_link=None): + request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + list_by_resource_group.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks" + } + + @distributed_trace + def list(self, **kwargs: Any) -> Iterable["_models.Disk"]: + """Lists all the disks under a subscription. 
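+
+        A short iteration sketch (illustrative only; ``client`` is assumed to be an
+        authenticated ``ComputeManagementClient``)::
+
+            for disk in client.disks.list():
+                print(disk.name)  # each page item is deserialized into a Disk model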
+ + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either Disk or the result of cls(response) + :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2023_01_02.models.Disk] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[_models.DiskList] = kwargs.pop("cls", None) + + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + request = build_disks_list_request( + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self.list.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + request.method = "GET" + return request + + def extract_data(pipeline_response): + deserialized = self._deserialize("DiskList", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.next_link or None, iter(list_of_elem) + + def get_next(next_link=None): + request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + list.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/disks"} + + def _grant_access_initial( + self, + resource_group_name: str, + disk_name: str, + grant_access_data: Union[_models.GrantAccessData, IO], + **kwargs: Any + ) -> Optional[_models.AccessUri]: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[Optional[_models.AccessUri]] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + 
_json = None + _content = None + if isinstance(grant_access_data, (IOBase, bytes)): + _content = grant_access_data + else: + _json = self._serialize.body(grant_access_data, "GrantAccessData") + + request = build_disks_grant_access_request( + resource_group_name=resource_group_name, + disk_name=disk_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + template_url=self._grant_access_initial.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize("AccessUri", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + + _grant_access_initial.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/beginGetAccess" + } + + @overload + def begin_grant_access( + self, + resource_group_name: str, + disk_name: str, + grant_access_data: _models.GrantAccessData, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.AccessUri]: + """Grants access to a disk. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_name: The name of the managed disk that is being created. The name can't be changed + after the disk is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The + maximum name length is 80 characters. Required. + :type disk_name: str + :param grant_access_data: Access data object supplied in the body of the get disk access + operation. Required. + :type grant_access_data: ~azure.mgmt.compute.v2023_01_02.models.GrantAccessData + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of LROPoller that returns either AccessUri or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2023_01_02.models.AccessUri] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_grant_access( + self, + resource_group_name: str, + disk_name: str, + grant_access_data: IO, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.AccessUri]: + """Grants access to a disk. 
+ + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_name: The name of the managed disk that is being created. The name can't be changed + after the disk is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The + maximum name length is 80 characters. Required. + :type disk_name: str + :param grant_access_data: Access data object supplied in the body of the get disk access + operation. Required. + :type grant_access_data: IO + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of LROPoller that returns either AccessUri or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2023_01_02.models.AccessUri] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def begin_grant_access( + self, + resource_group_name: str, + disk_name: str, + grant_access_data: Union[_models.GrantAccessData, IO], + **kwargs: Any + ) -> LROPoller[_models.AccessUri]: + """Grants access to a disk. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_name: The name of the managed disk that is being created. The name can't be changed + after the disk is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The + maximum name length is 80 characters. Required. + :type disk_name: str + :param grant_access_data: Access data object supplied in the body of the get disk access + operation. Is either a GrantAccessData type or a IO type. Required. + :type grant_access_data: ~azure.mgmt.compute.v2023_01_02.models.GrantAccessData or IO + :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. + Default value is None. + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. 
+ :return: An instance of LROPoller that returns either AccessUri or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2023_01_02.models.AccessUri] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.AccessUri] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._grant_access_initial( + resource_group_name=resource_group_name, + disk_name=disk_name, + grant_access_data=grant_access_data, + api_version=api_version, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize("AccessUri", pipeline_response) + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs) + ) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + begin_grant_access.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/beginGetAccess" + } + + def _revoke_access_initial( # pylint: disable=inconsistent-return-statements + self, resource_group_name: str, disk_name: str, **kwargs: Any + ) -> None: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_disks_revoke_access_request( + resource_group_name=resource_group_name, + disk_name=disk_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self._revoke_access_initial.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise 
HttpResponseError(response=response, error_format=ARMErrorFormat) + + if cls: + return cls(pipeline_response, None, {}) + + _revoke_access_initial.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/endGetAccess" + } + + @distributed_trace + def begin_revoke_access(self, resource_group_name: str, disk_name: str, **kwargs: Any) -> LROPoller[None]: + """Revokes access to a disk. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_name: The name of the managed disk that is being created. The name can't be changed + after the disk is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The + maximum name length is 80 characters. Required. + :type disk_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of LROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._revoke_access_initial( # type: ignore + resource_group_name=resource_group_name, + disk_name=disk_name, + api_version=api_version, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs) + ) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + begin_revoke_access.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/endGetAccess" + } + + +class DiskAccessesOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. 
+ + Instead, you should access the following operations through + :class:`~azure.mgmt.compute.v2023_01_02.ComputeManagementClient`'s + :attr:`disk_accesses` attribute. + """ + + models = _models + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + + def _create_or_update_initial( + self, resource_group_name: str, disk_access_name: str, disk_access: Union[_models.DiskAccess, IO], **kwargs: Any + ) -> _models.DiskAccess: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.DiskAccess] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(disk_access, (IOBase, bytes)): + _content = disk_access + else: + _json = self._serialize.body(disk_access, "DiskAccess") + + request = build_disk_accesses_create_or_update_request( + resource_group_name=resource_group_name, + disk_access_name=disk_access_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + template_url=self._create_or_update_initial.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + if response.status_code == 200: + deserialized = self._deserialize("DiskAccess", pipeline_response) + + if response.status_code == 202: + deserialized = self._deserialize("DiskAccess", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + _create_or_update_initial.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}" + } + + @overload + def begin_create_or_update( + self, + resource_group_name: str, + disk_access_name: str, + disk_access: _models.DiskAccess, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.DiskAccess]: + """Creates or updates a disk access resource. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_access_name: The name of the disk access resource that is being created. 
The name + can't be changed after the disk encryption set is created. Supported characters for the name + are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters. Required. + :type disk_access_name: str + :param disk_access: disk access object supplied in the body of the Put disk access operation. + Required. + :type disk_access: ~azure.mgmt.compute.v2023_01_02.models.DiskAccess + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of LROPoller that returns either DiskAccess or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2023_01_02.models.DiskAccess] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_create_or_update( + self, + resource_group_name: str, + disk_access_name: str, + disk_access: IO, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.DiskAccess]: + """Creates or updates a disk access resource. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_access_name: The name of the disk access resource that is being created. The name + can't be changed after the disk encryption set is created. Supported characters for the name + are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters. Required. + :type disk_access_name: str + :param disk_access: disk access object supplied in the body of the Put disk access operation. + Required. + :type disk_access: IO + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of LROPoller that returns either DiskAccess or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2023_01_02.models.DiskAccess] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def begin_create_or_update( + self, resource_group_name: str, disk_access_name: str, disk_access: Union[_models.DiskAccess, IO], **kwargs: Any + ) -> LROPoller[_models.DiskAccess]: + """Creates or updates a disk access resource. + + :param resource_group_name: The name of the resource group. Required. 
+ :type resource_group_name: str + :param disk_access_name: The name of the disk access resource that is being created. The name + can't be changed after the disk encryption set is created. Supported characters for the name + are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters. Required. + :type disk_access_name: str + :param disk_access: disk access object supplied in the body of the Put disk access operation. + Is either a DiskAccess type or a IO type. Required. + :type disk_access: ~azure.mgmt.compute.v2023_01_02.models.DiskAccess or IO + :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. + Default value is None. + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of LROPoller that returns either DiskAccess or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2023_01_02.models.DiskAccess] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.DiskAccess] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._create_or_update_initial( + resource_group_name=resource_group_name, + disk_access_name=disk_access_name, + disk_access=disk_access, + api_version=api_version, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize("DiskAccess", pipeline_response) + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + if polling is True: + polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs)) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + begin_create_or_update.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}" + } + + def _update_initial( + self, + resource_group_name: str, + disk_access_name: str, + disk_access: Union[_models.DiskAccessUpdate, IO], + 
**kwargs: Any + ) -> _models.DiskAccess: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.DiskAccess] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(disk_access, (IOBase, bytes)): + _content = disk_access + else: + _json = self._serialize.body(disk_access, "DiskAccessUpdate") + + request = build_disk_accesses_update_request( + resource_group_name=resource_group_name, + disk_access_name=disk_access_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + template_url=self._update_initial.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + if response.status_code == 200: + deserialized = self._deserialize("DiskAccess", pipeline_response) + + if response.status_code == 202: + deserialized = self._deserialize("DiskAccess", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + _update_initial.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}" + } + + @overload + def begin_update( + self, + resource_group_name: str, + disk_access_name: str, + disk_access: _models.DiskAccessUpdate, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.DiskAccess]: + """Updates (patches) a disk access resource. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_access_name: The name of the disk access resource that is being created. The name + can't be changed after the disk encryption set is created. Supported characters for the name + are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters. Required. + :type disk_access_name: str + :param disk_access: disk access object supplied in the body of the Patch disk access operation. + Required. + :type disk_access: ~azure.mgmt.compute.v2023_01_02.models.DiskAccessUpdate + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. 
Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of LROPoller that returns either DiskAccess or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2023_01_02.models.DiskAccess] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_update( + self, + resource_group_name: str, + disk_access_name: str, + disk_access: IO, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.DiskAccess]: + """Updates (patches) a disk access resource. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_access_name: The name of the disk access resource that is being created. The name + can't be changed after the disk encryption set is created. Supported characters for the name + are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters. Required. + :type disk_access_name: str + :param disk_access: disk access object supplied in the body of the Patch disk access operation. + Required. + :type disk_access: IO + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of LROPoller that returns either DiskAccess or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2023_01_02.models.DiskAccess] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def begin_update( + self, + resource_group_name: str, + disk_access_name: str, + disk_access: Union[_models.DiskAccessUpdate, IO], + **kwargs: Any + ) -> LROPoller[_models.DiskAccess]: + """Updates (patches) a disk access resource. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_access_name: The name of the disk access resource that is being created. The name + can't be changed after the disk encryption set is created. Supported characters for the name + are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters. Required. + :type disk_access_name: str + :param disk_access: disk access object supplied in the body of the Patch disk access operation. + Is either a DiskAccessUpdate type or a IO type. Required. + :type disk_access: ~azure.mgmt.compute.v2023_01_02.models.DiskAccessUpdate or IO + :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. + Default value is None. 
+ :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of LROPoller that returns either DiskAccess or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2023_01_02.models.DiskAccess] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.DiskAccess] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._update_initial( + resource_group_name=resource_group_name, + disk_access_name=disk_access_name, + disk_access=disk_access, + api_version=api_version, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize("DiskAccess", pipeline_response) + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + if polling is True: + polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs)) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + begin_update.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}" + } + + @distributed_trace + def get(self, resource_group_name: str, disk_access_name: str, **kwargs: Any) -> _models.DiskAccess: + """Gets information about a disk access resource. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_access_name: The name of the disk access resource that is being created. The name + can't be changed after the disk encryption set is created. Supported characters for the name + are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters. Required. 
+ :type disk_access_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: DiskAccess or the result of cls(response) + :rtype: ~azure.mgmt.compute.v2023_01_02.models.DiskAccess + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[_models.DiskAccess] = kwargs.pop("cls", None) + + request = build_disk_accesses_get_request( + resource_group_name=resource_group_name, + disk_access_name=disk_access_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self.get.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize("DiskAccess", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + + get.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}" + } + + def _delete_initial( # pylint: disable=inconsistent-return-statements + self, resource_group_name: str, disk_access_name: str, **kwargs: Any + ) -> None: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_disk_accesses_delete_request( + resource_group_name=resource_group_name, + disk_access_name=disk_access_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self._delete_initial.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + if cls: + return cls(pipeline_response, None, {}) + + _delete_initial.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}" + } + + 
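The disk-access methods in this operation group follow the same long-running-operation pattern as the Disks operations above: each begin_* method issues the initial request and returns an LROPoller driven by ARMPolling unless polling=False or a custom PollingMethod is supplied, and a saved continuation_token can resume a poller without repeating the initial call. The following is a minimal usage sketch, not part of the generated patch; it assumes azure-identity is installed, that the multi-API ComputeManagementClient routes its disk_accesses attribute to this 2023-01-02 version, and that a plain dict body is accepted in place of a DiskAccess model (standard msrest behavior). The subscription, resource group, and disk access names are placeholders.

from azure.identity import DefaultAzureCredential
from azure.mgmt.compute import ComputeManagementClient

# Hypothetical identifiers -- replace with real values.
subscription_id = "00000000-0000-0000-0000-000000000000"
resource_group = "example-rg"
disk_access_name = "example-disk-access"

client = ComputeManagementClient(DefaultAzureCredential(), subscription_id)

# begin_create_or_update returns LROPoller[DiskAccess]; result() blocks until the
# service reaches a terminal state (ARMPolling honors Retry-After when present).
poller = client.disk_accesses.begin_create_or_update(
    resource_group,
    disk_access_name,
    {"location": "eastus"},  # a DiskAccess model instance works equally well
)
disk_access = poller.result()

# The poller can be suspended and resumed later by passing this token back
# through the continuation_token keyword documented on the begin_* methods.
token = poller.continuation_token()

# begin_delete (defined just below) returns LROPoller[None]; wait() blocks
# without producing a value.
client.disk_accesses.begin_delete(resource_group, disk_access_name).wait()

Passing polling=False to any begin_* method skips polling entirely and returns a poller whose result() reflects only the initial response, which is occasionally useful when the caller wants to drive polling itself.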
@distributed_trace + def begin_delete(self, resource_group_name: str, disk_access_name: str, **kwargs: Any) -> LROPoller[None]: + """Deletes a disk access resource. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_access_name: The name of the disk access resource that is being created. The name + can't be changed after the disk encryption set is created. Supported characters for the name + are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters. Required. + :type disk_access_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of LROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._delete_initial( # type: ignore + resource_group_name=resource_group_name, + disk_access_name=disk_access_name, + api_version=api_version, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) + + if polling is True: + polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs)) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + begin_delete.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}" + } + + @distributed_trace + def list_by_resource_group(self, resource_group_name: str, **kwargs: Any) -> Iterable["_models.DiskAccess"]: + """Lists all the disk access resources under a resource group. + + :param resource_group_name: The name of the resource group. Required. 
+ :type resource_group_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either DiskAccess or the result of cls(response) + :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2023_01_02.models.DiskAccess] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[_models.DiskAccessList] = kwargs.pop("cls", None) + + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + request = build_disk_accesses_list_by_resource_group_request( + resource_group_name=resource_group_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self.list_by_resource_group.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + request.method = "GET" + return request + + def extract_data(pipeline_response): + deserialized = self._deserialize("DiskAccessList", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.next_link or None, iter(list_of_elem) + + def get_next(next_link=None): + request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + list_by_resource_group.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses" + } + + @distributed_trace + def list(self, **kwargs: Any) -> Iterable["_models.DiskAccess"]: + """Lists all the disk access resources under a subscription. 
+ + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either DiskAccess or the result of cls(response) + :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2023_01_02.models.DiskAccess] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[_models.DiskAccessList] = kwargs.pop("cls", None) + + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + request = build_disk_accesses_list_request( + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self.list.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + request.method = "GET" + return request + + def extract_data(pipeline_response): + deserialized = self._deserialize("DiskAccessList", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.next_link or None, iter(list_of_elem) + + def get_next(next_link=None): + request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + list.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/diskAccesses"} + + @distributed_trace + def get_private_link_resources( + self, resource_group_name: str, disk_access_name: str, **kwargs: Any + ) -> _models.PrivateLinkResourceListResult: + """Gets the private link resources possible under disk access resource. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_access_name: The name of the disk access resource that is being created. The name + can't be changed after the disk encryption set is created. Supported characters for the name + are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters. Required. 
+ :type disk_access_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PrivateLinkResourceListResult or the result of cls(response) + :rtype: ~azure.mgmt.compute.v2023_01_02.models.PrivateLinkResourceListResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[_models.PrivateLinkResourceListResult] = kwargs.pop("cls", None) + + request = build_disk_accesses_get_private_link_resources_request( + resource_group_name=resource_group_name, + disk_access_name=disk_access_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self.get_private_link_resources.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize("PrivateLinkResourceListResult", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + + get_private_link_resources.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}/privateLinkResources" + } + + def _update_a_private_endpoint_connection_initial( + self, + resource_group_name: str, + disk_access_name: str, + private_endpoint_connection_name: str, + private_endpoint_connection: Union[_models.PrivateEndpointConnection, IO], + **kwargs: Any + ) -> _models.PrivateEndpointConnection: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.PrivateEndpointConnection] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(private_endpoint_connection, (IOBase, bytes)): + _content = private_endpoint_connection + else: + _json = self._serialize.body(private_endpoint_connection, "PrivateEndpointConnection") + + request = build_disk_accesses_update_a_private_endpoint_connection_request( + resource_group_name=resource_group_name, + disk_access_name=disk_access_name, + private_endpoint_connection_name=private_endpoint_connection_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + 
content_type=content_type, + json=_json, + content=_content, + template_url=self._update_a_private_endpoint_connection_initial.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + if response.status_code == 200: + deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response) + + if response.status_code == 202: + deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + _update_a_private_endpoint_connection_initial.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}/privateEndpointConnections/{privateEndpointConnectionName}" + } + + @overload + def begin_update_a_private_endpoint_connection( + self, + resource_group_name: str, + disk_access_name: str, + private_endpoint_connection_name: str, + private_endpoint_connection: _models.PrivateEndpointConnection, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.PrivateEndpointConnection]: + """Approve or reject a private endpoint connection under disk access resource, this can't be used + to create a new private endpoint connection. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_access_name: The name of the disk access resource that is being created. The name + can't be changed after the disk encryption set is created. Supported characters for the name + are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters. Required. + :type disk_access_name: str + :param private_endpoint_connection_name: The name of the private endpoint connection. Required. + :type private_endpoint_connection_name: str + :param private_endpoint_connection: private endpoint connection object supplied in the body of + the Put private endpoint connection operation. Required. + :type private_endpoint_connection: + ~azure.mgmt.compute.v2023_01_02.models.PrivateEndpointConnection + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. 
+ :return: An instance of LROPoller that returns either PrivateEndpointConnection or the result + of cls(response) + :rtype: + ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2023_01_02.models.PrivateEndpointConnection] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_update_a_private_endpoint_connection( + self, + resource_group_name: str, + disk_access_name: str, + private_endpoint_connection_name: str, + private_endpoint_connection: IO, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.PrivateEndpointConnection]: + """Approve or reject a private endpoint connection under disk access resource, this can't be used + to create a new private endpoint connection. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_access_name: The name of the disk access resource that is being created. The name + can't be changed after the disk encryption set is created. Supported characters for the name + are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters. Required. + :type disk_access_name: str + :param private_endpoint_connection_name: The name of the private endpoint connection. Required. + :type private_endpoint_connection_name: str + :param private_endpoint_connection: private endpoint connection object supplied in the body of + the Put private endpoint connection operation. Required. + :type private_endpoint_connection: IO + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of LROPoller that returns either PrivateEndpointConnection or the result + of cls(response) + :rtype: + ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2023_01_02.models.PrivateEndpointConnection] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def begin_update_a_private_endpoint_connection( + self, + resource_group_name: str, + disk_access_name: str, + private_endpoint_connection_name: str, + private_endpoint_connection: Union[_models.PrivateEndpointConnection, IO], + **kwargs: Any + ) -> LROPoller[_models.PrivateEndpointConnection]: + """Approve or reject a private endpoint connection under disk access resource, this can't be used + to create a new private endpoint connection. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_access_name: The name of the disk access resource that is being created. The name + can't be changed after the disk encryption set is created. Supported characters for the name + are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters. Required. + :type disk_access_name: str + :param private_endpoint_connection_name: The name of the private endpoint connection. Required. 
+ :type private_endpoint_connection_name: str + :param private_endpoint_connection: private endpoint connection object supplied in the body of + the Put private endpoint connection operation. Is either a PrivateEndpointConnection type or a + IO type. Required. + :type private_endpoint_connection: + ~azure.mgmt.compute.v2023_01_02.models.PrivateEndpointConnection or IO + :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. + Default value is None. + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of LROPoller that returns either PrivateEndpointConnection or the result + of cls(response) + :rtype: + ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2023_01_02.models.PrivateEndpointConnection] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.PrivateEndpointConnection] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._update_a_private_endpoint_connection_initial( + resource_group_name=resource_group_name, + disk_access_name=disk_access_name, + private_endpoint_connection_name=private_endpoint_connection_name, + private_endpoint_connection=private_endpoint_connection, + api_version=api_version, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response) + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + if polling is True: + polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs)) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + begin_update_a_private_endpoint_connection.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}/privateEndpointConnections/{privateEndpointConnectionName}" + } + + @distributed_trace + def get_a_private_endpoint_connection( + 
self, resource_group_name: str, disk_access_name: str, private_endpoint_connection_name: str, **kwargs: Any + ) -> _models.PrivateEndpointConnection: + """Gets information about a private endpoint connection under a disk access resource. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_access_name: The name of the disk access resource that is being created. The name + can't be changed after the disk encryption set is created. Supported characters for the name + are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters. Required. + :type disk_access_name: str + :param private_endpoint_connection_name: The name of the private endpoint connection. Required. + :type private_endpoint_connection_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PrivateEndpointConnection or the result of cls(response) + :rtype: ~azure.mgmt.compute.v2023_01_02.models.PrivateEndpointConnection + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[_models.PrivateEndpointConnection] = kwargs.pop("cls", None) + + request = build_disk_accesses_get_a_private_endpoint_connection_request( + resource_group_name=resource_group_name, + disk_access_name=disk_access_name, + private_endpoint_connection_name=private_endpoint_connection_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self.get_a_private_endpoint_connection.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + + get_a_private_endpoint_connection.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}/privateEndpointConnections/{privateEndpointConnectionName}" + } + + def _delete_a_private_endpoint_connection_initial( # pylint: disable=inconsistent-return-statements + self, resource_group_name: str, disk_access_name: str, private_endpoint_connection_name: str, **kwargs: Any + ) -> None: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version 
or "2023-01-02")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_disk_accesses_delete_a_private_endpoint_connection_request( + resource_group_name=resource_group_name, + disk_access_name=disk_access_name, + private_endpoint_connection_name=private_endpoint_connection_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self._delete_a_private_endpoint_connection_initial.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + if cls: + return cls(pipeline_response, None, {}) + + _delete_a_private_endpoint_connection_initial.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}/privateEndpointConnections/{privateEndpointConnectionName}" + } + + @distributed_trace + def begin_delete_a_private_endpoint_connection( + self, resource_group_name: str, disk_access_name: str, private_endpoint_connection_name: str, **kwargs: Any + ) -> LROPoller[None]: + """Deletes a private endpoint connection under a disk access resource. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_access_name: The name of the disk access resource that is being created. The name + can't be changed after the disk encryption set is created. Supported characters for the name + are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters. Required. + :type disk_access_name: str + :param private_endpoint_connection_name: The name of the private endpoint connection. Required. + :type private_endpoint_connection_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. 
+ :return: An instance of LROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._delete_a_private_endpoint_connection_initial( # type: ignore + resource_group_name=resource_group_name, + disk_access_name=disk_access_name, + private_endpoint_connection_name=private_endpoint_connection_name, + api_version=api_version, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) + + if polling is True: + polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs)) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + begin_delete_a_private_endpoint_connection.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}/privateEndpointConnections/{privateEndpointConnectionName}" + } + + @distributed_trace + def list_private_endpoint_connections( + self, resource_group_name: str, disk_access_name: str, **kwargs: Any + ) -> Iterable["_models.PrivateEndpointConnection"]: + """List information about private endpoint connections under a disk access resource. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_access_name: The name of the disk access resource that is being created. The name + can't be changed after the disk encryption set is created. Supported characters for the name + are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters. Required. 
+ :type disk_access_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either PrivateEndpointConnection or the result of + cls(response) + :rtype: + ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2023_01_02.models.PrivateEndpointConnection] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[_models.PrivateEndpointConnectionListResult] = kwargs.pop("cls", None) + + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + request = build_disk_accesses_list_private_endpoint_connections_request( + resource_group_name=resource_group_name, + disk_access_name=disk_access_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self.list_private_endpoint_connections.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + request.method = "GET" + return request + + def extract_data(pipeline_response): + deserialized = self._deserialize("PrivateEndpointConnectionListResult", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.next_link or None, iter(list_of_elem) + + def get_next(next_link=None): + request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + list_private_endpoint_connections.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}/privateEndpointConnections" + } + + +class DiskEncryptionSetsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.compute.v2023_01_02.ComputeManagementClient`'s + :attr:`disk_encryption_sets` attribute. 
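+
+    A minimal usage sketch (the subscription ID and resource names are placeholders, not
+    values from this specification): the operation group is obtained from the client
+    rather than constructed directly.
+
+    .. code-block:: python
+
+        from azure.identity import DefaultAzureCredential
+        from azure.mgmt.compute.v2023_01_02 import ComputeManagementClient
+
+        client = ComputeManagementClient(DefaultAzureCredential(), "<subscription-id>")
+
+        # The attribute is already wired to the client's pipeline and configuration.
+        des = client.disk_encryption_sets.get("my-rg", "my-des")
+        print(des.name, des.id)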
+ """ + + models = _models + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + + def _create_or_update_initial( + self, + resource_group_name: str, + disk_encryption_set_name: str, + disk_encryption_set: Union[_models.DiskEncryptionSet, IO], + **kwargs: Any + ) -> _models.DiskEncryptionSet: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.DiskEncryptionSet] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(disk_encryption_set, (IOBase, bytes)): + _content = disk_encryption_set + else: + _json = self._serialize.body(disk_encryption_set, "DiskEncryptionSet") + + request = build_disk_encryption_sets_create_or_update_request( + resource_group_name=resource_group_name, + disk_encryption_set_name=disk_encryption_set_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + template_url=self._create_or_update_initial.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + if response.status_code == 200: + deserialized = self._deserialize("DiskEncryptionSet", pipeline_response) + + if response.status_code == 202: + deserialized = self._deserialize("DiskEncryptionSet", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + _create_or_update_initial.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}" + } + + @overload + def begin_create_or_update( + self, + resource_group_name: str, + disk_encryption_set_name: str, + disk_encryption_set: _models.DiskEncryptionSet, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.DiskEncryptionSet]: + """Creates or updates a disk encryption set. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_encryption_set_name: The name of the disk encryption set that is being created. 
The + name can't be changed after the disk encryption set is created. Supported characters for the + name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters. Required. + :type disk_encryption_set_name: str + :param disk_encryption_set: disk encryption set object supplied in the body of the Put disk + encryption set operation. Required. + :type disk_encryption_set: ~azure.mgmt.compute.v2023_01_02.models.DiskEncryptionSet + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of LROPoller that returns either DiskEncryptionSet or the result of + cls(response) + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2023_01_02.models.DiskEncryptionSet] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_create_or_update( + self, + resource_group_name: str, + disk_encryption_set_name: str, + disk_encryption_set: IO, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.DiskEncryptionSet]: + """Creates or updates a disk encryption set. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_encryption_set_name: The name of the disk encryption set that is being created. The + name can't be changed after the disk encryption set is created. Supported characters for the + name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters. Required. + :type disk_encryption_set_name: str + :param disk_encryption_set: disk encryption set object supplied in the body of the Put disk + encryption set operation. Required. + :type disk_encryption_set: IO + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. 
+ :return: An instance of LROPoller that returns either DiskEncryptionSet or the result of + cls(response) + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2023_01_02.models.DiskEncryptionSet] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def begin_create_or_update( + self, + resource_group_name: str, + disk_encryption_set_name: str, + disk_encryption_set: Union[_models.DiskEncryptionSet, IO], + **kwargs: Any + ) -> LROPoller[_models.DiskEncryptionSet]: + """Creates or updates a disk encryption set. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_encryption_set_name: The name of the disk encryption set that is being created. The + name can't be changed after the disk encryption set is created. Supported characters for the + name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters. Required. + :type disk_encryption_set_name: str + :param disk_encryption_set: disk encryption set object supplied in the body of the Put disk + encryption set operation. Is either a DiskEncryptionSet type or a IO type. Required. + :type disk_encryption_set: ~azure.mgmt.compute.v2023_01_02.models.DiskEncryptionSet or IO + :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. + Default value is None. + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. 
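+
+        .. admonition:: Example
+
+            A minimal sketch of creating a disk encryption set with the model overload. The
+            subscription ID, resource names, vault ID and key URL are placeholders, and the
+            model and field names (``EncryptionSetIdentity``, ``KeyForDiskEncryptionSet``,
+            ``SourceVault``, ``active_key``) follow earlier disk API versions; they are not
+            taken from this specification and may differ.
+
+            .. code-block:: python
+
+                from azure.identity import DefaultAzureCredential
+                from azure.mgmt.compute.v2023_01_02 import ComputeManagementClient, models
+
+                client = ComputeManagementClient(DefaultAzureCredential(), "<subscription-id>")
+
+                des = models.DiskEncryptionSet(
+                    location="eastus",
+                    identity=models.EncryptionSetIdentity(type="SystemAssigned"),
+                    # Key Vault key that the encryption set wraps (placeholder values).
+                    active_key=models.KeyForDiskEncryptionSet(
+                        source_vault=models.SourceVault(id="<key-vault-resource-id>"),
+                        key_url="https://<vault-name>.vault.azure.net/keys/<key>/<version>",
+                    ),
+                )
+                poller = client.disk_encryption_sets.begin_create_or_update("my-rg", "my-des", des)
+                result = poller.result()  # the fully provisioned DiskEncryptionSet
+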
+ :return: An instance of LROPoller that returns either DiskEncryptionSet or the result of + cls(response) + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2023_01_02.models.DiskEncryptionSet] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.DiskEncryptionSet] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._create_or_update_initial( + resource_group_name=resource_group_name, + disk_encryption_set_name=disk_encryption_set_name, + disk_encryption_set=disk_encryption_set, + api_version=api_version, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize("DiskEncryptionSet", pipeline_response) + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + if polling is True: + polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs)) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + begin_create_or_update.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}" + } + + def _update_initial( + self, + resource_group_name: str, + disk_encryption_set_name: str, + disk_encryption_set: Union[_models.DiskEncryptionSetUpdate, IO], + **kwargs: Any + ) -> _models.DiskEncryptionSet: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.DiskEncryptionSet] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(disk_encryption_set, (IOBase, bytes)): + _content = disk_encryption_set + else: + _json = self._serialize.body(disk_encryption_set, "DiskEncryptionSetUpdate") + + request = build_disk_encryption_sets_update_request( + resource_group_name=resource_group_name, + disk_encryption_set_name=disk_encryption_set_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + 
content=_content, + template_url=self._update_initial.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + if response.status_code == 200: + deserialized = self._deserialize("DiskEncryptionSet", pipeline_response) + + if response.status_code == 202: + deserialized = self._deserialize("DiskEncryptionSet", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + _update_initial.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}" + } + + @overload + def begin_update( + self, + resource_group_name: str, + disk_encryption_set_name: str, + disk_encryption_set: _models.DiskEncryptionSetUpdate, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.DiskEncryptionSet]: + """Updates (patches) a disk encryption set. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_encryption_set_name: The name of the disk encryption set that is being created. The + name can't be changed after the disk encryption set is created. Supported characters for the + name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters. Required. + :type disk_encryption_set_name: str + :param disk_encryption_set: disk encryption set object supplied in the body of the Patch disk + encryption set operation. Required. + :type disk_encryption_set: ~azure.mgmt.compute.v2023_01_02.models.DiskEncryptionSetUpdate + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of LROPoller that returns either DiskEncryptionSet or the result of + cls(response) + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2023_01_02.models.DiskEncryptionSet] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_update( + self, + resource_group_name: str, + disk_encryption_set_name: str, + disk_encryption_set: IO, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.DiskEncryptionSet]: + """Updates (patches) a disk encryption set. + + :param resource_group_name: The name of the resource group. Required. 
+ :type resource_group_name: str + :param disk_encryption_set_name: The name of the disk encryption set that is being created. The + name can't be changed after the disk encryption set is created. Supported characters for the + name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters. Required. + :type disk_encryption_set_name: str + :param disk_encryption_set: disk encryption set object supplied in the body of the Patch disk + encryption set operation. Required. + :type disk_encryption_set: IO + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of LROPoller that returns either DiskEncryptionSet or the result of + cls(response) + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2023_01_02.models.DiskEncryptionSet] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def begin_update( + self, + resource_group_name: str, + disk_encryption_set_name: str, + disk_encryption_set: Union[_models.DiskEncryptionSetUpdate, IO], + **kwargs: Any + ) -> LROPoller[_models.DiskEncryptionSet]: + """Updates (patches) a disk encryption set. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_encryption_set_name: The name of the disk encryption set that is being created. The + name can't be changed after the disk encryption set is created. Supported characters for the + name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters. Required. + :type disk_encryption_set_name: str + :param disk_encryption_set: disk encryption set object supplied in the body of the Patch disk + encryption set operation. Is either a DiskEncryptionSetUpdate type or a IO type. Required. + :type disk_encryption_set: ~azure.mgmt.compute.v2023_01_02.models.DiskEncryptionSetUpdate or IO + :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. + Default value is None. + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. 
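+
+        .. admonition:: Example
+
+            A minimal sketch of patching an existing disk encryption set. The subscription ID
+            and resource names are placeholders; ``DiskEncryptionSetUpdate`` with a ``tags``
+            field follows earlier disk API versions and is not taken from this specification.
+
+            .. code-block:: python
+
+                from azure.identity import DefaultAzureCredential
+                from azure.mgmt.compute.v2023_01_02 import ComputeManagementClient, models
+
+                client = ComputeManagementClient(DefaultAzureCredential(), "<subscription-id>")
+
+                # Only the properties present in the update object are patched; here, the tags.
+                update = models.DiskEncryptionSetUpdate(tags={"env": "test"})
+                poller = client.disk_encryption_sets.begin_update("my-rg", "my-des", update)
+                updated = poller.result()
+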
+ :return: An instance of LROPoller that returns either DiskEncryptionSet or the result of + cls(response) + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2023_01_02.models.DiskEncryptionSet] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.DiskEncryptionSet] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._update_initial( + resource_group_name=resource_group_name, + disk_encryption_set_name=disk_encryption_set_name, + disk_encryption_set=disk_encryption_set, + api_version=api_version, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize("DiskEncryptionSet", pipeline_response) + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + if polling is True: + polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs)) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + begin_update.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}" + } + + @distributed_trace + def get(self, resource_group_name: str, disk_encryption_set_name: str, **kwargs: Any) -> _models.DiskEncryptionSet: + """Gets information about a disk encryption set. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_encryption_set_name: The name of the disk encryption set that is being created. The + name can't be changed after the disk encryption set is created. Supported characters for the + name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters. Required. 
+ :type disk_encryption_set_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: DiskEncryptionSet or the result of cls(response) + :rtype: ~azure.mgmt.compute.v2023_01_02.models.DiskEncryptionSet + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[_models.DiskEncryptionSet] = kwargs.pop("cls", None) + + request = build_disk_encryption_sets_get_request( + resource_group_name=resource_group_name, + disk_encryption_set_name=disk_encryption_set_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self.get.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize("DiskEncryptionSet", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + + get.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}" + } + + def _delete_initial( # pylint: disable=inconsistent-return-statements + self, resource_group_name: str, disk_encryption_set_name: str, **kwargs: Any + ) -> None: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_disk_encryption_sets_delete_request( + resource_group_name=resource_group_name, + disk_encryption_set_name=disk_encryption_set_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self._delete_initial.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + if cls: + return cls(pipeline_response, None, {}) + + _delete_initial.metadata = { + "url": 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}" + } + + @distributed_trace + def begin_delete(self, resource_group_name: str, disk_encryption_set_name: str, **kwargs: Any) -> LROPoller[None]: + """Deletes a disk encryption set. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_encryption_set_name: The name of the disk encryption set that is being created. The + name can't be changed after the disk encryption set is created. Supported characters for the + name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters. Required. + :type disk_encryption_set_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of LROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._delete_initial( # type: ignore + resource_group_name=resource_group_name, + disk_encryption_set_name=disk_encryption_set_name, + api_version=api_version, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) + + if polling is True: + polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs)) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + begin_delete.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}" + } + + @distributed_trace + def list_by_resource_group(self, resource_group_name: str, **kwargs: Any) -> Iterable["_models.DiskEncryptionSet"]: + """Lists all the disk encryption sets under a resource group. + + :param resource_group_name: The name of the resource group. Required. 
+ :type resource_group_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either DiskEncryptionSet or the result of cls(response) + :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2023_01_02.models.DiskEncryptionSet] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[_models.DiskEncryptionSetList] = kwargs.pop("cls", None) + + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + request = build_disk_encryption_sets_list_by_resource_group_request( + resource_group_name=resource_group_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self.list_by_resource_group.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + request.method = "GET" + return request + + def extract_data(pipeline_response): + deserialized = self._deserialize("DiskEncryptionSetList", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.next_link or None, iter(list_of_elem) + + def get_next(next_link=None): + request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + list_by_resource_group.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets" + } + + @distributed_trace + def list(self, **kwargs: Any) -> Iterable["_models.DiskEncryptionSet"]: + """Lists all the disk encryption sets under a subscription. 
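+
+        The returned iterator pages lazily over the subscription; a minimal sketch
+        (the subscription ID is a placeholder, not a value from this specification):
+
+        .. code-block:: python
+
+            from azure.identity import DefaultAzureCredential
+            from azure.mgmt.compute.v2023_01_02 import ComputeManagementClient
+
+            client = ComputeManagementClient(DefaultAzureCredential(), "<subscription-id>")
+
+            # ItemPaged follows nextLink transparently; no explicit paging code is needed.
+            for des in client.disk_encryption_sets.list():
+                print(des.id)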
+ + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either DiskEncryptionSet or the result of cls(response) + :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2023_01_02.models.DiskEncryptionSet] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[_models.DiskEncryptionSetList] = kwargs.pop("cls", None) + + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + request = build_disk_encryption_sets_list_request( + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self.list.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + request.method = "GET" + return request + + def extract_data(pipeline_response): + deserialized = self._deserialize("DiskEncryptionSetList", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.next_link or None, iter(list_of_elem) + + def get_next(next_link=None): + request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + list.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/diskEncryptionSets"} + + @distributed_trace + def list_associated_resources( + self, resource_group_name: str, disk_encryption_set_name: str, **kwargs: Any + ) -> Iterable[str]: + """Lists all resources that are encrypted with this disk encryption set. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param disk_encryption_set_name: The name of the disk encryption set that is being created. The + name can't be changed after the disk encryption set is created. Supported characters for the + name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 characters. Required. 
+ :type disk_encryption_set_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either str or the result of cls(response) + :rtype: ~azure.core.paging.ItemPaged[str] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[_models.ResourceUriList] = kwargs.pop("cls", None) + + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + request = build_disk_encryption_sets_list_associated_resources_request( + resource_group_name=resource_group_name, + disk_encryption_set_name=disk_encryption_set_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self.list_associated_resources.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + request.method = "GET" + return request + + def extract_data(pipeline_response): + deserialized = self._deserialize("ResourceUriList", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.next_link or None, iter(list_of_elem) + + def get_next(next_link=None): + request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + list_associated_resources.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}/associatedResources" + } + + +class DiskRestorePointOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.compute.v2023_01_02.ComputeManagementClient`'s + :attr:`disk_restore_point` attribute. 
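+
+    A minimal usage sketch (the subscription ID and resource names are placeholders, not
+    values from this specification): the group is reached through the client's
+    ``disk_restore_point`` attribute noted above.
+
+    .. code-block:: python
+
+        from azure.identity import DefaultAzureCredential
+        from azure.mgmt.compute.v2023_01_02 import ComputeManagementClient
+
+        client = ComputeManagementClient(DefaultAzureCredential(), "<subscription-id>")
+
+        # Enumerate the disk restore points under one VM restore point.
+        for point in client.disk_restore_point.list_by_restore_point(
+            "my-rg", "my-restore-point-collection", "my-vm-restore-point"
+        ):
+            print(point.name)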
+ """ + + models = _models + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + + @distributed_trace + def get( + self, + resource_group_name: str, + restore_point_collection_name: str, + vm_restore_point_name: str, + disk_restore_point_name: str, + **kwargs: Any + ) -> _models.DiskRestorePoint: + """Get disk restorePoint resource. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param restore_point_collection_name: The name of the restore point collection that the disk + restore point belongs. Required. + :type restore_point_collection_name: str + :param vm_restore_point_name: The name of the vm restore point that the disk disk restore point + belongs. Required. + :type vm_restore_point_name: str + :param disk_restore_point_name: The name of the disk restore point created. Required. + :type disk_restore_point_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: DiskRestorePoint or the result of cls(response) + :rtype: ~azure.mgmt.compute.v2023_01_02.models.DiskRestorePoint + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[_models.DiskRestorePoint] = kwargs.pop("cls", None) + + request = build_disk_restore_point_get_request( + resource_group_name=resource_group_name, + restore_point_collection_name=restore_point_collection_name, + vm_restore_point_name=vm_restore_point_name, + disk_restore_point_name=disk_restore_point_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self.get.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize("DiskRestorePoint", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + + get.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}/restorePoints/{vmRestorePointName}/diskRestorePoints/{diskRestorePointName}" + } + + @distributed_trace + def list_by_restore_point( + self, resource_group_name: str, restore_point_collection_name: str, 
vm_restore_point_name: str, **kwargs: Any + ) -> Iterable["_models.DiskRestorePoint"]: + """Lists diskRestorePoints under a vmRestorePoint. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param restore_point_collection_name: The name of the restore point collection that the disk + restore point belongs. Required. + :type restore_point_collection_name: str + :param vm_restore_point_name: The name of the vm restore point that the disk disk restore point + belongs. Required. + :type vm_restore_point_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either DiskRestorePoint or the result of cls(response) + :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2023_01_02.models.DiskRestorePoint] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[_models.DiskRestorePointList] = kwargs.pop("cls", None) + + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + request = build_disk_restore_point_list_by_restore_point_request( + resource_group_name=resource_group_name, + restore_point_collection_name=restore_point_collection_name, + vm_restore_point_name=vm_restore_point_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self.list_by_restore_point.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + request.method = "GET" + return request + + def extract_data(pipeline_response): + deserialized = self._deserialize("DiskRestorePointList", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.next_link or None, iter(list_of_elem) + + def get_next(next_link=None): + request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + list_by_restore_point.metadata = { + "url": 
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}/restorePoints/{vmRestorePointName}/diskRestorePoints" + } + + def _grant_access_initial( + self, + resource_group_name: str, + restore_point_collection_name: str, + vm_restore_point_name: str, + disk_restore_point_name: str, + grant_access_data: Union[_models.GrantAccessData, IO], + **kwargs: Any + ) -> Optional[_models.AccessUri]: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[Optional[_models.AccessUri]] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(grant_access_data, (IOBase, bytes)): + _content = grant_access_data + else: + _json = self._serialize.body(grant_access_data, "GrantAccessData") + + request = build_disk_restore_point_grant_access_request( + resource_group_name=resource_group_name, + restore_point_collection_name=restore_point_collection_name, + vm_restore_point_name=vm_restore_point_name, + disk_restore_point_name=disk_restore_point_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + template_url=self._grant_access_initial.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize("AccessUri", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + + _grant_access_initial.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}/restorePoints/{vmRestorePointName}/diskRestorePoints/{diskRestorePointName}/beginGetAccess" + } + + @overload + def begin_grant_access( + self, + resource_group_name: str, + restore_point_collection_name: str, + vm_restore_point_name: str, + disk_restore_point_name: str, + grant_access_data: _models.GrantAccessData, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.AccessUri]: + """Grants access to a diskRestorePoint. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param restore_point_collection_name: The name of the restore point collection that the disk + restore point belongs. Required. 
+ :type restore_point_collection_name: str
+ :param vm_restore_point_name: The name of the vm restore point that the disk restore point
+ belongs to. Required.
+ :type vm_restore_point_name: str
+ :param disk_restore_point_name: The name of the disk restore point created. Required.
+ :type disk_restore_point_name: str
+ :param grant_access_data: Access data object supplied in the body of the get disk access
+ operation. Required.
+ :type grant_access_data: ~azure.mgmt.compute.v2023_01_02.models.GrantAccessData
+ :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either AccessUri or the result of cls(response)
+ :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2023_01_02.models.AccessUri]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @overload
+ def begin_grant_access(
+ self,
+ resource_group_name: str,
+ restore_point_collection_name: str,
+ vm_restore_point_name: str,
+ disk_restore_point_name: str,
+ grant_access_data: IO,
+ *,
+ content_type: str = "application/json",
+ **kwargs: Any
+ ) -> LROPoller[_models.AccessUri]:
+ """Grants access to a diskRestorePoint.
+
+ :param resource_group_name: The name of the resource group. Required.
+ :type resource_group_name: str
+ :param restore_point_collection_name: The name of the restore point collection that the disk
+ restore point belongs to. Required.
+ :type restore_point_collection_name: str
+ :param vm_restore_point_name: The name of the vm restore point that the disk restore point
+ belongs to. Required.
+ :type vm_restore_point_name: str
+ :param disk_restore_point_name: The name of the disk restore point created. Required.
+ :type disk_restore_point_name: str
+ :param grant_access_data: Access data object supplied in the body of the get disk access
+ operation. Required.
+ :type grant_access_data: IO
+ :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
+ Default value is "application/json".
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either AccessUri or the result of cls(response)
+ :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2023_01_02.models.AccessUri]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+
+ @distributed_trace
+ def begin_grant_access(
+ self,
+ resource_group_name: str,
+ restore_point_collection_name: str,
+ vm_restore_point_name: str,
+ disk_restore_point_name: str,
+ grant_access_data: Union[_models.GrantAccessData, IO],
+ **kwargs: Any
+ ) -> LROPoller[_models.AccessUri]:
+ """Grants access to a diskRestorePoint.
+
+ :param resource_group_name: The name of the resource group. Required.
+ :type resource_group_name: str
+ :param restore_point_collection_name: The name of the restore point collection that the disk
+ restore point belongs to. Required.
+ :type restore_point_collection_name: str
+ :param vm_restore_point_name: The name of the vm restore point that the disk restore point
+ belongs to. Required.
+ :type vm_restore_point_name: str
+ :param disk_restore_point_name: The name of the disk restore point created. Required.
+ :type disk_restore_point_name: str
+ :param grant_access_data: Access data object supplied in the body of the get disk access
+ operation. Is either a GrantAccessData type or an IO type. Required.
+ :type grant_access_data: ~azure.mgmt.compute.v2023_01_02.models.GrantAccessData or IO
+ :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
+ Default value is None.
+ :paramtype content_type: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either AccessUri or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2023_01_02.models.AccessUri] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.AccessUri] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._grant_access_initial( + resource_group_name=resource_group_name, + restore_point_collection_name=restore_point_collection_name, + vm_restore_point_name=vm_restore_point_name, + disk_restore_point_name=disk_restore_point_name, + grant_access_data=grant_access_data, + api_version=api_version, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize("AccessUri", pipeline_response) + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs) + ) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + begin_grant_access.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}/restorePoints/{vmRestorePointName}/diskRestorePoints/{diskRestorePointName}/beginGetAccess" + } + + def _revoke_access_initial( # pylint: disable=inconsistent-return-statements + self, + resource_group_name: str, + restore_point_collection_name: str, + vm_restore_point_name: str, + disk_restore_point_name: str, + **kwargs: Any + ) -> None: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_disk_restore_point_revoke_access_request( + resource_group_name=resource_group_name, + restore_point_collection_name=restore_point_collection_name, + vm_restore_point_name=vm_restore_point_name, + disk_restore_point_name=disk_restore_point_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self._revoke_access_initial.metadata["url"], + headers=_headers, + 
params=_params,
+ )
+ request = _convert_request(request)
+ request.url = self._client.format_url(request.url)
+
+ _stream = False
+ pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
+ request, stream=_stream, **kwargs
+ )
+
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise HttpResponseError(response=response, error_format=ARMErrorFormat)
+
+ if cls:
+ return cls(pipeline_response, None, {})
+
+ _revoke_access_initial.metadata = {
+ "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}/restorePoints/{vmRestorePointName}/diskRestorePoints/{diskRestorePointName}/endGetAccess"
+ }
+
+ @distributed_trace
+ def begin_revoke_access(
+ self,
+ resource_group_name: str,
+ restore_point_collection_name: str,
+ vm_restore_point_name: str,
+ disk_restore_point_name: str,
+ **kwargs: Any
+ ) -> LROPoller[None]:
+ """Revokes access to a diskRestorePoint.
+
+ :param resource_group_name: The name of the resource group. Required.
+ :type resource_group_name: str
+ :param restore_point_collection_name: The name of the restore point collection that the disk
+ restore point belongs to. Required.
+ :type restore_point_collection_name: str
+ :param vm_restore_point_name: The name of the vm restore point that the disk restore point
+ belongs to. Required.
+ :type vm_restore_point_name: str
+ :param disk_restore_point_name: The name of the disk restore point created. Required.
+ :type disk_restore_point_name: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
+ operation to not poll, or pass in your own initialized polling object for a personal polling
+ strategy.
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
+ Retry-After header is present.
+ :return: An instance of LROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._revoke_access_initial( # type: ignore + resource_group_name=resource_group_name, + restore_point_collection_name=restore_point_collection_name, + vm_restore_point_name=vm_restore_point_name, + disk_restore_point_name=disk_restore_point_name, + api_version=api_version, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs) + ) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + begin_revoke_access.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}/restorePoints/{vmRestorePointName}/diskRestorePoints/{diskRestorePointName}/endGetAccess" + } + + +class SnapshotsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.mgmt.compute.v2023_01_02.ComputeManagementClient`'s + :attr:`snapshots` attribute. 
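+
+ A minimal usage sketch (illustrative only, not part of the generated client): ``compute_client``
+ stands for an authenticated ComputeManagementClient, and the names below are placeholders.
+
+ .. code-block:: python
+
+     snapshot = compute_client.snapshots.get(
+         resource_group_name="example-rg",
+         snapshot_name="example-snapshot",
+     )
+     poller = compute_client.snapshots.begin_delete(
+         resource_group_name="example-rg",
+         snapshot_name="example-snapshot",
+     )
+     poller.result()  # blocks until the long-running delete completes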
+ """ + + models = _models + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version") + + def _create_or_update_initial( + self, resource_group_name: str, snapshot_name: str, snapshot: Union[_models.Snapshot, IO], **kwargs: Any + ) -> _models.Snapshot: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Snapshot] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(snapshot, (IOBase, bytes)): + _content = snapshot + else: + _json = self._serialize.body(snapshot, "Snapshot") + + request = build_snapshots_create_or_update_request( + resource_group_name=resource_group_name, + snapshot_name=snapshot_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + template_url=self._create_or_update_initial.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + if response.status_code == 200: + deserialized = self._deserialize("Snapshot", pipeline_response) + + if response.status_code == 202: + deserialized = self._deserialize("Snapshot", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + _create_or_update_initial.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}" + } + + @overload + def begin_create_or_update( + self, + resource_group_name: str, + snapshot_name: str, + snapshot: _models.Snapshot, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.Snapshot]: + """Creates or updates a snapshot. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param snapshot_name: The name of the snapshot that is being created. The name can't be changed + after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. + The max name length is 80 characters. Required. 
+ :type snapshot_name: str + :param snapshot: Snapshot object supplied in the body of the Put disk operation. Required. + :type snapshot: ~azure.mgmt.compute.v2023_01_02.models.Snapshot + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of LROPoller that returns either Snapshot or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2023_01_02.models.Snapshot] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_create_or_update( + self, + resource_group_name: str, + snapshot_name: str, + snapshot: IO, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.Snapshot]: + """Creates or updates a snapshot. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param snapshot_name: The name of the snapshot that is being created. The name can't be changed + after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. + The max name length is 80 characters. Required. + :type snapshot_name: str + :param snapshot: Snapshot object supplied in the body of the Put disk operation. Required. + :type snapshot: IO + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of LROPoller that returns either Snapshot or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2023_01_02.models.Snapshot] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def begin_create_or_update( + self, resource_group_name: str, snapshot_name: str, snapshot: Union[_models.Snapshot, IO], **kwargs: Any + ) -> LROPoller[_models.Snapshot]: + """Creates or updates a snapshot. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param snapshot_name: The name of the snapshot that is being created. The name can't be changed + after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. + The max name length is 80 characters. Required. 
+ :type snapshot_name: str + :param snapshot: Snapshot object supplied in the body of the Put disk operation. Is either a + Snapshot type or a IO type. Required. + :type snapshot: ~azure.mgmt.compute.v2023_01_02.models.Snapshot or IO + :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. + Default value is None. + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of LROPoller that returns either Snapshot or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2023_01_02.models.Snapshot] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Snapshot] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._create_or_update_initial( + resource_group_name=resource_group_name, + snapshot_name=snapshot_name, + snapshot=snapshot, + api_version=api_version, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize("Snapshot", pipeline_response) + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + if polling is True: + polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs)) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + begin_create_or_update.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}" + } + + def _update_initial( + self, resource_group_name: str, snapshot_name: str, snapshot: Union[_models.SnapshotUpdate, IO], **kwargs: Any + ) -> _models.Snapshot: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or 
{}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Snapshot] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(snapshot, (IOBase, bytes)): + _content = snapshot + else: + _json = self._serialize.body(snapshot, "SnapshotUpdate") + + request = build_snapshots_update_request( + resource_group_name=resource_group_name, + snapshot_name=snapshot_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + template_url=self._update_initial.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + if response.status_code == 200: + deserialized = self._deserialize("Snapshot", pipeline_response) + + if response.status_code == 202: + deserialized = self._deserialize("Snapshot", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + _update_initial.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}" + } + + @overload + def begin_update( + self, + resource_group_name: str, + snapshot_name: str, + snapshot: _models.SnapshotUpdate, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.Snapshot]: + """Updates (patches) a snapshot. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param snapshot_name: The name of the snapshot that is being created. The name can't be changed + after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. + The max name length is 80 characters. Required. + :type snapshot_name: str + :param snapshot: Snapshot object supplied in the body of the Patch snapshot operation. + Required. + :type snapshot: ~azure.mgmt.compute.v2023_01_02.models.SnapshotUpdate + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. 
+ :return: An instance of LROPoller that returns either Snapshot or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2023_01_02.models.Snapshot] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_update( + self, + resource_group_name: str, + snapshot_name: str, + snapshot: IO, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.Snapshot]: + """Updates (patches) a snapshot. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param snapshot_name: The name of the snapshot that is being created. The name can't be changed + after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. + The max name length is 80 characters. Required. + :type snapshot_name: str + :param snapshot: Snapshot object supplied in the body of the Patch snapshot operation. + Required. + :type snapshot: IO + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of LROPoller that returns either Snapshot or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2023_01_02.models.Snapshot] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def begin_update( + self, resource_group_name: str, snapshot_name: str, snapshot: Union[_models.SnapshotUpdate, IO], **kwargs: Any + ) -> LROPoller[_models.Snapshot]: + """Updates (patches) a snapshot. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param snapshot_name: The name of the snapshot that is being created. The name can't be changed + after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. + The max name length is 80 characters. Required. + :type snapshot_name: str + :param snapshot: Snapshot object supplied in the body of the Patch snapshot operation. Is + either a SnapshotUpdate type or a IO type. Required. + :type snapshot: ~azure.mgmt.compute.v2023_01_02.models.SnapshotUpdate or IO + :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. + Default value is None. + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. 
+ :return: An instance of LROPoller that returns either Snapshot or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2023_01_02.models.Snapshot] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.Snapshot] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._update_initial( + resource_group_name=resource_group_name, + snapshot_name=snapshot_name, + snapshot=snapshot, + api_version=api_version, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize("Snapshot", pipeline_response) + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + if polling is True: + polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs)) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + begin_update.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}" + } + + @distributed_trace + def get(self, resource_group_name: str, snapshot_name: str, **kwargs: Any) -> _models.Snapshot: + """Gets information about a snapshot. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param snapshot_name: The name of the snapshot that is being created. The name can't be changed + after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. + The max name length is 80 characters. Required. 
+ :type snapshot_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: Snapshot or the result of cls(response) + :rtype: ~azure.mgmt.compute.v2023_01_02.models.Snapshot + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[_models.Snapshot] = kwargs.pop("cls", None) + + request = build_snapshots_get_request( + resource_group_name=resource_group_name, + snapshot_name=snapshot_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self.get.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = self._deserialize("Snapshot", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + + get.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}" + } + + def _delete_initial( # pylint: disable=inconsistent-return-statements + self, resource_group_name: str, snapshot_name: str, **kwargs: Any + ) -> None: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_snapshots_delete_request( + resource_group_name=resource_group_name, + snapshot_name=snapshot_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self._delete_initial.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202, 204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + if cls: + return cls(pipeline_response, None, {}) + + _delete_initial.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}" + } + + @distributed_trace + def begin_delete(self, 
resource_group_name: str, snapshot_name: str, **kwargs: Any) -> LROPoller[None]: + """Deletes a snapshot. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param snapshot_name: The name of the snapshot that is being created. The name can't be changed + after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. + The max name length is 80 characters. Required. + :type snapshot_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of LROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._delete_initial( # type: ignore + resource_group_name=resource_group_name, + snapshot_name=snapshot_name, + api_version=api_version, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) + + if polling is True: + polling_method: PollingMethod = cast(PollingMethod, ARMPolling(lro_delay, **kwargs)) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + begin_delete.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}" + } + + @distributed_trace + def list_by_resource_group(self, resource_group_name: str, **kwargs: Any) -> Iterable["_models.Snapshot"]: + """Lists snapshots under a resource group. + + :param resource_group_name: The name of the resource group. Required. 
+ :type resource_group_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either Snapshot or the result of cls(response) + :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2023_01_02.models.Snapshot] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[_models.SnapshotList] = kwargs.pop("cls", None) + + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + request = build_snapshots_list_by_resource_group_request( + resource_group_name=resource_group_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self.list_by_resource_group.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + request.method = "GET" + return request + + def extract_data(pipeline_response): + deserialized = self._deserialize("SnapshotList", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.next_link or None, iter(list_of_elem) + + def get_next(next_link=None): + request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + list_by_resource_group.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots" + } + + @distributed_trace + def list(self, **kwargs: Any) -> Iterable["_models.Snapshot"]: + """Lists snapshots under a subscription. 
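+
+ A brief sketch of consuming the returned pager (illustrative only; ``compute_client`` is a
+ placeholder for an authenticated ComputeManagementClient):
+
+ .. code-block:: python
+
+     for snapshot in compute_client.snapshots.list():
+         print(snapshot.id)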
+ + :keyword callable cls: A custom type or function that will be passed the direct response + :return: An iterator like instance of either Snapshot or the result of cls(response) + :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2023_01_02.models.Snapshot] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[_models.SnapshotList] = kwargs.pop("cls", None) + + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + request = build_snapshots_list_request( + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self.list.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + request.method = "GET" + return request + + def extract_data(pipeline_response): + deserialized = self._deserialize("SnapshotList", pipeline_response) + list_of_elem = deserialized.value + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return deserialized.next_link or None, iter(list_of_elem) + + def get_next(next_link=None): + request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + list.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/snapshots"} + + def _grant_access_initial( + self, + resource_group_name: str, + snapshot_name: str, + grant_access_data: Union[_models.GrantAccessData, IO], + **kwargs: Any + ) -> Optional[_models.AccessUri]: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[Optional[_models.AccessUri]] = kwargs.pop("cls", None) + + content_type = 
content_type or "application/json" + _json = None + _content = None + if isinstance(grant_access_data, (IOBase, bytes)): + _content = grant_access_data + else: + _json = self._serialize.body(grant_access_data, "GrantAccessData") + + request = build_snapshots_grant_access_request( + resource_group_name=resource_group_name, + snapshot_name=snapshot_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + content_type=content_type, + json=_json, + content=_content, + template_url=self._grant_access_initial.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize("AccessUri", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + + _grant_access_initial.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/beginGetAccess" + } + + @overload + def begin_grant_access( + self, + resource_group_name: str, + snapshot_name: str, + grant_access_data: _models.GrantAccessData, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.AccessUri]: + """Grants access to a snapshot. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param snapshot_name: The name of the snapshot that is being created. The name can't be changed + after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. + The max name length is 80 characters. Required. + :type snapshot_name: str + :param grant_access_data: Access data object supplied in the body of the get snapshot access + operation. Required. + :type grant_access_data: ~azure.mgmt.compute.v2023_01_02.models.GrantAccessData + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. 
+ :return: An instance of LROPoller that returns either AccessUri or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2023_01_02.models.AccessUri] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def begin_grant_access( + self, + resource_group_name: str, + snapshot_name: str, + grant_access_data: IO, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> LROPoller[_models.AccessUri]: + """Grants access to a snapshot. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param snapshot_name: The name of the snapshot that is being created. The name can't be changed + after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. + The max name length is 80 characters. Required. + :type snapshot_name: str + :param grant_access_data: Access data object supplied in the body of the get snapshot access + operation. Required. + :type grant_access_data: IO + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of LROPoller that returns either AccessUri or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2023_01_02.models.AccessUri] + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def begin_grant_access( + self, + resource_group_name: str, + snapshot_name: str, + grant_access_data: Union[_models.GrantAccessData, IO], + **kwargs: Any + ) -> LROPoller[_models.AccessUri]: + """Grants access to a snapshot. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param snapshot_name: The name of the snapshot that is being created. The name can't be changed + after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. + The max name length is 80 characters. Required. + :type snapshot_name: str + :param grant_access_data: Access data object supplied in the body of the get snapshot access + operation. Is either a GrantAccessData type or a IO type. Required. + :type grant_access_data: ~azure.mgmt.compute.v2023_01_02.models.GrantAccessData or IO + :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. + Default value is None. + :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. 
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of LROPoller that returns either AccessUri or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2023_01_02.models.AccessUri] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.AccessUri] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._grant_access_initial( + resource_group_name=resource_group_name, + snapshot_name=snapshot_name, + grant_access_data=grant_access_data, + api_version=api_version, + content_type=content_type, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): + deserialized = self._deserialize("AccessUri", pipeline_response) + if cls: + return cls(pipeline_response, deserialized, {}) + return deserialized + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs) + ) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + begin_grant_access.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/beginGetAccess" + } + + def _revoke_access_initial( # pylint: disable=inconsistent-return-statements + self, resource_group_name: str, snapshot_name: str, **kwargs: Any + ) -> None: + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_snapshots_revoke_access_request( + resource_group_name=resource_group_name, + snapshot_name=snapshot_name, + subscription_id=self._config.subscription_id, + api_version=api_version, + template_url=self._revoke_access_initial.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=_stream, **kwargs 
+ ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise HttpResponseError(response=response, error_format=ARMErrorFormat) + + if cls: + return cls(pipeline_response, None, {}) + + _revoke_access_initial.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/endGetAccess" + } + + @distributed_trace + def begin_revoke_access(self, resource_group_name: str, snapshot_name: str, **kwargs: Any) -> LROPoller[None]: + """Revokes access to a snapshot. + + :param resource_group_name: The name of the resource group. Required. + :type resource_group_name: str + :param snapshot_name: The name of the snapshot that is being created. The name can't be changed + after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. + The max name length is 80 characters. Required. + :type snapshot_name: str + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword str continuation_token: A continuation token to restart a poller from a saved state. + :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this + operation to not poll, or pass in your own initialized polling object for a personal polling + strategy. + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no + Retry-After header is present. + :return: An instance of LROPoller that returns either None or the result of cls(response) + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2023-01-02")) + cls: ClsType[None] = kwargs.pop("cls", None) + polling: Union[bool, PollingMethod] = kwargs.pop("polling", True) + lro_delay = kwargs.pop("polling_interval", self._config.polling_interval) + cont_token: Optional[str] = kwargs.pop("continuation_token", None) + if cont_token is None: + raw_result = self._revoke_access_initial( # type: ignore + resource_group_name=resource_group_name, + snapshot_name=snapshot_name, + api_version=api_version, + cls=lambda x, y, z: x, + headers=_headers, + params=_params, + **kwargs + ) + kwargs.pop("error_map", None) + + def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements + if cls: + return cls(pipeline_response, None, {}) + + if polling is True: + polling_method: PollingMethod = cast( + PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs) + ) + elif polling is False: + polling_method = cast(PollingMethod, NoPolling()) + else: + polling_method = polling + if cont_token: + return LROPoller.from_continuation_token( + polling_method=polling_method, + continuation_token=cont_token, + client=self._client, + deserialization_callback=get_long_running_output, + ) + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore + + begin_revoke_access.metadata = { + "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/endGetAccess" + } diff --git 
a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/operations/_patch.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/operations/_patch.py new file mode 100644 index 0000000000000..f7dd32510333d --- /dev/null +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/operations/_patch.py @@ -0,0 +1,20 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List + +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. + + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/py.typed b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/py.typed new file mode 100644 index 0000000000000..e5aff4f83af86 --- /dev/null +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_01_02/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561. \ No newline at end of file diff --git a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_03_01/_version.py b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_03_01/_version.py index 215f19d5391f4..e5754a47ce68f 100644 --- a/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_03_01/_version.py +++ b/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2023_03_01/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "30.0.0" +VERSION = "1.0.0b1"
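
For reviewers who want to exercise the newly generated snapshot surface end to end, the sketch below shows one way the paged `list_by_resource_group` call and the `begin_grant_access`/`begin_revoke_access` long-running operations could be driven through the multi-API `ComputeManagementClient`. It is a minimal illustration, not part of the generated patch: the credential, the subscription environment variable, and the resource group and snapshot names are assumptions, and pinning `api_version="2023-01-02"` is only meaningful for the disk and snapshot operation groups added in this version.

```python
# Hypothetical smoke test -- not generated code. Assumes azure-identity is installed,
# AZURE_SUBSCRIPTION_ID is set, and the resource group / snapshot below already exist;
# all of these names are placeholders rather than values taken from this patch.
import os

from azure.identity import DefaultAzureCredential
from azure.mgmt.compute import ComputeManagementClient
from azure.mgmt.compute.v2023_01_02.models import GrantAccessData

client = ComputeManagementClient(
    credential=DefaultAzureCredential(),
    subscription_id=os.environ["AZURE_SUBSCRIPTION_ID"],
    api_version="2023-01-02",  # route snapshot calls to the API version added by this patch
)

# Paged listing: ItemPaged follows nextLink pages transparently, reusing the
# client's api-version exactly as prepare_request() does in the generated code.
for snapshot in client.snapshots.list_by_resource_group("example-rg"):
    print(snapshot.name, snapshot.disk_size_gb)

# Long-running operations: begin_grant_access polls the beginGetAccess operation
# until an AccessUri is available; begin_revoke_access polls endGetAccess.
grant_poller = client.snapshots.begin_grant_access(
    resource_group_name="example-rg",
    snapshot_name="example-snapshot",
    grant_access_data=GrantAccessData(access="Read", duration_in_seconds=3600),
)
access_uri = grant_poller.result()  # waits for the LRO to reach a terminal state
print(access_uri.access_sas)

client.snapshots.begin_revoke_access("example-rg", "example-snapshot").result()
```

Calling `.result()` on either poller simply waits for the ARMPolling loop configured in the generated operations (`final-state-via: location`) to finish; passing `polling=False` instead returns after the initial `beginGetAccess`/`endGetAccess` request without waiting for completion.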