diff --git a/src/azure-cli/azure/cli/command_modules/vm/__init__.py b/src/azure-cli/azure/cli/command_modules/vm/__init__.py index 7b8669bf35f..4097503d544 100644 --- a/src/azure-cli/azure/cli/command_modules/vm/__init__.py +++ b/src/azure-cli/azure/cli/command_modules/vm/__init__.py @@ -65,4 +65,83 @@ def load_arguments(self, command): pass -COMMAND_LOADER_CLS = ComputeCommandsLoader +class AzureStackComputeCommandsLoader(AzCommandsLoader): + + def __init__(self, cli_ctx=None): + from azure.cli.core.commands import CliCommandType + compute_custom = CliCommandType( + operations_tmpl='azure.cli.command_modules.vm.azure_stack.custom#{}', + operation_group='virtual_machines' + ) + super().__init__(cli_ctx=cli_ctx, + resource_type=ResourceType.MGMT_COMPUTE, + operation_group='virtual_machines', + custom_command_type=compute_custom) + + def load_command_table(self, args): + from azure.cli.command_modules.vm.azure_stack.commands import load_command_table + from azure.cli.core.aaz import load_aaz_command_table + try: + from . import aaz + except ImportError: + aaz = None + + if aaz: + load_aaz_command_table( + loader=self, + aaz_pkg_name=aaz.__name__, + args=args + ) + load_command_table(self, args) + try: + # Load the generated and manual command tables when those packages are present. + from .generated.commands import load_command_table as load_command_table_generated + load_command_table_generated(self, args) + from .manual.commands import load_command_table as load_command_table_manual + load_command_table_manual(self, args) + except ImportError: + pass + + profile = self.get_module_by_profile("commands") + if profile and hasattr(profile, 'load_command_table'): + profile.load_command_table(self, args) + + return self.command_table + + def load_arguments(self, command): + from azure.cli.command_modules.vm.azure_stack._params import load_arguments + load_arguments(self, command) + try: + from .generated._params import load_arguments as load_arguments_generated + load_arguments_generated(self, command) + from .manual._params import load_arguments as load_arguments_manual + load_arguments_manual(self, command) + except ImportError: + pass + + profile = self.get_module_by_profile("_params") + if profile and hasattr(profile, 'load_arguments'): + profile.load_arguments(self, command) + + def get_module_name_by_profile(self, module_name): + from azure.cli.core.aaz.utils import get_aaz_profile_module_name + profile_module_name = get_aaz_profile_module_name(profile_name=self.cli_ctx.cloud.profile) + if module_name: + return f'azure.cli.command_modules.vm.azure_stack.{profile_module_name}.{module_name}' + return f'azure.cli.command_modules.vm.azure_stack.{profile_module_name}' + + def get_module_by_profile(self, name): + import importlib + module_name = self.get_module_name_by_profile(name) + try: + return importlib.import_module(module_name) + except ImportError: + # The per-profile module is optional; callers check for None. + return None + + +def get_command_loader(cli_ctx): + if cli_ctx.cloud.profile.lower() != "latest": + return AzureStackComputeCommandsLoader + + return ComputeCommandsLoader diff --git a/src/azure-cli/azure/cli/command_modules/vm/_params.py b/src/azure-cli/azure/cli/command_modules/vm/_params.py index a6ca5c7941a..4222457c775 100644 --- a/src/azure-cli/azure/cli/command_modules/vm/_params.py +++ b/src/azure-cli/azure/cli/command_modules/vm/_params.py @@ -883,9 +883,10 @@ def load_arguments(self, _): c.argument('enable_auto_os_upgrade', enable_auto_os_upgrade_type) c.argument('upgrade_policy_mode', help='Specify the mode of an upgrade to virtual machines in the scale set.',
arg_type=get_enum_type(UpgradeMode)) - for scope, help_prefix in [('vmss update', 'Update the'), ('vmss wait', 'Wait on the')]: - with self.argument_context(scope) as c: - c.argument('instance_id', id_part='child_name_1', help="{0} VM instance with this ID. If missing, {0} VMSS.".format(help_prefix)) + with self.argument_context('vmss update') as c: + c.argument('instance_id', id_part='child_name_1', help="Update the VM instance with this ID. If missing, update the VMSS.") + with self.argument_context('vmss wait') as c: + c.argument('instance_id', id_part='child_name_1', help="Wait on the VM instance with this ID. If missing, wait on the VMSS.") for scope in ['vmss update-instances', 'vmss delete-instances']: with self.argument_context(scope) as c: @@ -926,15 +927,18 @@ def load_arguments(self, _): arg_type=get_three_state_flag(), help='If set, the extension service will not automatically pick or upgrade to the latest minor version, even if the extension is redeployed.') + for scope in ['vm', 'vmss']: with self.argument_context('{} run-command'.format(scope)) as c: c.argument('command_id', completer=get_vm_run_command_completion_list, help="The command id. Use 'az {} run-command list' to get the list".format(scope)) if scope == 'vmss': c.argument('vmss_name', vmss_name_type) + for scope in ['vm', 'vmss']: with self.argument_context('{} run-command invoke'.format(scope)) as c: c.argument('parameters', nargs='+', help="space-separated parameters in the format of '[name=]value'") c.argument('scripts', nargs='+', help="Space-separated script lines. Use @{file} to load script from a file") + for scope in ['vm', 'vmss']: with self.argument_context('{} stop'.format(scope)) as c: c.argument('skip_shutdown', action='store_true', help='Skip shutdown and power-off immediately.', min_api='2019-03-01') @@ -1083,6 +1087,7 @@ def load_arguments(self, _): c.argument('os_disk_secure_vm_disk_encryption_set', min_api='2021-11-01', help='Specify the customer managed disk encryption set resource ID or name for the managed disk that is used for customer managed key encrypted Confidential VM OS disk and VM guest blob.') c.argument('disable_integrity_monitoring_autoupgrade', action='store_true', min_api='2020-12-01', help='Disable auto upgrade of guest attestation extension for Trusted Launch enabled VMs and VMSS.') + for scope in ['vm create', 'vmss create']: with self.argument_context(scope, arg_group='Authentication') as c: c.argument('generate_ssh_keys', action='store_true', help='Generate SSH public and private key files if missing. The keys will be stored in the ~/.ssh directory') c.argument('ssh_key_type', arg_type=get_enum_type(['RSA', 'Ed25519']), default='RSA', min_api='2023-09-01', help='Specify the type of SSH public and private key files to be generated if missing.') @@ -1092,6 +1097,7 @@ def load_arguments(self, _): c.argument('ssh_dest_key_path', help='Destination file path on the VM for the SSH key. If the file already exists, the specified key(s) are appended to the file. Destination path for SSH public keys is currently limited to its default value "/home/username/.ssh/authorized_keys" due to a known issue in Linux provisioning agent.') c.argument('authentication_type', help='Type of authentication to use with the VM. Defaults to password for Windows and SSH public key for Linux. "all" enables both ssh and password authentication. 
', arg_type=get_enum_type(['ssh', 'password', 'all'])) + for scope in ['vm create', 'vmss create']: with self.argument_context(scope, arg_group='Storage') as c: if DiskStorageAccountTypes: allowed_values = ", ".join([sku.value for sku in DiskStorageAccountTypes]) @@ -1129,6 +1135,7 @@ def load_arguments(self, _): c.argument('specialized', arg_type=get_three_state_flag(), help='Indicate whether the source image is specialized.') c.argument('encryption_at_host', arg_type=get_three_state_flag(), help='Enable Host Encryption for the VM or VMSS. This will enable the encryption for all the disks including Resource/Temp disk at host itself.') + for scope in ['vm create', 'vmss create']: with self.argument_context(scope, arg_group='Network') as c: c.argument('vnet_name', help='Name of the virtual network when creating a new one or referencing an existing one.') c.argument('vnet_address_prefix', help='The IP address prefix to use when creating a new VNet in CIDR format.') @@ -1149,6 +1156,7 @@ def load_arguments(self, _): 'value to apply on all resources, or use = to configure ' 'the delete behavior for individual resources. Possible options are Delete and Detach.') + for scope in ['vm create', 'vmss create']: with self.argument_context(scope, arg_group='Marketplace Image Plan') as c: c.argument('plan_name', help='plan name') c.argument('plan_product', help='plan product') @@ -1528,13 +1536,29 @@ def load_arguments(self, _): c.argument('ppg_type', options_list=['--type', '-t'], arg_type=get_enum_type(self.get_models('ProximityPlacementGroupType')), min_api='2018-04-01', help="The type of the proximity placement group.") c.argument('intent_vm_sizes', nargs='*', min_api='2021-11-01', help="Specify possible sizes of virtual machines that can be created in the proximity placement group.") - for scope, item in [('vm create', 'VM'), ('vmss create', 'VMSS'), - ('vm availability-set create', 'availability set'), - ('vm update', 'VM'), ('vmss update', 'VMSS'), - ('vm availability-set update', 'availability set')]: - with self.argument_context(scope, min_api='2018-04-01') as c: - c.argument('proximity_placement_group', options_list=['--ppg'], help="The name or ID of the proximity placement group the {} should be associated with.".format(item), - validator=_validate_proximity_placement_group) # only availability set does not have a command level validator, so this should be added. 
+ with self.argument_context('vm create', min_api='2018-04-01') as c: + c.argument('proximity_placement_group', options_list=['--ppg'], help="The name or ID of the proximity placement group the VM should be associated with.", + validator=_validate_proximity_placement_group) + + with self.argument_context('vmss create', min_api='2018-04-01') as c: + c.argument('proximity_placement_group', options_list=['--ppg'], help="The name or ID of the proximity placement group the VMSS should be associated with.", + validator=_validate_proximity_placement_group) + + with self.argument_context('vm availability-set create', min_api='2018-04-01') as c: + c.argument('proximity_placement_group', options_list=['--ppg'], help="The name or ID of the proximity placement group the availability set should be associated with.", + validator=_validate_proximity_placement_group) + + with self.argument_context('vm update', min_api='2018-04-01') as c: + c.argument('proximity_placement_group', options_list=['--ppg'], help="The name or ID of the proximity placement group the VM should be associated with.", + validator=_validate_proximity_placement_group) + + with self.argument_context('vmss update', min_api='2018-04-01') as c: + c.argument('proximity_placement_group', options_list=['--ppg'], help="The name or ID of the proximity placement group the VMSS should be associated with.", + validator=_validate_proximity_placement_group) + + with self.argument_context('vm availability-set update', min_api='2018-04-01') as c: + c.argument('proximity_placement_group', options_list=['--ppg'], help="The name or ID of the proximity placement group the availability set should be associated with.", + validator=_validate_proximity_placement_group) # endregion # region VM Monitor diff --git a/src/azure-cli/azure/cli/command_modules/vm/aaz/latest/capacity/reservation/group/_list.py b/src/azure-cli/azure/cli/command_modules/vm/aaz/latest/capacity/reservation/group/_list.py index c571fbaeb0b..0d8796f5f6e 100644 --- a/src/azure-cli/azure/cli/command_modules/vm/aaz/latest/capacity/reservation/group/_list.py +++ b/src/azure-cli/azure/cli/command_modules/vm/aaz/latest/capacity/reservation/group/_list.py @@ -20,7 +20,7 @@ class List(AAZCommand): :example: List capacity reservation groups az capacity reservation group list -g rg - :example: List the capacity reservation groups containing VM instances and VMSS instance which are associated to capacity reservation group + :example: List the capacity reservation groups containing VM instances and VMSS instance which are associated to capacity reservation group az capacity reservation group list -g rg --vm-instance --vmss-instance """ @@ -200,6 +200,7 @@ def _build_schema_on_200(cls): ) properties.instance_view = AAZObjectType( serialized_name="instanceView", + flags={"read_only": True}, ) properties.sharing_profile = AAZObjectType( serialized_name="sharingProfile", @@ -404,6 +405,7 @@ def _build_schema_on_200(cls): ) properties.instance_view = AAZObjectType( serialized_name="instanceView", + flags={"read_only": True}, ) properties.sharing_profile = AAZObjectType( serialized_name="sharingProfile", diff --git a/src/azure-cli/azure/cli/command_modules/vm/aaz/latest/disk/__cmd_group.py b/src/azure-cli/azure/cli/command_modules/vm/aaz/latest/disk/__cmd_group.py index a3d98c8be7c..ef273230237 100644 --- a/src/azure-cli/azure/cli/command_modules/vm/aaz/latest/disk/__cmd_group.py +++ b/src/azure-cli/azure/cli/command_modules/vm/aaz/latest/disk/__cmd_group.py @@ -18,7 +18,7 @@ class 
__CMDGroup(AAZCommandGroup): """Manage Azure Managed Disks. Azure Virtual Machines use disks as a place to store an operating system, applications, and data. All Azure virtual machines have at least two disks: An operating system disk, and a temporary disk. The operating system disk is created from an image, and both the operating system disk and the image are actually virtual hard disks (VHDs) stored in an Azure storage account. Virtual machines also can have one or more data disks, that are also stored as VHDs. - Azure Unmanaged Data Disks have a maximum size of 4095 GB. To use disks larger than 4095 GB use [Azure Managed Disks](https://docs.microsoft.com/azure/virtual-machines/managed-disks-overview) + Azure Unmanaged Data Disks have a maximum size of 4095 GB. To use disks larger than 4095 GB use [Azure Managed Disks](https://learn.microsoft.com/azure/virtual-machines/managed-disks-overview) """ pass diff --git a/src/azure-cli/azure/cli/command_modules/vm/aaz/latest/network/nsg/__cmd_group.py b/src/azure-cli/azure/cli/command_modules/vm/aaz/latest/network/nsg/__cmd_group.py index 79bec603989..22ad607bc90 100644 --- a/src/azure-cli/azure/cli/command_modules/vm/aaz/latest/network/nsg/__cmd_group.py +++ b/src/azure-cli/azure/cli/command_modules/vm/aaz/latest/network/nsg/__cmd_group.py @@ -14,7 +14,7 @@ class __CMDGroup(AAZCommandGroup): """Manage Azure Network Security Groups (NSGs). - You can control network traffic to resources in a virtual network using a network security group. A network security group contains a list of security rules that allow or deny inbound or outbound network traffic based on source or destination IP addresses, Application Security Groups, ports, and protocols. For more information visit https://docs.microsoft.com/azure/virtual-network/virtual-networks-create-nsg-arm-cli. + You can control network traffic to resources in a virtual network using a network security group. A network security group contains a list of security rules that allow or deny inbound or outbound network traffic based on source or destination IP addresses, Application Security Groups, ports, and protocols. For more information visit https://learn.microsoft.com/azure/virtual-network/virtual-networks-create-nsg-arm-cli. """ pass diff --git a/src/azure-cli/azure/cli/command_modules/vm/aaz/latest/network/public_ip/__cmd_group.py b/src/azure-cli/azure/cli/command_modules/vm/aaz/latest/network/public_ip/__cmd_group.py index 569e6343494..2557faf7887 100644 --- a/src/azure-cli/azure/cli/command_modules/vm/aaz/latest/network/public_ip/__cmd_group.py +++ b/src/azure-cli/azure/cli/command_modules/vm/aaz/latest/network/public_ip/__cmd_group.py @@ -14,7 +14,7 @@ class __CMDGroup(AAZCommandGroup): """Manage public IP addresses. - To learn more about public IP addresses visit https://docs.microsoft.com/azure/virtual-network/virtual-network-public-ip-address. + To learn more about public IP addresses visit https://learn.microsoft.com/azure/virtual-network/virtual-network-public-ip-address. 
""" pass diff --git a/src/azure-cli/azure/cli/command_modules/vm/aaz/latest/network/vnet/__cmd_group.py b/src/azure-cli/azure/cli/command_modules/vm/aaz/latest/network/vnet/__cmd_group.py index 4099513f8aa..73d2004eb91 100644 --- a/src/azure-cli/azure/cli/command_modules/vm/aaz/latest/network/vnet/__cmd_group.py +++ b/src/azure-cli/azure/cli/command_modules/vm/aaz/latest/network/vnet/__cmd_group.py @@ -14,7 +14,7 @@ class __CMDGroup(AAZCommandGroup): """Check if a private IP address is available for use within a virtual network. - To learn more about Virtual Networks visit https://docs.microsoft.com/azure/virtual-network/virtual-network-manage-network. + To learn more about Virtual Networks visit https://learn.microsoft.com/azure/virtual-network/virtual-network-manage-network. """ pass diff --git a/src/azure-cli/azure/cli/command_modules/vm/aaz/latest/network/vnet/subnet/__cmd_group.py b/src/azure-cli/azure/cli/command_modules/vm/aaz/latest/network/vnet/subnet/__cmd_group.py index 33a5960d8ee..748734c3253 100644 --- a/src/azure-cli/azure/cli/command_modules/vm/aaz/latest/network/vnet/subnet/__cmd_group.py +++ b/src/azure-cli/azure/cli/command_modules/vm/aaz/latest/network/vnet/subnet/__cmd_group.py @@ -14,7 +14,7 @@ class __CMDGroup(AAZCommandGroup): """Manage subnets in an Azure Virtual Network. - To learn more about subnets visit https://docs.microsoft.com/azure/virtual-network/virtual-network-manage-subnet. + To learn more about subnets visit https://learn.microsoft.com/azure/virtual-network/virtual-network-manage-subnet. """ pass diff --git a/src/azure-cli/azure/cli/command_modules/vm/aaz/latest/vm/disk/__cmd_group.py b/src/azure-cli/azure/cli/command_modules/vm/aaz/latest/vm/disk/__cmd_group.py index baf7883699a..0715836856f 100644 --- a/src/azure-cli/azure/cli/command_modules/vm/aaz/latest/vm/disk/__cmd_group.py +++ b/src/azure-cli/azure/cli/command_modules/vm/aaz/latest/vm/disk/__cmd_group.py @@ -23,10 +23,10 @@ class __CMDGroup(AAZCommandGroup): Azure Managed and Unmanaged Data Disks have a maximum size of 4095 GB (with the exception of larger disks in preview). Azure Unmanaged Disks also have a maximum capacity of 4095 GB. For more information, see: - - Azure Disks - https://docs.microsoft.com/azure/virtual-machines/managed-disks-overview. + - Azure Disks - https://learn.microsoft.com/azure/virtual-machines/managed-disks-overview. - Larger Managed Disks in Public Preview - https://azure.microsoft.com/blog/introducing-the- public-preview-of-larger-managed-disks-sizes/ - - Ultra SSD Managed Disks in Public Preview - https://docs.microsoft.com/azure/virtual- + - Ultra SSD Managed Disks in Public Preview - https://learn.microsoft.com/azure/virtual- machines/disks-types. """ pass diff --git a/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2018_03_01_hybrid/capacity/__cmd_group.py b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2018_03_01_hybrid/capacity/__cmd_group.py new file mode 100644 index 00000000000..a10530cf097 --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2018_03_01_hybrid/capacity/__cmd_group.py @@ -0,0 +1,23 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# +# Code generated by aaz-dev-tools +# -------------------------------------------------------------------------------------------- + +# pylint: skip-file +# flake8: noqa + +from azure.cli.core.aaz import * + + +@register_command_group( + "capacity", +) +class __CMDGroup(AAZCommandGroup): + """Manage capacity. + """ + pass + + +__all__ = ["__CMDGroup"] diff --git a/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2018_03_01_hybrid/capacity/__init__.py b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2018_03_01_hybrid/capacity/__init__.py new file mode 100644 index 00000000000..5a9d61963d6 --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2018_03_01_hybrid/capacity/__init__.py @@ -0,0 +1,11 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# +# Code generated by aaz-dev-tools +# -------------------------------------------------------------------------------------------- + +# pylint: skip-file +# flake8: noqa + +from .__cmd_group import * diff --git a/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2018_03_01_hybrid/capacity/reservation/__cmd_group.py b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2018_03_01_hybrid/capacity/reservation/__cmd_group.py new file mode 100644 index 00000000000..58d1554a9a5 --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2018_03_01_hybrid/capacity/reservation/__cmd_group.py @@ -0,0 +1,23 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# +# Code generated by aaz-dev-tools +# -------------------------------------------------------------------------------------------- + +# pylint: skip-file +# flake8: noqa + +from azure.cli.core.aaz import * + + +@register_command_group( + "capacity reservation", +) +class __CMDGroup(AAZCommandGroup): + """Manage capacity reservation. + """ + pass + + +__all__ = ["__CMDGroup"] diff --git a/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2018_03_01_hybrid/capacity/reservation/__init__.py b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2018_03_01_hybrid/capacity/reservation/__init__.py new file mode 100644 index 00000000000..5a9d61963d6 --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2018_03_01_hybrid/capacity/reservation/__init__.py @@ -0,0 +1,11 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# +# Code generated by aaz-dev-tools +# -------------------------------------------------------------------------------------------- + +# pylint: skip-file +# flake8: noqa + +from .__cmd_group import * diff --git a/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2018_03_01_hybrid/capacity/reservation/group/__cmd_group.py b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2018_03_01_hybrid/capacity/reservation/group/__cmd_group.py new file mode 100644 index 00000000000..84dd1dd45a2 --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2018_03_01_hybrid/capacity/reservation/group/__cmd_group.py @@ -0,0 +1,23 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# +# Code generated by aaz-dev-tools +# -------------------------------------------------------------------------------------------- + +# pylint: skip-file +# flake8: noqa + +from azure.cli.core.aaz import * + + +@register_command_group( + "capacity reservation group", +) +class __CMDGroup(AAZCommandGroup): + """Manage capacity reservation group. + """ + pass + + +__all__ = ["__CMDGroup"] diff --git a/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2018_03_01_hybrid/capacity/reservation/group/__init__.py b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2018_03_01_hybrid/capacity/reservation/group/__init__.py new file mode 100644 index 00000000000..d63ae5a6fc9 --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2018_03_01_hybrid/capacity/reservation/group/__init__.py @@ -0,0 +1,12 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# +# Code generated by aaz-dev-tools +# -------------------------------------------------------------------------------------------- + +# pylint: skip-file +# flake8: noqa + +from .__cmd_group import * +from ._list import * diff --git a/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2018_03_01_hybrid/capacity/reservation/group/_list.py b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2018_03_01_hybrid/capacity/reservation/group/_list.py new file mode 100644 index 00000000000..0d8796f5f6e --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2018_03_01_hybrid/capacity/reservation/group/_list.py @@ -0,0 +1,519 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# +# Code generated by aaz-dev-tools +# -------------------------------------------------------------------------------------------- + +# pylint: skip-file +# flake8: noqa + +from azure.cli.core.aaz import * + + +@register_command( + "capacity reservation group list", +) +class List(AAZCommand): + """List the capacity reservation groups. 
+ + :example: List capacity reservation groups + az capacity reservation group list -g rg + + :example: List the capacity reservation groups containing VM instances and VMSS instance which are associated to capacity reservation group + az capacity reservation group list -g rg --vm-instance --vmss-instance + """ + + _aaz_info = { + "version": "2024-03-01", + "resources": [ + ["mgmt-plane", "/subscriptions/{}/providers/microsoft.compute/capacityreservationgroups", "2024-03-01"], + ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.compute/capacityreservationgroups", "2024-03-01"], + ] + } + + AZ_SUPPORT_PAGINATION = True + + def _handler(self, command_args): + super()._handler(command_args) + return self.build_paging(self._execute_operations, self._output) + + _args_schema = None + + @classmethod + def _build_arguments_schema(cls, *args, **kwargs): + if cls._args_schema is not None: + return cls._args_schema + cls._args_schema = super()._build_arguments_schema(*args, **kwargs) + + # define Arg Group "" + + _args_schema = cls._args_schema + _args_schema.resource_group = AAZResourceGroupNameArg() + _args_schema.expand = AAZStrArg( + options=["--expand"], + help="The expand expression to apply on the operation. Based on the expand param(s) specified we return Virtual Machine or ScaleSet VM Instance or both resource Ids which are associated to capacity reservation group in the response.", + enum={"virtualMachineScaleSetVMs/$ref": "virtualMachineScaleSetVMs/$ref", "virtualMachines/$ref": "virtualMachines/$ref"}, + ) + _args_schema.resource_ids_only = AAZStrArg( + options=["--resource-ids-only"], + help="The query option to fetch capacity reservation group resource Ids. 'CreatedInSubscription' enables fetching resource Ids for all capacity reservation group resources created in the subscription. 'SharedWithSubscription' enables fetching resource Ids for all capacity reservation group resources shared with the subscription. 
'All' enables fetching resource Ids for all capacity reservation group resources shared with the subscription and created in the subscription.", + enum={"All": "All", "CreatedInSubscription": "CreatedInSubscription", "SharedWithSubscription": "SharedWithSubscription"}, + ) + return cls._args_schema + + def _execute_operations(self): + self.pre_operations() + condition_0 = has_value(self.ctx.subscription_id) and has_value(self.ctx.args.resource_group) is not True + condition_1 = has_value(self.ctx.args.resource_group) and has_value(self.ctx.subscription_id) + if condition_0: + self.CapacityReservationGroupsListBySubscription(ctx=self.ctx)() + if condition_1: + self.CapacityReservationGroupsListByResourceGroup(ctx=self.ctx)() + self.post_operations() + + @register_callback + def pre_operations(self): + pass + + @register_callback + def post_operations(self): + pass + + def _output(self, *args, **kwargs): + result = self.deserialize_output(self.ctx.vars.instance.value, client_flatten=True) + next_link = self.deserialize_output(self.ctx.vars.instance.next_link) + return result, next_link + + class CapacityReservationGroupsListBySubscription(AAZHttpOperation): + CLIENT_TYPE = "MgmtClient" + + def __call__(self, *args, **kwargs): + request = self.make_request() + session = self.client.send_request(request=request, stream=False, **kwargs) + if session.http_response.status_code in [200]: + return self.on_200(session) + + return self.on_error(session.http_response) + + @property + def url(self): + return self.client.format_url( + "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/capacityReservationGroups", + **self.url_parameters + ) + + @property + def method(self): + return "GET" + + @property + def error_format(self): + return "ODataV4Format" + + @property + def url_parameters(self): + parameters = { + **self.serialize_url_param( + "subscriptionId", self.ctx.subscription_id, + required=True, + ), + } + return parameters + + @property + def query_parameters(self): + parameters = { + **self.serialize_query_param( + "$expand", self.ctx.args.expand, + ), + **self.serialize_query_param( + "resourceIdsOnly", self.ctx.args.resource_ids_only, + ), + **self.serialize_query_param( + "api-version", "2024-03-01", + required=True, + ), + } + return parameters + + @property + def header_parameters(self): + parameters = { + **self.serialize_header_param( + "Accept", "application/json", + ), + } + return parameters + + def on_200(self, session): + data = self.deserialize_http_content(session) + self.ctx.set_var( + "instance", + data, + schema_builder=self._build_schema_on_200 + ) + + _schema_on_200 = None + + @classmethod + def _build_schema_on_200(cls): + if cls._schema_on_200 is not None: + return cls._schema_on_200 + + cls._schema_on_200 = AAZObjectType() + + _schema_on_200 = cls._schema_on_200 + _schema_on_200.next_link = AAZStrType( + serialized_name="nextLink", + ) + _schema_on_200.value = AAZListType( + flags={"required": True}, + ) + + value = cls._schema_on_200.value + value.Element = AAZObjectType() + + _element = cls._schema_on_200.value.Element + _element.id = AAZStrType( + flags={"read_only": True}, + ) + _element.location = AAZStrType( + flags={"required": True}, + ) + _element.name = AAZStrType( + flags={"read_only": True}, + ) + _element.properties = AAZObjectType( + flags={"client_flatten": True}, + ) + _element.tags = AAZDictType() + _element.type = AAZStrType( + flags={"read_only": True}, + ) + _element.zones = AAZListType() + + properties = 
cls._schema_on_200.value.Element.properties + properties.capacity_reservations = AAZListType( + serialized_name="capacityReservations", + flags={"read_only": True}, + ) + properties.instance_view = AAZObjectType( + serialized_name="instanceView", + flags={"read_only": True}, + ) + properties.sharing_profile = AAZObjectType( + serialized_name="sharingProfile", + ) + properties.virtual_machines_associated = AAZListType( + serialized_name="virtualMachinesAssociated", + flags={"read_only": True}, + ) + + capacity_reservations = cls._schema_on_200.value.Element.properties.capacity_reservations + capacity_reservations.Element = AAZObjectType() + _ListHelper._build_schema_sub_resource_read_only_read(capacity_reservations.Element) + + instance_view = cls._schema_on_200.value.Element.properties.instance_view + instance_view.capacity_reservations = AAZListType( + serialized_name="capacityReservations", + flags={"read_only": True}, + ) + instance_view.shared_subscription_ids = AAZListType( + serialized_name="sharedSubscriptionIds", + flags={"read_only": True}, + ) + + capacity_reservations = cls._schema_on_200.value.Element.properties.instance_view.capacity_reservations + capacity_reservations.Element = AAZObjectType() + + _element = cls._schema_on_200.value.Element.properties.instance_view.capacity_reservations.Element + _element.name = AAZStrType( + flags={"read_only": True}, + ) + _element.statuses = AAZListType() + _element.utilization_info = AAZObjectType( + serialized_name="utilizationInfo", + ) + + statuses = cls._schema_on_200.value.Element.properties.instance_view.capacity_reservations.Element.statuses + statuses.Element = AAZObjectType() + + _element = cls._schema_on_200.value.Element.properties.instance_view.capacity_reservations.Element.statuses.Element + _element.code = AAZStrType() + _element.display_status = AAZStrType( + serialized_name="displayStatus", + ) + _element.level = AAZStrType() + _element.message = AAZStrType() + _element.time = AAZStrType() + + utilization_info = cls._schema_on_200.value.Element.properties.instance_view.capacity_reservations.Element.utilization_info + utilization_info.current_capacity = AAZIntType( + serialized_name="currentCapacity", + flags={"read_only": True}, + ) + utilization_info.virtual_machines_allocated = AAZListType( + serialized_name="virtualMachinesAllocated", + flags={"read_only": True}, + ) + + virtual_machines_allocated = cls._schema_on_200.value.Element.properties.instance_view.capacity_reservations.Element.utilization_info.virtual_machines_allocated + virtual_machines_allocated.Element = AAZObjectType() + _ListHelper._build_schema_sub_resource_read_only_read(virtual_machines_allocated.Element) + + shared_subscription_ids = cls._schema_on_200.value.Element.properties.instance_view.shared_subscription_ids + shared_subscription_ids.Element = AAZObjectType() + _ListHelper._build_schema_sub_resource_read_only_read(shared_subscription_ids.Element) + + sharing_profile = cls._schema_on_200.value.Element.properties.sharing_profile + sharing_profile.subscription_ids = AAZListType( + serialized_name="subscriptionIds", + ) + + subscription_ids = cls._schema_on_200.value.Element.properties.sharing_profile.subscription_ids + subscription_ids.Element = AAZObjectType() + + _element = cls._schema_on_200.value.Element.properties.sharing_profile.subscription_ids.Element + _element.id = AAZStrType() + + virtual_machines_associated = cls._schema_on_200.value.Element.properties.virtual_machines_associated + virtual_machines_associated.Element = AAZObjectType() 
+ _ListHelper._build_schema_sub_resource_read_only_read(virtual_machines_associated.Element) + + tags = cls._schema_on_200.value.Element.tags + tags.Element = AAZStrType() + + zones = cls._schema_on_200.value.Element.zones + zones.Element = AAZStrType() + + return cls._schema_on_200 + + class CapacityReservationGroupsListByResourceGroup(AAZHttpOperation): + CLIENT_TYPE = "MgmtClient" + + def __call__(self, *args, **kwargs): + request = self.make_request() + session = self.client.send_request(request=request, stream=False, **kwargs) + if session.http_response.status_code in [200]: + return self.on_200(session) + + return self.on_error(session.http_response) + + @property + def url(self): + return self.client.format_url( + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups", + **self.url_parameters + ) + + @property + def method(self): + return "GET" + + @property + def error_format(self): + return "ODataV4Format" + + @property + def url_parameters(self): + parameters = { + **self.serialize_url_param( + "resourceGroupName", self.ctx.args.resource_group, + required=True, + ), + **self.serialize_url_param( + "subscriptionId", self.ctx.subscription_id, + required=True, + ), + } + return parameters + + @property + def query_parameters(self): + parameters = { + **self.serialize_query_param( + "$expand", self.ctx.args.expand, + ), + **self.serialize_query_param( + "api-version", "2024-03-01", + required=True, + ), + } + return parameters + + @property + def header_parameters(self): + parameters = { + **self.serialize_header_param( + "Accept", "application/json", + ), + } + return parameters + + def on_200(self, session): + data = self.deserialize_http_content(session) + self.ctx.set_var( + "instance", + data, + schema_builder=self._build_schema_on_200 + ) + + _schema_on_200 = None + + @classmethod + def _build_schema_on_200(cls): + if cls._schema_on_200 is not None: + return cls._schema_on_200 + + cls._schema_on_200 = AAZObjectType() + + _schema_on_200 = cls._schema_on_200 + _schema_on_200.next_link = AAZStrType( + serialized_name="nextLink", + ) + _schema_on_200.value = AAZListType( + flags={"required": True}, + ) + + value = cls._schema_on_200.value + value.Element = AAZObjectType() + + _element = cls._schema_on_200.value.Element + _element.id = AAZStrType( + flags={"read_only": True}, + ) + _element.location = AAZStrType( + flags={"required": True}, + ) + _element.name = AAZStrType( + flags={"read_only": True}, + ) + _element.properties = AAZObjectType( + flags={"client_flatten": True}, + ) + _element.tags = AAZDictType() + _element.type = AAZStrType( + flags={"read_only": True}, + ) + _element.zones = AAZListType() + + properties = cls._schema_on_200.value.Element.properties + properties.capacity_reservations = AAZListType( + serialized_name="capacityReservations", + flags={"read_only": True}, + ) + properties.instance_view = AAZObjectType( + serialized_name="instanceView", + flags={"read_only": True}, + ) + properties.sharing_profile = AAZObjectType( + serialized_name="sharingProfile", + ) + properties.virtual_machines_associated = AAZListType( + serialized_name="virtualMachinesAssociated", + flags={"read_only": True}, + ) + + capacity_reservations = cls._schema_on_200.value.Element.properties.capacity_reservations + capacity_reservations.Element = AAZObjectType() + _ListHelper._build_schema_sub_resource_read_only_read(capacity_reservations.Element) + + instance_view = 
cls._schema_on_200.value.Element.properties.instance_view + instance_view.capacity_reservations = AAZListType( + serialized_name="capacityReservations", + flags={"read_only": True}, + ) + instance_view.shared_subscription_ids = AAZListType( + serialized_name="sharedSubscriptionIds", + flags={"read_only": True}, + ) + + capacity_reservations = cls._schema_on_200.value.Element.properties.instance_view.capacity_reservations + capacity_reservations.Element = AAZObjectType() + + _element = cls._schema_on_200.value.Element.properties.instance_view.capacity_reservations.Element + _element.name = AAZStrType( + flags={"read_only": True}, + ) + _element.statuses = AAZListType() + _element.utilization_info = AAZObjectType( + serialized_name="utilizationInfo", + ) + + statuses = cls._schema_on_200.value.Element.properties.instance_view.capacity_reservations.Element.statuses + statuses.Element = AAZObjectType() + + _element = cls._schema_on_200.value.Element.properties.instance_view.capacity_reservations.Element.statuses.Element + _element.code = AAZStrType() + _element.display_status = AAZStrType( + serialized_name="displayStatus", + ) + _element.level = AAZStrType() + _element.message = AAZStrType() + _element.time = AAZStrType() + + utilization_info = cls._schema_on_200.value.Element.properties.instance_view.capacity_reservations.Element.utilization_info + utilization_info.current_capacity = AAZIntType( + serialized_name="currentCapacity", + flags={"read_only": True}, + ) + utilization_info.virtual_machines_allocated = AAZListType( + serialized_name="virtualMachinesAllocated", + flags={"read_only": True}, + ) + + virtual_machines_allocated = cls._schema_on_200.value.Element.properties.instance_view.capacity_reservations.Element.utilization_info.virtual_machines_allocated + virtual_machines_allocated.Element = AAZObjectType() + _ListHelper._build_schema_sub_resource_read_only_read(virtual_machines_allocated.Element) + + shared_subscription_ids = cls._schema_on_200.value.Element.properties.instance_view.shared_subscription_ids + shared_subscription_ids.Element = AAZObjectType() + _ListHelper._build_schema_sub_resource_read_only_read(shared_subscription_ids.Element) + + sharing_profile = cls._schema_on_200.value.Element.properties.sharing_profile + sharing_profile.subscription_ids = AAZListType( + serialized_name="subscriptionIds", + ) + + subscription_ids = cls._schema_on_200.value.Element.properties.sharing_profile.subscription_ids + subscription_ids.Element = AAZObjectType() + + _element = cls._schema_on_200.value.Element.properties.sharing_profile.subscription_ids.Element + _element.id = AAZStrType() + + virtual_machines_associated = cls._schema_on_200.value.Element.properties.virtual_machines_associated + virtual_machines_associated.Element = AAZObjectType() + _ListHelper._build_schema_sub_resource_read_only_read(virtual_machines_associated.Element) + + tags = cls._schema_on_200.value.Element.tags + tags.Element = AAZStrType() + + zones = cls._schema_on_200.value.Element.zones + zones.Element = AAZStrType() + + return cls._schema_on_200 + + +class _ListHelper: + """Helper class for List""" + + _schema_sub_resource_read_only_read = None + + @classmethod + def _build_schema_sub_resource_read_only_read(cls, _schema): + if cls._schema_sub_resource_read_only_read is not None: + _schema.id = cls._schema_sub_resource_read_only_read.id + return + + cls._schema_sub_resource_read_only_read = _schema_sub_resource_read_only_read = AAZObjectType() + + sub_resource_read_only_read = 
_schema_sub_resource_read_only_read + sub_resource_read_only_read.id = AAZStrType( + flags={"read_only": True}, + ) + + _schema.id = cls._schema_sub_resource_read_only_read.id + + +__all__ = ["List"] diff --git a/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2018_03_01_hybrid/disk/__cmd_group.py b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2018_03_01_hybrid/disk/__cmd_group.py index a3d98c8be7c..ef273230237 100644 --- a/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2018_03_01_hybrid/disk/__cmd_group.py +++ b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2018_03_01_hybrid/disk/__cmd_group.py @@ -18,7 +18,7 @@ class __CMDGroup(AAZCommandGroup): """Manage Azure Managed Disks. Azure Virtual Machines use disks as a place to store an operating system, applications, and data. All Azure virtual machines have at least two disks: An operating system disk, and a temporary disk. The operating system disk is created from an image, and both the operating system disk and the image are actually virtual hard disks (VHDs) stored in an Azure storage account. Virtual machines also can have one or more data disks, that are also stored as VHDs. - Azure Unmanaged Data Disks have a maximum size of 4095 GB. To use disks larger than 4095 GB use [Azure Managed Disks](https://docs.microsoft.com/azure/virtual-machines/managed-disks-overview) + Azure Unmanaged Data Disks have a maximum size of 4095 GB. To use disks larger than 4095 GB use [Azure Managed Disks](https://learn.microsoft.com/azure/virtual-machines/managed-disks-overview) """ pass diff --git a/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2018_03_01_hybrid/network/nsg/__cmd_group.py b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2018_03_01_hybrid/network/nsg/__cmd_group.py index 79bec603989..22ad607bc90 100644 --- a/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2018_03_01_hybrid/network/nsg/__cmd_group.py +++ b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2018_03_01_hybrid/network/nsg/__cmd_group.py @@ -14,7 +14,7 @@ class __CMDGroup(AAZCommandGroup): """Manage Azure Network Security Groups (NSGs). - You can control network traffic to resources in a virtual network using a network security group. A network security group contains a list of security rules that allow or deny inbound or outbound network traffic based on source or destination IP addresses, Application Security Groups, ports, and protocols. For more information visit https://docs.microsoft.com/azure/virtual-network/virtual-networks-create-nsg-arm-cli. + You can control network traffic to resources in a virtual network using a network security group. A network security group contains a list of security rules that allow or deny inbound or outbound network traffic based on source or destination IP addresses, Application Security Groups, ports, and protocols. For more information visit https://learn.microsoft.com/azure/virtual-network/virtual-networks-create-nsg-arm-cli. 
""" pass diff --git a/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2018_03_01_hybrid/network/public_ip/__cmd_group.py b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2018_03_01_hybrid/network/public_ip/__cmd_group.py index 569e6343494..2557faf7887 100644 --- a/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2018_03_01_hybrid/network/public_ip/__cmd_group.py +++ b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2018_03_01_hybrid/network/public_ip/__cmd_group.py @@ -14,7 +14,7 @@ class __CMDGroup(AAZCommandGroup): """Manage public IP addresses. - To learn more about public IP addresses visit https://docs.microsoft.com/azure/virtual-network/virtual-network-public-ip-address. + To learn more about public IP addresses visit https://learn.microsoft.com/azure/virtual-network/virtual-network-public-ip-address. """ pass diff --git a/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2018_03_01_hybrid/network/vnet/__cmd_group.py b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2018_03_01_hybrid/network/vnet/__cmd_group.py index 4099513f8aa..73d2004eb91 100644 --- a/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2018_03_01_hybrid/network/vnet/__cmd_group.py +++ b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2018_03_01_hybrid/network/vnet/__cmd_group.py @@ -14,7 +14,7 @@ class __CMDGroup(AAZCommandGroup): """Check if a private IP address is available for use within a virtual network. - To learn more about Virtual Networks visit https://docs.microsoft.com/azure/virtual-network/virtual-network-manage-network. + To learn more about Virtual Networks visit https://learn.microsoft.com/azure/virtual-network/virtual-network-manage-network. """ pass diff --git a/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2018_03_01_hybrid/network/vnet/subnet/__cmd_group.py b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2018_03_01_hybrid/network/vnet/subnet/__cmd_group.py index 33a5960d8ee..748734c3253 100644 --- a/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2018_03_01_hybrid/network/vnet/subnet/__cmd_group.py +++ b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2018_03_01_hybrid/network/vnet/subnet/__cmd_group.py @@ -14,7 +14,7 @@ class __CMDGroup(AAZCommandGroup): """Manage subnets in an Azure Virtual Network. - To learn more about subnets visit https://docs.microsoft.com/azure/virtual-network/virtual-network-manage-subnet. + To learn more about subnets visit https://learn.microsoft.com/azure/virtual-network/virtual-network-manage-subnet. """ pass diff --git a/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2019_03_01_hybrid/capacity/__cmd_group.py b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2019_03_01_hybrid/capacity/__cmd_group.py new file mode 100644 index 00000000000..a10530cf097 --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2019_03_01_hybrid/capacity/__cmd_group.py @@ -0,0 +1,23 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# +# Code generated by aaz-dev-tools +# -------------------------------------------------------------------------------------------- + +# pylint: skip-file +# flake8: noqa + +from azure.cli.core.aaz import * + + +@register_command_group( + "capacity", +) +class __CMDGroup(AAZCommandGroup): + """Manage capacity. 
+ """ + pass + + +__all__ = ["__CMDGroup"] diff --git a/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2019_03_01_hybrid/capacity/__init__.py b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2019_03_01_hybrid/capacity/__init__.py new file mode 100644 index 00000000000..5a9d61963d6 --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2019_03_01_hybrid/capacity/__init__.py @@ -0,0 +1,11 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# +# Code generated by aaz-dev-tools +# -------------------------------------------------------------------------------------------- + +# pylint: skip-file +# flake8: noqa + +from .__cmd_group import * diff --git a/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2019_03_01_hybrid/capacity/reservation/__cmd_group.py b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2019_03_01_hybrid/capacity/reservation/__cmd_group.py new file mode 100644 index 00000000000..58d1554a9a5 --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2019_03_01_hybrid/capacity/reservation/__cmd_group.py @@ -0,0 +1,23 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# +# Code generated by aaz-dev-tools +# -------------------------------------------------------------------------------------------- + +# pylint: skip-file +# flake8: noqa + +from azure.cli.core.aaz import * + + +@register_command_group( + "capacity reservation", +) +class __CMDGroup(AAZCommandGroup): + """Manage capacity reservation. + """ + pass + + +__all__ = ["__CMDGroup"] diff --git a/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2019_03_01_hybrid/capacity/reservation/__init__.py b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2019_03_01_hybrid/capacity/reservation/__init__.py new file mode 100644 index 00000000000..5a9d61963d6 --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2019_03_01_hybrid/capacity/reservation/__init__.py @@ -0,0 +1,11 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# +# Code generated by aaz-dev-tools +# -------------------------------------------------------------------------------------------- + +# pylint: skip-file +# flake8: noqa + +from .__cmd_group import * diff --git a/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2019_03_01_hybrid/capacity/reservation/group/__cmd_group.py b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2019_03_01_hybrid/capacity/reservation/group/__cmd_group.py new file mode 100644 index 00000000000..84dd1dd45a2 --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2019_03_01_hybrid/capacity/reservation/group/__cmd_group.py @@ -0,0 +1,23 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# +# Code generated by aaz-dev-tools +# -------------------------------------------------------------------------------------------- + +# pylint: skip-file +# flake8: noqa + +from azure.cli.core.aaz import * + + +@register_command_group( + "capacity reservation group", +) +class __CMDGroup(AAZCommandGroup): + """Manage capacity reservation group. + """ + pass + + +__all__ = ["__CMDGroup"] diff --git a/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2019_03_01_hybrid/capacity/reservation/group/__init__.py b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2019_03_01_hybrid/capacity/reservation/group/__init__.py new file mode 100644 index 00000000000..d63ae5a6fc9 --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2019_03_01_hybrid/capacity/reservation/group/__init__.py @@ -0,0 +1,12 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# +# Code generated by aaz-dev-tools +# -------------------------------------------------------------------------------------------- + +# pylint: skip-file +# flake8: noqa + +from .__cmd_group import * +from ._list import * diff --git a/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2019_03_01_hybrid/capacity/reservation/group/_list.py b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2019_03_01_hybrid/capacity/reservation/group/_list.py new file mode 100644 index 00000000000..0d8796f5f6e --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2019_03_01_hybrid/capacity/reservation/group/_list.py @@ -0,0 +1,519 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# +# Code generated by aaz-dev-tools +# -------------------------------------------------------------------------------------------- + +# pylint: skip-file +# flake8: noqa + +from azure.cli.core.aaz import * + + +@register_command( + "capacity reservation group list", +) +class List(AAZCommand): + """List the capacity reservation groups. 
+ + :example: List capacity reservation groups + az capacity reservation group list -g rg + + :example: List the capacity reservation groups containing VM instances and VMSS instance which are associated to capacity reservation group + az capacity reservation group list -g rg --vm-instance --vmss-instance + """ + + _aaz_info = { + "version": "2024-03-01", + "resources": [ + ["mgmt-plane", "/subscriptions/{}/providers/microsoft.compute/capacityreservationgroups", "2024-03-01"], + ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.compute/capacityreservationgroups", "2024-03-01"], + ] + } + + AZ_SUPPORT_PAGINATION = True + + def _handler(self, command_args): + super()._handler(command_args) + return self.build_paging(self._execute_operations, self._output) + + _args_schema = None + + @classmethod + def _build_arguments_schema(cls, *args, **kwargs): + if cls._args_schema is not None: + return cls._args_schema + cls._args_schema = super()._build_arguments_schema(*args, **kwargs) + + # define Arg Group "" + + _args_schema = cls._args_schema + _args_schema.resource_group = AAZResourceGroupNameArg() + _args_schema.expand = AAZStrArg( + options=["--expand"], + help="The expand expression to apply on the operation. Based on the expand param(s) specified we return Virtual Machine or ScaleSet VM Instance or both resource Ids which are associated to capacity reservation group in the response.", + enum={"virtualMachineScaleSetVMs/$ref": "virtualMachineScaleSetVMs/$ref", "virtualMachines/$ref": "virtualMachines/$ref"}, + ) + _args_schema.resource_ids_only = AAZStrArg( + options=["--resource-ids-only"], + help="The query option to fetch capacity reservation group resource Ids. 'CreatedInSubscription' enables fetching resource Ids for all capacity reservation group resources created in the subscription. 'SharedWithSubscription' enables fetching resource Ids for all capacity reservation group resources shared with the subscription. 
'All' enables fetching resource Ids for all capacity reservation group resources shared with the subscription and created in the subscription.", + enum={"All": "All", "CreatedInSubscription": "CreatedInSubscription", "SharedWithSubscription": "SharedWithSubscription"}, + ) + return cls._args_schema + + def _execute_operations(self): + self.pre_operations() + condition_0 = has_value(self.ctx.subscription_id) and has_value(self.ctx.args.resource_group) is not True + condition_1 = has_value(self.ctx.args.resource_group) and has_value(self.ctx.subscription_id) + if condition_0: + self.CapacityReservationGroupsListBySubscription(ctx=self.ctx)() + if condition_1: + self.CapacityReservationGroupsListByResourceGroup(ctx=self.ctx)() + self.post_operations() + + @register_callback + def pre_operations(self): + pass + + @register_callback + def post_operations(self): + pass + + def _output(self, *args, **kwargs): + result = self.deserialize_output(self.ctx.vars.instance.value, client_flatten=True) + next_link = self.deserialize_output(self.ctx.vars.instance.next_link) + return result, next_link + + class CapacityReservationGroupsListBySubscription(AAZHttpOperation): + CLIENT_TYPE = "MgmtClient" + + def __call__(self, *args, **kwargs): + request = self.make_request() + session = self.client.send_request(request=request, stream=False, **kwargs) + if session.http_response.status_code in [200]: + return self.on_200(session) + + return self.on_error(session.http_response) + + @property + def url(self): + return self.client.format_url( + "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/capacityReservationGroups", + **self.url_parameters + ) + + @property + def method(self): + return "GET" + + @property + def error_format(self): + return "ODataV4Format" + + @property + def url_parameters(self): + parameters = { + **self.serialize_url_param( + "subscriptionId", self.ctx.subscription_id, + required=True, + ), + } + return parameters + + @property + def query_parameters(self): + parameters = { + **self.serialize_query_param( + "$expand", self.ctx.args.expand, + ), + **self.serialize_query_param( + "resourceIdsOnly", self.ctx.args.resource_ids_only, + ), + **self.serialize_query_param( + "api-version", "2024-03-01", + required=True, + ), + } + return parameters + + @property + def header_parameters(self): + parameters = { + **self.serialize_header_param( + "Accept", "application/json", + ), + } + return parameters + + def on_200(self, session): + data = self.deserialize_http_content(session) + self.ctx.set_var( + "instance", + data, + schema_builder=self._build_schema_on_200 + ) + + _schema_on_200 = None + + @classmethod + def _build_schema_on_200(cls): + if cls._schema_on_200 is not None: + return cls._schema_on_200 + + cls._schema_on_200 = AAZObjectType() + + _schema_on_200 = cls._schema_on_200 + _schema_on_200.next_link = AAZStrType( + serialized_name="nextLink", + ) + _schema_on_200.value = AAZListType( + flags={"required": True}, + ) + + value = cls._schema_on_200.value + value.Element = AAZObjectType() + + _element = cls._schema_on_200.value.Element + _element.id = AAZStrType( + flags={"read_only": True}, + ) + _element.location = AAZStrType( + flags={"required": True}, + ) + _element.name = AAZStrType( + flags={"read_only": True}, + ) + _element.properties = AAZObjectType( + flags={"client_flatten": True}, + ) + _element.tags = AAZDictType() + _element.type = AAZStrType( + flags={"read_only": True}, + ) + _element.zones = AAZListType() + + properties = 
cls._schema_on_200.value.Element.properties + properties.capacity_reservations = AAZListType( + serialized_name="capacityReservations", + flags={"read_only": True}, + ) + properties.instance_view = AAZObjectType( + serialized_name="instanceView", + flags={"read_only": True}, + ) + properties.sharing_profile = AAZObjectType( + serialized_name="sharingProfile", + ) + properties.virtual_machines_associated = AAZListType( + serialized_name="virtualMachinesAssociated", + flags={"read_only": True}, + ) + + capacity_reservations = cls._schema_on_200.value.Element.properties.capacity_reservations + capacity_reservations.Element = AAZObjectType() + _ListHelper._build_schema_sub_resource_read_only_read(capacity_reservations.Element) + + instance_view = cls._schema_on_200.value.Element.properties.instance_view + instance_view.capacity_reservations = AAZListType( + serialized_name="capacityReservations", + flags={"read_only": True}, + ) + instance_view.shared_subscription_ids = AAZListType( + serialized_name="sharedSubscriptionIds", + flags={"read_only": True}, + ) + + capacity_reservations = cls._schema_on_200.value.Element.properties.instance_view.capacity_reservations + capacity_reservations.Element = AAZObjectType() + + _element = cls._schema_on_200.value.Element.properties.instance_view.capacity_reservations.Element + _element.name = AAZStrType( + flags={"read_only": True}, + ) + _element.statuses = AAZListType() + _element.utilization_info = AAZObjectType( + serialized_name="utilizationInfo", + ) + + statuses = cls._schema_on_200.value.Element.properties.instance_view.capacity_reservations.Element.statuses + statuses.Element = AAZObjectType() + + _element = cls._schema_on_200.value.Element.properties.instance_view.capacity_reservations.Element.statuses.Element + _element.code = AAZStrType() + _element.display_status = AAZStrType( + serialized_name="displayStatus", + ) + _element.level = AAZStrType() + _element.message = AAZStrType() + _element.time = AAZStrType() + + utilization_info = cls._schema_on_200.value.Element.properties.instance_view.capacity_reservations.Element.utilization_info + utilization_info.current_capacity = AAZIntType( + serialized_name="currentCapacity", + flags={"read_only": True}, + ) + utilization_info.virtual_machines_allocated = AAZListType( + serialized_name="virtualMachinesAllocated", + flags={"read_only": True}, + ) + + virtual_machines_allocated = cls._schema_on_200.value.Element.properties.instance_view.capacity_reservations.Element.utilization_info.virtual_machines_allocated + virtual_machines_allocated.Element = AAZObjectType() + _ListHelper._build_schema_sub_resource_read_only_read(virtual_machines_allocated.Element) + + shared_subscription_ids = cls._schema_on_200.value.Element.properties.instance_view.shared_subscription_ids + shared_subscription_ids.Element = AAZObjectType() + _ListHelper._build_schema_sub_resource_read_only_read(shared_subscription_ids.Element) + + sharing_profile = cls._schema_on_200.value.Element.properties.sharing_profile + sharing_profile.subscription_ids = AAZListType( + serialized_name="subscriptionIds", + ) + + subscription_ids = cls._schema_on_200.value.Element.properties.sharing_profile.subscription_ids + subscription_ids.Element = AAZObjectType() + + _element = cls._schema_on_200.value.Element.properties.sharing_profile.subscription_ids.Element + _element.id = AAZStrType() + + virtual_machines_associated = cls._schema_on_200.value.Element.properties.virtual_machines_associated + virtual_machines_associated.Element = AAZObjectType() 
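+ # virtualMachinesAssociated entries are bare sub-resource references, so the shared read-only `id` schema from _ListHelper is attached below instead of a full virtual-machine schema.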
+ _ListHelper._build_schema_sub_resource_read_only_read(virtual_machines_associated.Element) + + tags = cls._schema_on_200.value.Element.tags + tags.Element = AAZStrType() + + zones = cls._schema_on_200.value.Element.zones + zones.Element = AAZStrType() + + return cls._schema_on_200 + + class CapacityReservationGroupsListByResourceGroup(AAZHttpOperation): + CLIENT_TYPE = "MgmtClient" + + def __call__(self, *args, **kwargs): + request = self.make_request() + session = self.client.send_request(request=request, stream=False, **kwargs) + if session.http_response.status_code in [200]: + return self.on_200(session) + + return self.on_error(session.http_response) + + @property + def url(self): + return self.client.format_url( + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups", + **self.url_parameters + ) + + @property + def method(self): + return "GET" + + @property + def error_format(self): + return "ODataV4Format" + + @property + def url_parameters(self): + parameters = { + **self.serialize_url_param( + "resourceGroupName", self.ctx.args.resource_group, + required=True, + ), + **self.serialize_url_param( + "subscriptionId", self.ctx.subscription_id, + required=True, + ), + } + return parameters + + @property + def query_parameters(self): + parameters = { + **self.serialize_query_param( + "$expand", self.ctx.args.expand, + ), + **self.serialize_query_param( + "api-version", "2024-03-01", + required=True, + ), + } + return parameters + + @property + def header_parameters(self): + parameters = { + **self.serialize_header_param( + "Accept", "application/json", + ), + } + return parameters + + def on_200(self, session): + data = self.deserialize_http_content(session) + self.ctx.set_var( + "instance", + data, + schema_builder=self._build_schema_on_200 + ) + + _schema_on_200 = None + + @classmethod + def _build_schema_on_200(cls): + if cls._schema_on_200 is not None: + return cls._schema_on_200 + + cls._schema_on_200 = AAZObjectType() + + _schema_on_200 = cls._schema_on_200 + _schema_on_200.next_link = AAZStrType( + serialized_name="nextLink", + ) + _schema_on_200.value = AAZListType( + flags={"required": True}, + ) + + value = cls._schema_on_200.value + value.Element = AAZObjectType() + + _element = cls._schema_on_200.value.Element + _element.id = AAZStrType( + flags={"read_only": True}, + ) + _element.location = AAZStrType( + flags={"required": True}, + ) + _element.name = AAZStrType( + flags={"read_only": True}, + ) + _element.properties = AAZObjectType( + flags={"client_flatten": True}, + ) + _element.tags = AAZDictType() + _element.type = AAZStrType( + flags={"read_only": True}, + ) + _element.zones = AAZListType() + + properties = cls._schema_on_200.value.Element.properties + properties.capacity_reservations = AAZListType( + serialized_name="capacityReservations", + flags={"read_only": True}, + ) + properties.instance_view = AAZObjectType( + serialized_name="instanceView", + flags={"read_only": True}, + ) + properties.sharing_profile = AAZObjectType( + serialized_name="sharingProfile", + ) + properties.virtual_machines_associated = AAZListType( + serialized_name="virtualMachinesAssociated", + flags={"read_only": True}, + ) + + capacity_reservations = cls._schema_on_200.value.Element.properties.capacity_reservations + capacity_reservations.Element = AAZObjectType() + _ListHelper._build_schema_sub_resource_read_only_read(capacity_reservations.Element) + + instance_view = 
cls._schema_on_200.value.Element.properties.instance_view + instance_view.capacity_reservations = AAZListType( + serialized_name="capacityReservations", + flags={"read_only": True}, + ) + instance_view.shared_subscription_ids = AAZListType( + serialized_name="sharedSubscriptionIds", + flags={"read_only": True}, + ) + + capacity_reservations = cls._schema_on_200.value.Element.properties.instance_view.capacity_reservations + capacity_reservations.Element = AAZObjectType() + + _element = cls._schema_on_200.value.Element.properties.instance_view.capacity_reservations.Element + _element.name = AAZStrType( + flags={"read_only": True}, + ) + _element.statuses = AAZListType() + _element.utilization_info = AAZObjectType( + serialized_name="utilizationInfo", + ) + + statuses = cls._schema_on_200.value.Element.properties.instance_view.capacity_reservations.Element.statuses + statuses.Element = AAZObjectType() + + _element = cls._schema_on_200.value.Element.properties.instance_view.capacity_reservations.Element.statuses.Element + _element.code = AAZStrType() + _element.display_status = AAZStrType( + serialized_name="displayStatus", + ) + _element.level = AAZStrType() + _element.message = AAZStrType() + _element.time = AAZStrType() + + utilization_info = cls._schema_on_200.value.Element.properties.instance_view.capacity_reservations.Element.utilization_info + utilization_info.current_capacity = AAZIntType( + serialized_name="currentCapacity", + flags={"read_only": True}, + ) + utilization_info.virtual_machines_allocated = AAZListType( + serialized_name="virtualMachinesAllocated", + flags={"read_only": True}, + ) + + virtual_machines_allocated = cls._schema_on_200.value.Element.properties.instance_view.capacity_reservations.Element.utilization_info.virtual_machines_allocated + virtual_machines_allocated.Element = AAZObjectType() + _ListHelper._build_schema_sub_resource_read_only_read(virtual_machines_allocated.Element) + + shared_subscription_ids = cls._schema_on_200.value.Element.properties.instance_view.shared_subscription_ids + shared_subscription_ids.Element = AAZObjectType() + _ListHelper._build_schema_sub_resource_read_only_read(shared_subscription_ids.Element) + + sharing_profile = cls._schema_on_200.value.Element.properties.sharing_profile + sharing_profile.subscription_ids = AAZListType( + serialized_name="subscriptionIds", + ) + + subscription_ids = cls._schema_on_200.value.Element.properties.sharing_profile.subscription_ids + subscription_ids.Element = AAZObjectType() + + _element = cls._schema_on_200.value.Element.properties.sharing_profile.subscription_ids.Element + _element.id = AAZStrType() + + virtual_machines_associated = cls._schema_on_200.value.Element.properties.virtual_machines_associated + virtual_machines_associated.Element = AAZObjectType() + _ListHelper._build_schema_sub_resource_read_only_read(virtual_machines_associated.Element) + + tags = cls._schema_on_200.value.Element.tags + tags.Element = AAZStrType() + + zones = cls._schema_on_200.value.Element.zones + zones.Element = AAZStrType() + + return cls._schema_on_200 + + +class _ListHelper: + """Helper class for List""" + + _schema_sub_resource_read_only_read = None + + @classmethod + def _build_schema_sub_resource_read_only_read(cls, _schema): + if cls._schema_sub_resource_read_only_read is not None: + _schema.id = cls._schema_sub_resource_read_only_read.id + return + + cls._schema_sub_resource_read_only_read = _schema_sub_resource_read_only_read = AAZObjectType() + + sub_resource_read_only_read = 
_schema_sub_resource_read_only_read + sub_resource_read_only_read.id = AAZStrType( + flags={"read_only": True}, + ) + + _schema.id = cls._schema_sub_resource_read_only_read.id + + +__all__ = ["List"] diff --git a/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2019_03_01_hybrid/disk/__cmd_group.py b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2019_03_01_hybrid/disk/__cmd_group.py index a3d98c8be7c..ef273230237 100644 --- a/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2019_03_01_hybrid/disk/__cmd_group.py +++ b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2019_03_01_hybrid/disk/__cmd_group.py @@ -18,7 +18,7 @@ class __CMDGroup(AAZCommandGroup): """Manage Azure Managed Disks. Azure Virtual Machines use disks as a place to store an operating system, applications, and data. All Azure virtual machines have at least two disks: An operating system disk, and a temporary disk. The operating system disk is created from an image, and both the operating system disk and the image are actually virtual hard disks (VHDs) stored in an Azure storage account. Virtual machines also can have one or more data disks, that are also stored as VHDs. - Azure Unmanaged Data Disks have a maximum size of 4095 GB. To use disks larger than 4095 GB use [Azure Managed Disks](https://docs.microsoft.com/azure/virtual-machines/managed-disks-overview) + Azure Unmanaged Data Disks have a maximum size of 4095 GB. To use disks larger than 4095 GB use [Azure Managed Disks](https://learn.microsoft.com/azure/virtual-machines/managed-disks-overview) """ pass diff --git a/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2019_03_01_hybrid/network/nsg/__cmd_group.py b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2019_03_01_hybrid/network/nsg/__cmd_group.py index 79bec603989..22ad607bc90 100644 --- a/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2019_03_01_hybrid/network/nsg/__cmd_group.py +++ b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2019_03_01_hybrid/network/nsg/__cmd_group.py @@ -14,7 +14,7 @@ class __CMDGroup(AAZCommandGroup): """Manage Azure Network Security Groups (NSGs). - You can control network traffic to resources in a virtual network using a network security group. A network security group contains a list of security rules that allow or deny inbound or outbound network traffic based on source or destination IP addresses, Application Security Groups, ports, and protocols. For more information visit https://docs.microsoft.com/azure/virtual-network/virtual-networks-create-nsg-arm-cli. + You can control network traffic to resources in a virtual network using a network security group. A network security group contains a list of security rules that allow or deny inbound or outbound network traffic based on source or destination IP addresses, Application Security Groups, ports, and protocols. For more information visit https://learn.microsoft.com/azure/virtual-network/virtual-networks-create-nsg-arm-cli. 
""" pass diff --git a/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2019_03_01_hybrid/network/public_ip/__cmd_group.py b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2019_03_01_hybrid/network/public_ip/__cmd_group.py index 569e6343494..2557faf7887 100644 --- a/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2019_03_01_hybrid/network/public_ip/__cmd_group.py +++ b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2019_03_01_hybrid/network/public_ip/__cmd_group.py @@ -14,7 +14,7 @@ class __CMDGroup(AAZCommandGroup): """Manage public IP addresses. - To learn more about public IP addresses visit https://docs.microsoft.com/azure/virtual-network/virtual-network-public-ip-address. + To learn more about public IP addresses visit https://learn.microsoft.com/azure/virtual-network/virtual-network-public-ip-address. """ pass diff --git a/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2019_03_01_hybrid/network/vnet/__cmd_group.py b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2019_03_01_hybrid/network/vnet/__cmd_group.py index 4099513f8aa..73d2004eb91 100644 --- a/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2019_03_01_hybrid/network/vnet/__cmd_group.py +++ b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2019_03_01_hybrid/network/vnet/__cmd_group.py @@ -14,7 +14,7 @@ class __CMDGroup(AAZCommandGroup): """Check if a private IP address is available for use within a virtual network. - To learn more about Virtual Networks visit https://docs.microsoft.com/azure/virtual-network/virtual-network-manage-network. + To learn more about Virtual Networks visit https://learn.microsoft.com/azure/virtual-network/virtual-network-manage-network. """ pass diff --git a/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2019_03_01_hybrid/network/vnet/subnet/__cmd_group.py b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2019_03_01_hybrid/network/vnet/subnet/__cmd_group.py index 33a5960d8ee..748734c3253 100644 --- a/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2019_03_01_hybrid/network/vnet/subnet/__cmd_group.py +++ b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2019_03_01_hybrid/network/vnet/subnet/__cmd_group.py @@ -14,7 +14,7 @@ class __CMDGroup(AAZCommandGroup): """Manage subnets in an Azure Virtual Network. - To learn more about subnets visit https://docs.microsoft.com/azure/virtual-network/virtual-network-manage-subnet. + To learn more about subnets visit https://learn.microsoft.com/azure/virtual-network/virtual-network-manage-subnet. """ pass diff --git a/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2020_09_01_hybrid/capacity/__cmd_group.py b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2020_09_01_hybrid/capacity/__cmd_group.py new file mode 100644 index 00000000000..a10530cf097 --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2020_09_01_hybrid/capacity/__cmd_group.py @@ -0,0 +1,23 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# +# Code generated by aaz-dev-tools +# -------------------------------------------------------------------------------------------- + +# pylint: skip-file +# flake8: noqa + +from azure.cli.core.aaz import * + + +@register_command_group( + "capacity", +) +class __CMDGroup(AAZCommandGroup): + """Manage capacity. 
+ """ + pass + + +__all__ = ["__CMDGroup"] diff --git a/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2020_09_01_hybrid/capacity/__init__.py b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2020_09_01_hybrid/capacity/__init__.py new file mode 100644 index 00000000000..5a9d61963d6 --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2020_09_01_hybrid/capacity/__init__.py @@ -0,0 +1,11 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# +# Code generated by aaz-dev-tools +# -------------------------------------------------------------------------------------------- + +# pylint: skip-file +# flake8: noqa + +from .__cmd_group import * diff --git a/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2020_09_01_hybrid/capacity/reservation/__cmd_group.py b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2020_09_01_hybrid/capacity/reservation/__cmd_group.py new file mode 100644 index 00000000000..58d1554a9a5 --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2020_09_01_hybrid/capacity/reservation/__cmd_group.py @@ -0,0 +1,23 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# +# Code generated by aaz-dev-tools +# -------------------------------------------------------------------------------------------- + +# pylint: skip-file +# flake8: noqa + +from azure.cli.core.aaz import * + + +@register_command_group( + "capacity reservation", +) +class __CMDGroup(AAZCommandGroup): + """Manage capacity reservation. + """ + pass + + +__all__ = ["__CMDGroup"] diff --git a/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2020_09_01_hybrid/capacity/reservation/__init__.py b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2020_09_01_hybrid/capacity/reservation/__init__.py new file mode 100644 index 00000000000..5a9d61963d6 --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2020_09_01_hybrid/capacity/reservation/__init__.py @@ -0,0 +1,11 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# +# Code generated by aaz-dev-tools +# -------------------------------------------------------------------------------------------- + +# pylint: skip-file +# flake8: noqa + +from .__cmd_group import * diff --git a/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2020_09_01_hybrid/capacity/reservation/group/__cmd_group.py b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2020_09_01_hybrid/capacity/reservation/group/__cmd_group.py new file mode 100644 index 00000000000..84dd1dd45a2 --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2020_09_01_hybrid/capacity/reservation/group/__cmd_group.py @@ -0,0 +1,23 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# +# Code generated by aaz-dev-tools +# -------------------------------------------------------------------------------------------- + +# pylint: skip-file +# flake8: noqa + +from azure.cli.core.aaz import * + + +@register_command_group( + "capacity reservation group", +) +class __CMDGroup(AAZCommandGroup): + """Manage capacity reservation group. + """ + pass + + +__all__ = ["__CMDGroup"] diff --git a/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2020_09_01_hybrid/capacity/reservation/group/__init__.py b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2020_09_01_hybrid/capacity/reservation/group/__init__.py new file mode 100644 index 00000000000..d63ae5a6fc9 --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2020_09_01_hybrid/capacity/reservation/group/__init__.py @@ -0,0 +1,12 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# +# Code generated by aaz-dev-tools +# -------------------------------------------------------------------------------------------- + +# pylint: skip-file +# flake8: noqa + +from .__cmd_group import * +from ._list import * diff --git a/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2020_09_01_hybrid/capacity/reservation/group/_list.py b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2020_09_01_hybrid/capacity/reservation/group/_list.py new file mode 100644 index 00000000000..0d8796f5f6e --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2020_09_01_hybrid/capacity/reservation/group/_list.py @@ -0,0 +1,519 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# +# Code generated by aaz-dev-tools +# -------------------------------------------------------------------------------------------- + +# pylint: skip-file +# flake8: noqa + +from azure.cli.core.aaz import * + + +@register_command( + "capacity reservation group list", +) +class List(AAZCommand): + """List the capacity reservation groups. 
+ + :example: List capacity reservation groups + az capacity reservation group list -g rg + + :example: List the capacity reservation groups, including the VM instances and VMSS instances that are associated with them + az capacity reservation group list -g rg --vm-instance --vmss-instance + """ + + _aaz_info = { + "version": "2024-03-01", + "resources": [ + ["mgmt-plane", "/subscriptions/{}/providers/microsoft.compute/capacityreservationgroups", "2024-03-01"], + ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.compute/capacityreservationgroups", "2024-03-01"], + ] + } + + AZ_SUPPORT_PAGINATION = True + + def _handler(self, command_args): + super()._handler(command_args) + return self.build_paging(self._execute_operations, self._output) + + _args_schema = None + + @classmethod + def _build_arguments_schema(cls, *args, **kwargs): + if cls._args_schema is not None: + return cls._args_schema + cls._args_schema = super()._build_arguments_schema(*args, **kwargs) + + # define Arg Group "" + + _args_schema = cls._args_schema + _args_schema.resource_group = AAZResourceGroupNameArg() + _args_schema.expand = AAZStrArg( + options=["--expand"], + help="The expand expression to apply on the operation. Based on the expand param(s) specified, the response returns the resource Ids of the Virtual Machines, the ScaleSet VM instances, or both, that are associated with the capacity reservation group.", + enum={"virtualMachineScaleSetVMs/$ref": "virtualMachineScaleSetVMs/$ref", "virtualMachines/$ref": "virtualMachines/$ref"}, + ) + _args_schema.resource_ids_only = AAZStrArg( + options=["--resource-ids-only"], + help="The query option to fetch capacity reservation group resource Ids. 'CreatedInSubscription' enables fetching resource Ids for all capacity reservation group resources created in the subscription. 'SharedWithSubscription' enables fetching resource Ids for all capacity reservation group resources shared with the subscription. 
'All' enables fetching resource Ids for all capacity reservation group resources shared with the subscription and created in the subscription.", + enum={"All": "All", "CreatedInSubscription": "CreatedInSubscription", "SharedWithSubscription": "SharedWithSubscription"}, + ) + return cls._args_schema + + def _execute_operations(self): + self.pre_operations() + condition_0 = has_value(self.ctx.subscription_id) and has_value(self.ctx.args.resource_group) is not True + condition_1 = has_value(self.ctx.args.resource_group) and has_value(self.ctx.subscription_id) + if condition_0: + self.CapacityReservationGroupsListBySubscription(ctx=self.ctx)() + if condition_1: + self.CapacityReservationGroupsListByResourceGroup(ctx=self.ctx)() + self.post_operations() + + @register_callback + def pre_operations(self): + pass + + @register_callback + def post_operations(self): + pass + + def _output(self, *args, **kwargs): + result = self.deserialize_output(self.ctx.vars.instance.value, client_flatten=True) + next_link = self.deserialize_output(self.ctx.vars.instance.next_link) + return result, next_link + + class CapacityReservationGroupsListBySubscription(AAZHttpOperation): + CLIENT_TYPE = "MgmtClient" + + def __call__(self, *args, **kwargs): + request = self.make_request() + session = self.client.send_request(request=request, stream=False, **kwargs) + if session.http_response.status_code in [200]: + return self.on_200(session) + + return self.on_error(session.http_response) + + @property + def url(self): + return self.client.format_url( + "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/capacityReservationGroups", + **self.url_parameters + ) + + @property + def method(self): + return "GET" + + @property + def error_format(self): + return "ODataV4Format" + + @property + def url_parameters(self): + parameters = { + **self.serialize_url_param( + "subscriptionId", self.ctx.subscription_id, + required=True, + ), + } + return parameters + + @property + def query_parameters(self): + parameters = { + **self.serialize_query_param( + "$expand", self.ctx.args.expand, + ), + **self.serialize_query_param( + "resourceIdsOnly", self.ctx.args.resource_ids_only, + ), + **self.serialize_query_param( + "api-version", "2024-03-01", + required=True, + ), + } + return parameters + + @property + def header_parameters(self): + parameters = { + **self.serialize_header_param( + "Accept", "application/json", + ), + } + return parameters + + def on_200(self, session): + data = self.deserialize_http_content(session) + self.ctx.set_var( + "instance", + data, + schema_builder=self._build_schema_on_200 + ) + + _schema_on_200 = None + + @classmethod + def _build_schema_on_200(cls): + if cls._schema_on_200 is not None: + return cls._schema_on_200 + + cls._schema_on_200 = AAZObjectType() + + _schema_on_200 = cls._schema_on_200 + _schema_on_200.next_link = AAZStrType( + serialized_name="nextLink", + ) + _schema_on_200.value = AAZListType( + flags={"required": True}, + ) + + value = cls._schema_on_200.value + value.Element = AAZObjectType() + + _element = cls._schema_on_200.value.Element + _element.id = AAZStrType( + flags={"read_only": True}, + ) + _element.location = AAZStrType( + flags={"required": True}, + ) + _element.name = AAZStrType( + flags={"read_only": True}, + ) + _element.properties = AAZObjectType( + flags={"client_flatten": True}, + ) + _element.tags = AAZDictType() + _element.type = AAZStrType( + flags={"read_only": True}, + ) + _element.zones = AAZListType() + + properties = 
cls._schema_on_200.value.Element.properties + properties.capacity_reservations = AAZListType( + serialized_name="capacityReservations", + flags={"read_only": True}, + ) + properties.instance_view = AAZObjectType( + serialized_name="instanceView", + flags={"read_only": True}, + ) + properties.sharing_profile = AAZObjectType( + serialized_name="sharingProfile", + ) + properties.virtual_machines_associated = AAZListType( + serialized_name="virtualMachinesAssociated", + flags={"read_only": True}, + ) + + capacity_reservations = cls._schema_on_200.value.Element.properties.capacity_reservations + capacity_reservations.Element = AAZObjectType() + _ListHelper._build_schema_sub_resource_read_only_read(capacity_reservations.Element) + + instance_view = cls._schema_on_200.value.Element.properties.instance_view + instance_view.capacity_reservations = AAZListType( + serialized_name="capacityReservations", + flags={"read_only": True}, + ) + instance_view.shared_subscription_ids = AAZListType( + serialized_name="sharedSubscriptionIds", + flags={"read_only": True}, + ) + + capacity_reservations = cls._schema_on_200.value.Element.properties.instance_view.capacity_reservations + capacity_reservations.Element = AAZObjectType() + + _element = cls._schema_on_200.value.Element.properties.instance_view.capacity_reservations.Element + _element.name = AAZStrType( + flags={"read_only": True}, + ) + _element.statuses = AAZListType() + _element.utilization_info = AAZObjectType( + serialized_name="utilizationInfo", + ) + + statuses = cls._schema_on_200.value.Element.properties.instance_view.capacity_reservations.Element.statuses + statuses.Element = AAZObjectType() + + _element = cls._schema_on_200.value.Element.properties.instance_view.capacity_reservations.Element.statuses.Element + _element.code = AAZStrType() + _element.display_status = AAZStrType( + serialized_name="displayStatus", + ) + _element.level = AAZStrType() + _element.message = AAZStrType() + _element.time = AAZStrType() + + utilization_info = cls._schema_on_200.value.Element.properties.instance_view.capacity_reservations.Element.utilization_info + utilization_info.current_capacity = AAZIntType( + serialized_name="currentCapacity", + flags={"read_only": True}, + ) + utilization_info.virtual_machines_allocated = AAZListType( + serialized_name="virtualMachinesAllocated", + flags={"read_only": True}, + ) + + virtual_machines_allocated = cls._schema_on_200.value.Element.properties.instance_view.capacity_reservations.Element.utilization_info.virtual_machines_allocated + virtual_machines_allocated.Element = AAZObjectType() + _ListHelper._build_schema_sub_resource_read_only_read(virtual_machines_allocated.Element) + + shared_subscription_ids = cls._schema_on_200.value.Element.properties.instance_view.shared_subscription_ids + shared_subscription_ids.Element = AAZObjectType() + _ListHelper._build_schema_sub_resource_read_only_read(shared_subscription_ids.Element) + + sharing_profile = cls._schema_on_200.value.Element.properties.sharing_profile + sharing_profile.subscription_ids = AAZListType( + serialized_name="subscriptionIds", + ) + + subscription_ids = cls._schema_on_200.value.Element.properties.sharing_profile.subscription_ids + subscription_ids.Element = AAZObjectType() + + _element = cls._schema_on_200.value.Element.properties.sharing_profile.subscription_ids.Element + _element.id = AAZStrType() + + virtual_machines_associated = cls._schema_on_200.value.Element.properties.virtual_machines_associated + virtual_machines_associated.Element = AAZObjectType() 
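+ # virtualMachinesAssociated entries are bare sub-resource references, so the shared read-only `id` schema from _ListHelper is attached below instead of a full virtual-machine schema.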
+ _ListHelper._build_schema_sub_resource_read_only_read(virtual_machines_associated.Element) + + tags = cls._schema_on_200.value.Element.tags + tags.Element = AAZStrType() + + zones = cls._schema_on_200.value.Element.zones + zones.Element = AAZStrType() + + return cls._schema_on_200 + + class CapacityReservationGroupsListByResourceGroup(AAZHttpOperation): + CLIENT_TYPE = "MgmtClient" + + def __call__(self, *args, **kwargs): + request = self.make_request() + session = self.client.send_request(request=request, stream=False, **kwargs) + if session.http_response.status_code in [200]: + return self.on_200(session) + + return self.on_error(session.http_response) + + @property + def url(self): + return self.client.format_url( + "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups", + **self.url_parameters + ) + + @property + def method(self): + return "GET" + + @property + def error_format(self): + return "ODataV4Format" + + @property + def url_parameters(self): + parameters = { + **self.serialize_url_param( + "resourceGroupName", self.ctx.args.resource_group, + required=True, + ), + **self.serialize_url_param( + "subscriptionId", self.ctx.subscription_id, + required=True, + ), + } + return parameters + + @property + def query_parameters(self): + parameters = { + **self.serialize_query_param( + "$expand", self.ctx.args.expand, + ), + **self.serialize_query_param( + "api-version", "2024-03-01", + required=True, + ), + } + return parameters + + @property + def header_parameters(self): + parameters = { + **self.serialize_header_param( + "Accept", "application/json", + ), + } + return parameters + + def on_200(self, session): + data = self.deserialize_http_content(session) + self.ctx.set_var( + "instance", + data, + schema_builder=self._build_schema_on_200 + ) + + _schema_on_200 = None + + @classmethod + def _build_schema_on_200(cls): + if cls._schema_on_200 is not None: + return cls._schema_on_200 + + cls._schema_on_200 = AAZObjectType() + + _schema_on_200 = cls._schema_on_200 + _schema_on_200.next_link = AAZStrType( + serialized_name="nextLink", + ) + _schema_on_200.value = AAZListType( + flags={"required": True}, + ) + + value = cls._schema_on_200.value + value.Element = AAZObjectType() + + _element = cls._schema_on_200.value.Element + _element.id = AAZStrType( + flags={"read_only": True}, + ) + _element.location = AAZStrType( + flags={"required": True}, + ) + _element.name = AAZStrType( + flags={"read_only": True}, + ) + _element.properties = AAZObjectType( + flags={"client_flatten": True}, + ) + _element.tags = AAZDictType() + _element.type = AAZStrType( + flags={"read_only": True}, + ) + _element.zones = AAZListType() + + properties = cls._schema_on_200.value.Element.properties + properties.capacity_reservations = AAZListType( + serialized_name="capacityReservations", + flags={"read_only": True}, + ) + properties.instance_view = AAZObjectType( + serialized_name="instanceView", + flags={"read_only": True}, + ) + properties.sharing_profile = AAZObjectType( + serialized_name="sharingProfile", + ) + properties.virtual_machines_associated = AAZListType( + serialized_name="virtualMachinesAssociated", + flags={"read_only": True}, + ) + + capacity_reservations = cls._schema_on_200.value.Element.properties.capacity_reservations + capacity_reservations.Element = AAZObjectType() + _ListHelper._build_schema_sub_resource_read_only_read(capacity_reservations.Element) + + instance_view = 
cls._schema_on_200.value.Element.properties.instance_view + instance_view.capacity_reservations = AAZListType( + serialized_name="capacityReservations", + flags={"read_only": True}, + ) + instance_view.shared_subscription_ids = AAZListType( + serialized_name="sharedSubscriptionIds", + flags={"read_only": True}, + ) + + capacity_reservations = cls._schema_on_200.value.Element.properties.instance_view.capacity_reservations + capacity_reservations.Element = AAZObjectType() + + _element = cls._schema_on_200.value.Element.properties.instance_view.capacity_reservations.Element + _element.name = AAZStrType( + flags={"read_only": True}, + ) + _element.statuses = AAZListType() + _element.utilization_info = AAZObjectType( + serialized_name="utilizationInfo", + ) + + statuses = cls._schema_on_200.value.Element.properties.instance_view.capacity_reservations.Element.statuses + statuses.Element = AAZObjectType() + + _element = cls._schema_on_200.value.Element.properties.instance_view.capacity_reservations.Element.statuses.Element + _element.code = AAZStrType() + _element.display_status = AAZStrType( + serialized_name="displayStatus", + ) + _element.level = AAZStrType() + _element.message = AAZStrType() + _element.time = AAZStrType() + + utilization_info = cls._schema_on_200.value.Element.properties.instance_view.capacity_reservations.Element.utilization_info + utilization_info.current_capacity = AAZIntType( + serialized_name="currentCapacity", + flags={"read_only": True}, + ) + utilization_info.virtual_machines_allocated = AAZListType( + serialized_name="virtualMachinesAllocated", + flags={"read_only": True}, + ) + + virtual_machines_allocated = cls._schema_on_200.value.Element.properties.instance_view.capacity_reservations.Element.utilization_info.virtual_machines_allocated + virtual_machines_allocated.Element = AAZObjectType() + _ListHelper._build_schema_sub_resource_read_only_read(virtual_machines_allocated.Element) + + shared_subscription_ids = cls._schema_on_200.value.Element.properties.instance_view.shared_subscription_ids + shared_subscription_ids.Element = AAZObjectType() + _ListHelper._build_schema_sub_resource_read_only_read(shared_subscription_ids.Element) + + sharing_profile = cls._schema_on_200.value.Element.properties.sharing_profile + sharing_profile.subscription_ids = AAZListType( + serialized_name="subscriptionIds", + ) + + subscription_ids = cls._schema_on_200.value.Element.properties.sharing_profile.subscription_ids + subscription_ids.Element = AAZObjectType() + + _element = cls._schema_on_200.value.Element.properties.sharing_profile.subscription_ids.Element + _element.id = AAZStrType() + + virtual_machines_associated = cls._schema_on_200.value.Element.properties.virtual_machines_associated + virtual_machines_associated.Element = AAZObjectType() + _ListHelper._build_schema_sub_resource_read_only_read(virtual_machines_associated.Element) + + tags = cls._schema_on_200.value.Element.tags + tags.Element = AAZStrType() + + zones = cls._schema_on_200.value.Element.zones + zones.Element = AAZStrType() + + return cls._schema_on_200 + + +class _ListHelper: + """Helper class for List""" + + _schema_sub_resource_read_only_read = None + + @classmethod + def _build_schema_sub_resource_read_only_read(cls, _schema): + if cls._schema_sub_resource_read_only_read is not None: + _schema.id = cls._schema_sub_resource_read_only_read.id + return + + cls._schema_sub_resource_read_only_read = _schema_sub_resource_read_only_read = AAZObjectType() + + sub_resource_read_only_read = 
_schema_sub_resource_read_only_read + sub_resource_read_only_read.id = AAZStrType( + flags={"read_only": True}, + ) + + _schema.id = cls._schema_sub_resource_read_only_read.id + + +__all__ = ["List"] diff --git a/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2020_09_01_hybrid/disk/__cmd_group.py b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2020_09_01_hybrid/disk/__cmd_group.py index a3d98c8be7c..ef273230237 100644 --- a/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2020_09_01_hybrid/disk/__cmd_group.py +++ b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2020_09_01_hybrid/disk/__cmd_group.py @@ -18,7 +18,7 @@ class __CMDGroup(AAZCommandGroup): """Manage Azure Managed Disks. Azure Virtual Machines use disks as a place to store an operating system, applications, and data. All Azure virtual machines have at least two disks: An operating system disk, and a temporary disk. The operating system disk is created from an image, and both the operating system disk and the image are actually virtual hard disks (VHDs) stored in an Azure storage account. Virtual machines also can have one or more data disks, that are also stored as VHDs. - Azure Unmanaged Data Disks have a maximum size of 4095 GB. To use disks larger than 4095 GB use [Azure Managed Disks](https://docs.microsoft.com/azure/virtual-machines/managed-disks-overview) + Azure Unmanaged Data Disks have a maximum size of 4095 GB. To use disks larger than 4095 GB use [Azure Managed Disks](https://learn.microsoft.com/azure/virtual-machines/managed-disks-overview) """ pass diff --git a/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2020_09_01_hybrid/network/nsg/__cmd_group.py b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2020_09_01_hybrid/network/nsg/__cmd_group.py index 79bec603989..22ad607bc90 100644 --- a/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2020_09_01_hybrid/network/nsg/__cmd_group.py +++ b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2020_09_01_hybrid/network/nsg/__cmd_group.py @@ -14,7 +14,7 @@ class __CMDGroup(AAZCommandGroup): """Manage Azure Network Security Groups (NSGs). - You can control network traffic to resources in a virtual network using a network security group. A network security group contains a list of security rules that allow or deny inbound or outbound network traffic based on source or destination IP addresses, Application Security Groups, ports, and protocols. For more information visit https://docs.microsoft.com/azure/virtual-network/virtual-networks-create-nsg-arm-cli. + You can control network traffic to resources in a virtual network using a network security group. A network security group contains a list of security rules that allow or deny inbound or outbound network traffic based on source or destination IP addresses, Application Security Groups, ports, and protocols. For more information visit https://learn.microsoft.com/azure/virtual-network/virtual-networks-create-nsg-arm-cli. 
""" pass diff --git a/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2020_09_01_hybrid/network/public_ip/__cmd_group.py b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2020_09_01_hybrid/network/public_ip/__cmd_group.py index 569e6343494..2557faf7887 100644 --- a/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2020_09_01_hybrid/network/public_ip/__cmd_group.py +++ b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2020_09_01_hybrid/network/public_ip/__cmd_group.py @@ -14,7 +14,7 @@ class __CMDGroup(AAZCommandGroup): """Manage public IP addresses. - To learn more about public IP addresses visit https://docs.microsoft.com/azure/virtual-network/virtual-network-public-ip-address. + To learn more about public IP addresses visit https://learn.microsoft.com/azure/virtual-network/virtual-network-public-ip-address. """ pass diff --git a/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2020_09_01_hybrid/network/vnet/__cmd_group.py b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2020_09_01_hybrid/network/vnet/__cmd_group.py index 4099513f8aa..73d2004eb91 100644 --- a/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2020_09_01_hybrid/network/vnet/__cmd_group.py +++ b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2020_09_01_hybrid/network/vnet/__cmd_group.py @@ -14,7 +14,7 @@ class __CMDGroup(AAZCommandGroup): """Check if a private IP address is available for use within a virtual network. - To learn more about Virtual Networks visit https://docs.microsoft.com/azure/virtual-network/virtual-network-manage-network. + To learn more about Virtual Networks visit https://learn.microsoft.com/azure/virtual-network/virtual-network-manage-network. """ pass diff --git a/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2020_09_01_hybrid/network/vnet/subnet/__cmd_group.py b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2020_09_01_hybrid/network/vnet/subnet/__cmd_group.py index 33a5960d8ee..748734c3253 100644 --- a/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2020_09_01_hybrid/network/vnet/subnet/__cmd_group.py +++ b/src/azure-cli/azure/cli/command_modules/vm/aaz/profile_2020_09_01_hybrid/network/vnet/subnet/__cmd_group.py @@ -14,7 +14,7 @@ class __CMDGroup(AAZCommandGroup): """Manage subnets in an Azure Virtual Network. - To learn more about subnets visit https://docs.microsoft.com/azure/virtual-network/virtual-network-manage-subnet. + To learn more about subnets visit https://learn.microsoft.com/azure/virtual-network/virtual-network-manage-subnet. """ pass diff --git a/src/azure-cli/azure/cli/command_modules/vm/azure_stack/__init__.py b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/__init__.py new file mode 100644 index 00000000000..34913fb394d --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/__init__.py @@ -0,0 +1,4 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# -------------------------------------------------------------------------------------------- diff --git a/src/azure-cli/azure/cli/command_modules/vm/azure_stack/_actions.py b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/_actions.py new file mode 100644 index 00000000000..8b70419c208 --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/_actions.py @@ -0,0 +1,282 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# +# Generation mode: Incremental +# -------------------------------------------------------------------------- + +# pylint: disable=too-many-statements +import json + +from knack.log import get_logger +from knack.util import CLIError + +from azure.cli.core.commands.arm import resource_exists +from azure.cli.core.commands.parameters import get_one_of_subscription_locations +from ._client_factory import _compute_client_factory + +try: + from ..manual.action import * # noqa: F403, pylint: disable=unused-wildcard-import,wildcard-import +except ImportError: + pass + +logger = get_logger(__name__) + + +def _resource_not_exists(cli_ctx, resource_type): + def _handle_resource_not_exists(namespace): + ns, t = resource_type.split('/') + # pylint: disable=protected-access + if resource_exists(cli_ctx, namespace._subscription_id, namespace.resource_group_name, namespace.name, ns, t): + raise CLIError('Resource {} of type {} in group {} already exists.'.format( + namespace.name, + resource_type, + namespace.resource_group_name)) + + return _handle_resource_not_exists + + +def _get_thread_count(): + return 5 # don't increase too much till https://github.com/Azure/msrestazure-for-python/issues/6 is fixed + + +def load_images_thru_services(cli_ctx, publisher, offer, sku, location, edge_zone, architecture): + from concurrent.futures import ThreadPoolExecutor, as_completed + + all_images = [] + client = _compute_client_factory(cli_ctx) + if location is None: + location = get_one_of_subscription_locations(cli_ctx) + + def _load_images_from_publisher(publisher): + from azure.core.exceptions import ResourceNotFoundError + try: + if edge_zone is not None: + offers = edge_zone_client.list_offers(location=location, edge_zone=edge_zone, publisher_name=publisher) + else: + offers = client.virtual_machine_images.list_offers(location=location, publisher_name=publisher) + except ResourceNotFoundError as e: + logger.warning(str(e)) + return + if offer: + offers = [o for o in offers if _matched(offer, o.name)] + for o in offers: + try: + if edge_zone is not None: + skus = edge_zone_client.list_skus(location=location, edge_zone=edge_zone, + publisher_name=publisher, offer=o.name) + else: + skus = client.virtual_machine_images.list_skus(location=location, publisher_name=publisher, + offer=o.name) + except ResourceNotFoundError as e: + logger.warning(str(e)) + continue + if sku: + skus = [s for s in skus if _matched(sku, s.name)] + for s in skus: + try: + expand = "properties/imageDeprecationStatus" + if edge_zone is not None: + images = edge_zone_client.list(location=location, edge_zone=edge_zone, publisher_name=publisher, + offer=o.name, skus=s.name, expand=expand) + else: + images = client.virtual_machine_images.list(location=location, 
publisher_name=publisher, + offer=o.name, skus=s.name, expand=expand) + except ResourceNotFoundError as e: + logger.warning(str(e)) + continue + for i in images: + image_info = { + 'publisher': publisher, + 'offer': o.name, + 'sku': s.name, + 'version': i.name, + 'architecture': i.additional_properties.get("properties", {}).get("architecture", None) or "", + 'imageDeprecationStatus': i.additional_properties.get( + "properties", {}).get("imageDeprecationStatus", {}) or "" + } + if edge_zone is not None: + image_info['edge_zone'] = edge_zone + if architecture and architecture != image_info['architecture']: + continue + all_images.append(image_info) + + if edge_zone is not None: + from azure.cli.core.commands.client_factory import get_mgmt_service_client + from azure.cli.core.profiles import ResourceType + edge_zone_client = get_mgmt_service_client(cli_ctx, + ResourceType.MGMT_COMPUTE).virtual_machine_images_edge_zone + publishers = edge_zone_client.list_publishers(location=location, edge_zone=edge_zone) + else: + publishers = client.virtual_machine_images.list_publishers(location=location) + if publisher: + publishers = [p for p in publishers if _matched(publisher, p.name)] + + publisher_num = len(publishers) + if publisher_num > 1: + with ThreadPoolExecutor(max_workers=_get_thread_count()) as executor: + tasks = [executor.submit(_load_images_from_publisher, p.name) for p in publishers] + for t in as_completed(tasks): + t.result() # don't use the result but expose exceptions from the threads + elif publisher_num == 1: + _load_images_from_publisher(publishers[0].name) + + return all_images + + +def load_images_from_aliases_doc(cli_ctx, publisher=None, offer=None, sku=None, architecture=None): + import requests + from azure.cli.core.cloud import CloudEndpointNotSetException + from azure.cli.core.util import should_disable_connection_verify + from azure.cli.command_modules.vm._alias import alias_json + try: + target_url = cli_ctx.cloud.endpoints.vm_image_alias_doc + except CloudEndpointNotSetException: + logger.warning("'endpoint_vm_image_alias_doc' isn't configured. Please invoke 'az cloud update' to configure " + "it or use '--all' to retrieve images from server. Use local copy instead.") + dic = json.loads(alias_json) + else: + # under hack mode(say through proxies with unsigned cert), opt out the cert verification + try: + response = requests.get(target_url, verify=(not should_disable_connection_verify())) + if response.status_code == 200: + dic = json.loads(response.content.decode()) + else: + logger.warning("Failed to retrieve image alias doc '%s'. Error: '%s'. Use local copy instead.", + target_url, response) + dic = json.loads(alias_json) + except requests.exceptions.ConnectionError: + logger.warning("Failed to retrieve image alias doc '%s'. Error: 'ConnectionError'. 
Use local copy instead.", + target_url) + dic = json.loads(alias_json) + try: + all_images = [] + result = (dic['outputs']['aliases']['value']) + for v in result.values(): # loop around os + for alias, vv in v.items(): # loop around distros + all_images.append({ + 'urnAlias': alias, + 'publisher': vv['publisher'], + 'offer': vv['offer'], + 'sku': vv['sku'], + 'version': vv['version'], + 'architecture': vv['architecture'] + }) + + all_images = [i for i in all_images if (_matched(publisher, i['publisher']) and + _matched(offer, i['offer']) and + _matched(sku, i['sku']) and + _matched(architecture, i['architecture']))] + return all_images + except KeyError: + raise CLIError('Could not retrieve image list from {} or local copy'.format(target_url)) + + +def load_extension_images_thru_services(cli_ctx, publisher, name, version, location, + show_latest=False, partial_match=True): + from concurrent.futures import ThreadPoolExecutor, as_completed + from packaging.version import parse # pylint: disable=no-name-in-module,import-error + all_images = [] + client = _compute_client_factory(cli_ctx) + if location is None: + location = get_one_of_subscription_locations(cli_ctx) + + def _load_extension_images_from_publisher(publisher): + from azure.core.exceptions import ResourceNotFoundError + try: + types = client.virtual_machine_extension_images.list_types(location, publisher) + except ResourceNotFoundError as e: + # PIR image publishers might not have any extension images, exception could raise + logger.warning(str(e)) + types = [] + if name: + types = [t for t in types if _matched(name, t.name, partial_match)] + for t in types: + try: + versions = client.virtual_machine_extension_images.list_versions( + location, publisher, t.name) + except ResourceNotFoundError as e: + logger.warning(str(e)) + continue + if version: + versions = [v for v in versions if _matched(version, v.name, partial_match)] + + if show_latest: + # pylint: disable=no-member + versions.sort(key=lambda v: parse(v.name), reverse=True) + try: + all_images.append({ + 'publisher': publisher, + 'name': t.name, + 'version': versions[0].name}) + except IndexError: + pass # if no versions for this type continue to next type. 
+ else: + for v in versions: + all_images.append({ + 'publisher': publisher, + 'name': t.name, + 'version': v.name}) + + publishers = client.virtual_machine_images.list_publishers(location=location) + if publisher: + publishers = [p for p in publishers if _matched(publisher, p.name, partial_match)] + + publisher_num = len(publishers) + if publisher_num > 1: + with ThreadPoolExecutor(max_workers=_get_thread_count()) as executor: + tasks = [executor.submit(_load_extension_images_from_publisher, + p.name) for p in publishers] + for t in as_completed(tasks): + t.result() # don't use the result but expose exceptions from the threads + elif publisher_num == 1: + _load_extension_images_from_publisher(publishers[0].name) + + return all_images + + +def get_vm_sizes(cli_ctx, location): + return list(_compute_client_factory(cli_ctx).virtual_machine_sizes.list(location)) + + +def _matched(pattern, string, partial_match=True): + if not pattern: + return True # empty pattern means wildcard-match + pattern, string = pattern.lower(), string.lower() + return pattern in string if partial_match else pattern == string + + +def _create_image_instance(publisher, offer, sku, version): + return { + 'publisher': publisher, + 'offer': offer, + 'sku': sku, + 'version': version + } + + +def _get_latest_image_version(cli_ctx, location, publisher, offer, sku, edge_zone=None): + from azure.cli.core.azclierror import InvalidArgumentValueError + if edge_zone is not None: + from azure.cli.core.commands.client_factory import get_mgmt_service_client + from azure.cli.core.profiles import ResourceType + edge_zone_client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_COMPUTE).virtual_machine_images_edge_zone + top_one = edge_zone_client.list(location, edge_zone, publisher, offer, sku, top=1, orderby='name desc') + if not top_one: + raise InvalidArgumentValueError("Can't resolve the version of '{}:{}:{}:{}'" + .format(publisher, offer, sku, edge_zone)) + else: + top_one = _compute_client_factory(cli_ctx).virtual_machine_images.list(location, + publisher, + offer, + sku, + top=1, + orderby='name desc') + if not top_one: + raise InvalidArgumentValueError("Can't resolve the version of '{}:{}:{}'".format(publisher, offer, sku)) + return top_one[0].name diff --git a/src/azure-cli/azure/cli/command_modules/vm/azure_stack/_client_factory.py b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/_client_factory.py new file mode 100644 index 00000000000..73f66608f9b --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/_client_factory.py @@ -0,0 +1,201 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# -------------------------------------------------------------------------------------------- + + +def _compute_client_factory(cli_ctx, **kwargs): + from azure.cli.core.profiles import ResourceType + from azure.cli.core.commands.client_factory import get_mgmt_service_client + return get_mgmt_service_client(cli_ctx, ResourceType.MGMT_COMPUTE, + subscription_id=kwargs.get('subscription_id'), + aux_subscriptions=kwargs.get('aux_subscriptions')) + + +def cf_avail_set(cli_ctx, _): + return _compute_client_factory(cli_ctx).availability_sets + + +def cf_vm(cli_ctx, _): + return _compute_client_factory(cli_ctx).virtual_machines + + +def cf_vm_ext(cli_ctx, _): + return _compute_client_factory(cli_ctx).virtual_machine_extensions + + +def cf_vm_ext_image(cli_ctx, _): + return _compute_client_factory(cli_ctx).virtual_machine_extension_images + + +def cf_vm_image(cli_ctx, _): + return _compute_client_factory(cli_ctx).virtual_machine_images + + +def cf_vm_image_term(cli_ctx, _): + from azure.cli.core.commands.client_factory import get_mgmt_service_client + from azure.mgmt.marketplaceordering import MarketplaceOrderingAgreements + market_place_client = get_mgmt_service_client(cli_ctx, MarketplaceOrderingAgreements) + return market_place_client.marketplace_agreements + + +def cf_usage(cli_ctx, _): + return _compute_client_factory(cli_ctx).usage + + +def cf_vmss(cli_ctx, _): + return _compute_client_factory(cli_ctx).virtual_machine_scale_sets + + +def cf_vmss_vm(cli_ctx, _): + return _compute_client_factory(cli_ctx).virtual_machine_scale_set_vms + + +def cf_vm_sizes(cli_ctx, _): + return _compute_client_factory(cli_ctx).virtual_machine_sizes + + +def cf_disks(cli_ctx, _): + return _compute_client_factory(cli_ctx).disks + + +def cf_snapshots(cli_ctx, _): + return _compute_client_factory(cli_ctx).snapshots + + +def cf_disk_accesses(cli_ctx, _): + return _compute_client_factory(cli_ctx).disk_accesses + + +def cf_images(cli_ctx, _): + return _compute_client_factory(cli_ctx).images + + +def cf_run_commands(cli_ctx, _): + return _compute_client_factory(cli_ctx).virtual_machine_run_commands + + +def cf_vmss_run_commands(cli_ctx, _): + return _compute_client_factory(cli_ctx).virtual_machine_scale_set_vm_run_commands + + +def cf_rolling_upgrade_commands(cli_ctx, _): + return _compute_client_factory(cli_ctx).virtual_machine_scale_set_rolling_upgrades + + +def cf_galleries(cli_ctx, _): + return _compute_client_factory(cli_ctx).galleries + + +def cf_gallery_images(cli_ctx, _): + return _compute_client_factory(cli_ctx).gallery_images + + +def cf_gallery_image_versions(cli_ctx, _): + return _compute_client_factory(cli_ctx).gallery_image_versions + + +def cf_gallery_application(cli_ctx, *_): + return _compute_client_factory(cli_ctx).gallery_applications + + +def cf_gallery_application_version(cli_ctx, *_): + return _compute_client_factory(cli_ctx).gallery_application_versions + + +def cf_proximity_placement_groups(cli_ctx, _): + return _compute_client_factory(cli_ctx).proximity_placement_groups + + +def cf_dedicated_hosts(cli_ctx, _): + return _compute_client_factory(cli_ctx).dedicated_hosts + + +def cf_dedicated_host_groups(cli_ctx, _): + return _compute_client_factory(cli_ctx).dedicated_host_groups + + +def _log_analytics_client_factory(cli_ctx, subscription_id, *_): + from azure.mgmt.loganalytics import LogAnalyticsManagementClient + from azure.cli.core.commands.client_factory import get_mgmt_service_client + return get_mgmt_service_client(cli_ctx, LogAnalyticsManagementClient, subscription_id=subscription_id) + + 
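+# The cf_* helpers below continue the client-factory pattern used above: each one builds a service client for the current cloud and returns either the client itself or a single operation group, ready for command registrations to bind to.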
+def cf_log_analytics(cli_ctx, subscription_id, *_): + return _log_analytics_client_factory(cli_ctx, subscription_id) + + +def cf_log_analytics_data_sources(cli_ctx, subscription_id, *_): + return _log_analytics_client_factory(cli_ctx, subscription_id).data_sources + + +def cf_log_analytics_data_plane(cli_ctx, _): + """Initialize Log Analytics data client for use with CLI.""" + from azure.monitor.query import LogsQueryClient + from azure.cli.core._profile import Profile + profile = Profile(cli_ctx=cli_ctx) + cred, _, _ = profile.get_login_credentials( + resource=cli_ctx.cloud.endpoints.log_analytics_resource_id) + api_version = 'v1' + return LogsQueryClient(cred, endpoint=cli_ctx.cloud.endpoints.log_analytics_resource_id + '/' + api_version) + + +def cf_disk_encryption_set(cli_ctx, _): + return _compute_client_factory(cli_ctx).disk_encryption_sets + + +def _dev_test_labs_client_factory(cli_ctx, subscription_id, *_): + from azure.mgmt.devtestlabs import DevTestLabsClient + from azure.cli.core.commands.client_factory import get_mgmt_service_client + return get_mgmt_service_client(cli_ctx, DevTestLabsClient, subscription_id=subscription_id) + + +def cf_vm_cl(cli_ctx, *_): + from azure.cli.core.commands.client_factory import get_mgmt_service_client + from azure.mgmt.compute import ComputeManagementClient + return get_mgmt_service_client(cli_ctx, + ComputeManagementClient) + + +def cf_shared_galleries(cli_ctx, *_): + return cf_vm_cl(cli_ctx).shared_galleries + + +def cf_gallery_sharing_profile(cli_ctx, *_): + return cf_vm_cl(cli_ctx).gallery_sharing_profile + + +def cf_shared_gallery_image(cli_ctx, *_): + return cf_vm_cl(cli_ctx).shared_gallery_images + + +def cf_shared_gallery_image_version(cli_ctx, *_): + return cf_vm_cl(cli_ctx).shared_gallery_image_versions + + +def cf_community_gallery(cli_ctx, *_): + return cf_vm_cl(cli_ctx).community_galleries + + +def cf_community_gallery_image(cli_ctx, *_): + return cf_vm_cl(cli_ctx).community_gallery_images + + +def cf_community_gallery_image_version(cli_ctx, *_): + return cf_vm_cl(cli_ctx).community_gallery_image_versions + + +def cf_capacity_reservation_groups(cli_ctx, *_): + return cf_vm_cl(cli_ctx).capacity_reservation_groups + + +def cf_capacity_reservations(cli_ctx, *_): + return cf_vm_cl(cli_ctx).capacity_reservations + + +def cf_restore_point(cli_ctx, *_): + return cf_vm_cl(cli_ctx).restore_points + + +def cf_restore_point_collection(cli_ctx, *_): + return cf_vm_cl(cli_ctx).restore_point_collections diff --git a/src/azure-cli/azure/cli/command_modules/vm/azure_stack/_completers.py b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/_completers.py new file mode 100644 index 00000000000..5a3686822b5 --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/_completers.py @@ -0,0 +1,34 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# -------------------------------------------------------------------------------------------- + +from azure.cli.command_modules.vm.azure_stack._actions import load_images_from_aliases_doc, get_vm_sizes +from azure.cli.core.commands.parameters import get_one_of_subscription_locations +from azure.cli.core.decorators import Completer + + +@Completer +def get_urn_aliases_completion_list(cmd, prefix, namespace): # pylint: disable=unused-argument + images = load_images_from_aliases_doc(cmd.cli_ctx) + return [i['urnAlias'] for i in images] + + +@Completer +def get_vm_size_completion_list(cmd, prefix, namespace): # pylint: disable=unused-argument + location = namespace.location + if not location: + location = get_one_of_subscription_locations(cmd.cli_ctx) + result = get_vm_sizes(cmd.cli_ctx, location) + return [r.name for r in result] + + +@Completer +def get_vm_run_command_completion_list(cmd, prefix, namespace): # pylint: disable=unused-argument + from ._client_factory import _compute_client_factory + try: + location = namespace.location + except AttributeError: + location = get_one_of_subscription_locations(cmd.cli_ctx) + result = _compute_client_factory(cmd.cli_ctx).virtual_machine_run_commands.list(location) + return [r.id for r in result] diff --git a/src/azure-cli/azure/cli/command_modules/vm/azure_stack/_constants.py b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/_constants.py new file mode 100644 index 00000000000..5b3ff503907 --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/_constants.py @@ -0,0 +1,23 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# +# Generation mode: Incremental +# -------------------------------------------------------------------------- + +UPGRADE_SECURITY_HINT = 'Consider upgrading security for your workloads using Azure Trusted Launch VMs. ' \ + 'To know more about Trusted Launch, please visit ' \ + 'https://aka.ms/TrustedLaunch.' + +TLAD_DEFAULT_CHANGE_MSG = 'Ignite (November) 2023 onwards "{}" command will deploy Gen2-Trusted ' \ + 'Launch VM by default. To know more about the default change and Trusted Launch, ' \ + 'please visit https://aka.ms/TLaD' + +# The `Standard` is used for backward compatibility to allow customers to keep their current behavior +# after changing the default values of `security_type` to Trusted Launch VMs in the future. +COMPATIBLE_SECURITY_TYPE_VALUE = 'Standard' diff --git a/src/azure-cli/azure/cli/command_modules/vm/azure_stack/_format.py b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/_format.py new file mode 100644 index 00000000000..ea9c19eadeb --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/_format.py @@ -0,0 +1,166 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# --------------------------------------------------------------------------------------------
+
+
+def transform_ip_addresses(result):
+    from collections import OrderedDict
+    transformed = []
+    for r in result:
+        network = r['virtualMachine']['network']
+        public = network.get('publicIpAddresses')
+        public_ip_addresses = ','.join([p['ipAddress'] for p in public if p['ipAddress']]) if public else None
+        private = network.get('privateIpAddresses')
+        private_ip_addresses = ','.join(private) if private else None
+        entry = OrderedDict([('virtualMachine', r['virtualMachine']['name']),
+                             ('publicIPAddresses', public_ip_addresses),
+                             ('privateIPAddresses', private_ip_addresses)])
+        transformed.append(entry)
+
+    return transformed
+
+
+def transform_vm(vm):
+    from collections import OrderedDict
+    result = OrderedDict([('name', vm['name']),
+                          ('resourceGroup', vm['resourceGroup']),
+                          ('powerState', vm.get('powerState')),
+                          ('publicIps', vm.get('publicIps')),
+                          ('fqdns', vm.get('fqdns')),
+                          ('location', vm['location'])])
+    if 'zones' in vm:
+        result['zones'] = ','.join(vm['zones']) if vm['zones'] else ''
+    return result
+
+
+def transform_vm_create_output(result):
+    from azure.mgmt.core.tools import parse_resource_id
+    from collections import OrderedDict
+    try:
+        resource_group = getattr(result, 'resource_group', None) or parse_resource_id(result.id)['resource_group']
+        output = OrderedDict([('id', result.id),
+                              ('resourceGroup', resource_group),
+                              ('powerState', result.power_state),
+                              ('publicIpAddress', result.public_ips),
+                              ('fqdns', result.fqdns),
+                              ('privateIpAddress', result.private_ips),
+                              ('macAddress', result.mac_addresses),
+                              ('location', result.location)])
+        if getattr(result, 'identity', None):
+            output['identity'] = result.identity
+        if hasattr(result, 'zones'):  # output 'zones' column even if the property value is None
+            output['zones'] = result.zones[0] if result.zones else ''
+        return output
+    except AttributeError:
+        from msrest.pipeline import ClientRawResponse
+        return None if isinstance(result, ClientRawResponse) else result
+
+
+def transform_vm_usage_list(result):
+    result = list(result)
+    for item in result:
+        item.current_value = str(item.current_value)
+        item.limit = str(item.limit)
+        item.local_name = item.name.localized_value
+    return result
+
+
+def transform_vm_list(vm_list):
+    return [transform_vm(v) for v in vm_list]
+
+
+# flatten out important fields (single-member arrays) to be displayed in the table output
+def transform_sku_for_table_output(skus):
+    from collections import OrderedDict
+    result = []
+    for k in skus:
+        order_dict = OrderedDict()
+        order_dict['resourceType'] = k['resourceType']
+        order_dict['locations'] = str(k['locations']) if len(k['locations']) > 1 else k['locations'][0]
+        order_dict['name'] = k['name']
+        if k.get('locationInfo'):
+            order_dict['zones'] = ','.join(sorted(k['locationInfo'][0].get('zones', [])))
+        else:
+            order_dict['zones'] = 'None'
+        if k['restrictions']:
+            reasons = []
+            for x in k['restrictions']:
+                reason = x['reasonCode']
+                if x['type']:
+                    reason += ', type: ' + x['type']
+                if x['restrictionInfo']['locations']:
+                    reason += ', locations: ' + ','.join(x['restrictionInfo']['locations'])
+                if x['restrictionInfo']['zones']:
+                    reason += ', zones: ' + ','.join(x['restrictionInfo']['zones'])
+                reasons.append(reason)
+            order_dict['restrictions'] = str(reasons) if len(reasons) > 1 else reasons[0]
+        else:
+            order_dict['restrictions'] = 'None'
+        for k2 in order_dict:
+            order_dict[k2] = order_dict[k2] if order_dict[k2] is not None else 'None'
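+        # The loop above replaced any remaining None with the literal string 'None',
+        # so every cell in the table output renders as text rather than blank.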
+ result.append(order_dict) + return result + + +transform_extension_show_table_output = '{Name:name, ProvisioningState:provisioningState, Publisher:publisher, ' \ + 'Version:typeHandlerVersion, AutoUpgradeMinorVersion:autoUpgradeMinorVersion}' + +transform_disk_create_table_output = '{Name:name, ResourceGroup:resourceGroup, Location:location, Zones: ' \ + '(!zones && \' \') || join(` `, zones), Sku:sku.name, OsType:osType, ' \ + 'SizeGb:diskSizeGb, ProvisioningState:provisioningState}' + +transform_disk_show_table_output = '{Name:name, ResourceGroup:resourceGroup, Location:location, Zones: ' \ + '(!zones && \' \') || join(` `, zones), Sku:sku.name, OsType:osType, ' \ + 'SizeGb:diskSizeGB, ProvisioningState:provisioningState}' + + +def get_vmss_table_output_transformer(loader, for_list=True): + transform = '{Name:name, ResourceGroup:resourceGroup, Location:location, $zone$Capacity:sku.capacity, ' \ + 'Overprovision:overprovision, UpgradePolicy:upgradePolicy.mode}' + transform = transform.replace('$zone$', 'Zones: (!zones && \' \') || join(\' \', zones), ' + if loader.supported_api_version(min_api='2017-03-30') else ' ') + return transform if not for_list else '[].' + transform + + +transform_vmss_list_with_zones_table_output = '[].{Name:name, ResourceGroup:resourceGroup, Location:location, ' \ + 'Zones: (!zones && \' \') || join(\' \', zones), ' \ + 'Capacity:sku.capacity, Overprovision:overprovision, ' \ + 'UpgradePolicy:upgradePolicy.mode}' + +transform_vmss_list_without_zones_table_output = '[].{Name:name, ResourceGroup:resourceGroup, Location:location, ' \ + 'Capacity:sku.capacity, Overprovision:overprovision, ' \ + 'UpgradePolicy:upgradePolicy.mode}' + + +def transform_vm_encryption_show_table_output(result): + from collections import OrderedDict + if result.get("status", []): + status_dict = result["status"][0] + return OrderedDict([("status", status_dict.get("displayStatus", "N/A")), + ("message", status_dict.get("message", "N/A"))]) + return result + + +def transform_log_analytics_query_output(result): + from collections import OrderedDict + tables_output = [] + + def _transform_query_output(table): + name = table.name + rows = table.rows + column_names = table.columns + table_output = [] + for row in rows: + item = OrderedDict() + item['TableName'] = name + for index, value in enumerate(row): + item[column_names[index]] = str(value) + table_output.append(item) + return table_output + + for table in result.tables: + table_output = _transform_query_output(table) + tables_output.extend(table_output) + + return tables_output diff --git a/src/azure-cli/azure/cli/command_modules/vm/azure_stack/_image_builder.py b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/_image_builder.py new file mode 100644 index 00000000000..d983585bfd3 --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/_image_builder.py @@ -0,0 +1,1014 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- + +# TODO refactor out _image_builder commands. 
+# i.e something like image_builder/_client_factory image_builder/commands.py image_builder/_params.py +import os +import re +import json +import traceback +from enum import Enum + +import requests + +try: + from urllib.parse import urlparse +except ImportError: + from urlparse import urlparse # pylint: disable=import-error + +from knack.util import CLIError +from knack.log import get_logger + +from azure.mgmt.core.tools import is_valid_resource_id, resource_id, parse_resource_id + +from azure.core.exceptions import HttpResponseError + +from azure.cli.core.commands import cached_get, cached_put, LongRunningOperation +from azure.cli.core.commands.client_factory import get_subscription_id +from azure.cli.core.commands.validators import get_default_location_from_resource_group, validate_tags +from azure.cli.core.azclierror import RequiredArgumentMissingError, ResourceNotFoundError + +from azure.cli.command_modules.vm.azure_stack._client_factory import _compute_client_factory +from azure.cli.command_modules.vm.azure_stack._validators import _get_resource_id + +logger = get_logger(__name__) + + +class _SourceType(Enum): + PLATFORM_IMAGE = "PlatformImage" + ISO_URI = "ISO" + MANAGED_IMAGE = "ManagedImage" + SIG_VERSION = "SharedImageVersion" + + +class _DestType(Enum): + MANAGED_IMAGE = 1 + SHARED_IMAGE_GALLERY = 2 + + +class ScriptType(Enum): + SHELL = "shell" + POWERSHELL = "powershell" + WINDOWS_RESTART = "windows-restart" + WINDOWS_UPDATE = "windows-update" + FILE = "file" + + +class GalleryImageReferenceType(Enum): + COMPUTE = (0, 'id') + COMMUNITY = (1, 'communityGalleryImageId') + SHARED = (2, 'sharedGalleryImageId') + + def __init__(self, index, backend_key): + self.index = index + self.backend_key = backend_key + + +# region Client Factories + +def image_builder_client_factory(cli_ctx, _): + from azure.cli.core.commands.client_factory import get_mgmt_service_client + from azure.mgmt.imagebuilder import ImageBuilderClient + client = get_mgmt_service_client(cli_ctx, ImageBuilderClient) + return client + + +def cf_img_bldr_image_templates(cli_ctx, _): + return image_builder_client_factory(cli_ctx, _).virtual_machine_image_templates + +# endregion + + +def _no_white_space_or_err(words): + for char in words: + if char.isspace(): + raise CLIError("Error: White space in {}".format(words)) + + +def _require_defer(cmd): + use_cache = cmd.cli_ctx.data.get('_cache', False) + if not use_cache: + raise CLIError("This command requires --defer") + + +def patch_image_template(cli_ctx, resource_group_name, image_template_name, image_template_update): + client = image_builder_client_factory(cli_ctx, '') + poller = client.virtual_machine_image_templates.begin_update(resource_group_name, image_template_name, + image_template_update) + return LongRunningOperation(cli_ctx)(poller) + + +def _parse_script(script_str): + script_name = script_str + script = {"script": script_str, "name": script_name, "type": None} + if urlparse(script_str).scheme and "://" in script_str: + _, script_name = script_str.rsplit("/", 1) + script["name"] = script_name + script["is_url"] = True + else: + raise CLIError("Expected a url, got: {}".format(script_str)) + + if script_str.lower().endswith(".sh"): + script["type"] = ScriptType.SHELL + elif script_str.lower().endswith(".ps1"): + script["type"] = ScriptType.POWERSHELL + + return script + + +def _parse_image_destination(cmd, rg, destination, is_shared_image): + if any([not destination, "=" not in destination]): + raise CLIError("Invalid Format: the given image destination {} must 
contain the '=' delimiter." + .format(destination)) + + rid, location = destination.rsplit("=", 1) + if not rid or not location: + raise CLIError("Invalid Format: destination {} should have format 'destination=location'.".format(destination)) + + _no_white_space_or_err(rid) + + result = None + if is_shared_image: + if not is_valid_resource_id(rid): + if "/" not in rid: + raise CLIError("Invalid Format: {} must have a shared image gallery name and definition. " + "They must be delimited by a '/'.".format(rid)) + + sig_name, sig_def = rid.rsplit("/", 1) + + rid = resource_id( + subscription=get_subscription_id(cmd.cli_ctx), resource_group=rg, + namespace='Microsoft.Compute', + type='galleries', name=sig_name, + child_type_1='images', child_name_1=sig_def + ) + + result = rid, location.split(",") + else: + if not is_valid_resource_id(rid): + rid = resource_id( + subscription=get_subscription_id(cmd.cli_ctx), + resource_group=rg, + namespace='Microsoft.Compute', type='images', + name=rid + ) + + result = rid, location + + return result + + +def _validate_location(location, location_names, location_display_names): + if ' ' in location: + # if display name is provided, attempt to convert to short form name + location = next((name for name in location_display_names if name.lower() == location.lower()), location) + + if location.lower() not in [location_name.lower() for location_name in location_names]: + raise CLIError("Location {} is not a valid subscription location. " + "Use one from `az account list-locations`.".format(location)) + + return location + + +def process_image_template_create_namespace(cmd, namespace): # pylint: disable=too-many-locals, too-many-branches, too-many-statements + if namespace.image_template is not None: + return + + from azure.cli.core.commands.parameters import get_subscription_locations + + source = None + scripts = [] + + # default location to RG location. + if not namespace.location: + get_default_location_from_resource_group(cmd, namespace) + + # validate tags. + validate_tags(namespace) + + # Validate and parse scripts + if namespace.scripts: + for ns_script in namespace.scripts: + scripts.append(_parse_script(ns_script)) + + # Validate and parse destination and locations + destinations = [] + subscription_locations = get_subscription_locations(cmd.cli_ctx) + location_names = [location.name for location in subscription_locations] + location_display_names = [location.display_name for location in subscription_locations] + + if namespace.managed_image_destinations: + for dest in namespace.managed_image_destinations: + rid, location = _parse_image_destination(cmd, namespace.resource_group_name, dest, is_shared_image=False) + location = _validate_location(location, location_names, location_display_names) + destinations.append((_DestType.MANAGED_IMAGE, rid, location)) + + if namespace.shared_image_destinations: + for dest in namespace.shared_image_destinations: + rid, locations = _parse_image_destination(cmd, namespace.resource_group_name, dest, is_shared_image=True) + locations = [_validate_location(location, location_names, location_display_names) for location in locations] + destinations.append((_DestType.SHARED_IMAGE_GALLERY, rid, locations)) + + # Validate and parse source image + # 1 - check if source is a URN. 
A URN, e.g. "Canonical:UbuntuServer:18.04-LTS:latest"
+    urn_match = re.match('([^:]*):([^:]*):([^:]*):([^:]*)', namespace.source)
+    if urn_match:  # if platform image urn
+        source = {
+            'publisher': urn_match.group(1),
+            'offer': urn_match.group(2),
+            'sku': urn_match.group(3),
+            'version': urn_match.group(4),
+            'type': _SourceType.PLATFORM_IMAGE
+        }
+
+        likely_linux = bool("windows" not in source["offer"].lower() and "windows" not in source["sku"].lower())
+
+        logger.info("%s looks like a platform image URN", namespace.source)
+
+    # 2 - check if source is a fully-qualified resource ID (assumed to be an image ID)
+    elif is_valid_resource_id(namespace.source):
+
+        parsed = parse_resource_id(namespace.source)
+        image_type = parsed.get('type')
+        image_resource_type = parsed.get('resource_type')
+
+        if not image_type:
+            pass
+
+        elif image_type.lower() == 'images':
+            source = {
+                'image_id': namespace.source,
+                'type': _SourceType.MANAGED_IMAGE
+            }
+            logger.info("%s looks like a managed image id.", namespace.source)
+
+        elif image_type == "galleries" and image_resource_type:
+            source = {
+                'image_version_id': namespace.source,
+                'type': _SourceType.SIG_VERSION
+            }
+            logger.info("%s looks like a shared image version id.", namespace.source)
+
+    # 3 - check if source is a Red Hat iso uri. If so, a checksum must be provided.
+    elif urlparse(namespace.source).scheme and "://" in namespace.source and ".iso" in namespace.source.lower():
+        if not namespace.checksum:
+            raise CLIError("Must provide a checksum for source uri: {}".format(namespace.source))
+        source = {
+            'source_uri': namespace.source,
+            'sha256_checksum': namespace.checksum,
+            'type': _SourceType.ISO_URI
+        }
+        likely_linux = True
+
+        logger.info("%s looks like a RedHat iso uri.", namespace.source)
+
+    # 4 - check if source is a urn alias from the vmImageAliasDoc endpoint. See "az cloud show"
+    if not source:
+        from azure.cli.command_modules.vm.azure_stack._actions import load_images_from_aliases_doc
+        images = load_images_from_aliases_doc(cmd.cli_ctx)
+        matched = next((x for x in images if x['urnAlias'].lower() == namespace.source.lower()), None)
+        if matched:
+            source = {
+                'publisher': matched['publisher'],
+                'offer': matched['offer'],
+                'sku': matched['sku'],
+                'version': matched['version'],
+                'type': _SourceType.PLATFORM_IMAGE
+            }
+
+            if "windows" not in source["offer"].lower() and "windows" not in source["sku"].lower():
+                likely_linux = True
+
+            logger.info("%s looks like a platform image alias.", namespace.source)
+
+    # 5 - check if source is an existing managed disk image resource
+    if not source:
+        compute_client = _compute_client_factory(cmd.cli_ctx)
+        try:
+            image_name = namespace.source
+            compute_client.images.get(namespace.resource_group_name, namespace.source)
+            namespace.source = _get_resource_id(cmd.cli_ctx, namespace.source, namespace.resource_group_name,
+                                                'images', 'Microsoft.Compute')
+            source = {
+                'image_id': namespace.source,
+                'type': _SourceType.MANAGED_IMAGE
+            }
+
+            logger.info("%s, looks like a managed image name. Using resource ID: %s", image_name, namespace.source)  # pylint: disable=line-too-long
+        except HttpResponseError:
+            pass
+
+    if not source:
+        err = 'Invalid image "{}". 
Use a valid image URN, managed image name or ID, ISO URI, ' \ + 'or pick a platform image alias from {}.\nSee vm create -h for more information on specifying an image.'\ + .format(namespace.source, ", ".join([x['urnAlias'] for x in images])) + raise CLIError(err) + + for script in scripts: + if script["type"] is None: + try: + script["type"] = ScriptType.SHELL if likely_linux else ScriptType.POWERSHELL # pylint: disable=used-before-assignment + logger.info("For script %s, likely linux is %s.", script["script"], likely_linux) + except NameError: + raise CLIError("Unable to infer the type of script {}.".format(script["script"])) + + namespace.source_dict = source + namespace.scripts_list = scripts + namespace.destinations_lists = destinations + + +# first argument is `cmd`, but it is unused. Feel free to substitute it in. +def process_img_tmpl_customizer_add_namespace(cmd, namespace): # pylint:disable=unused-argument + + if namespace.customizer_type.lower() in [ScriptType.SHELL.value.lower(), ScriptType.POWERSHELL.value.lower()]: # pylint:disable=no-member, line-too-long + if not (namespace.script_url or namespace.inline_script): + raise CLIError("A script must be provided if the customizer type is one of: {} {}" + .format(ScriptType.SHELL.value, ScriptType.POWERSHELL.value)) + + if namespace.script_url and namespace.inline_script: + raise CLIError("Cannot supply both script url and inline script.") + + elif namespace.customizer_type.lower() == ScriptType.WINDOWS_RESTART.value.lower(): # pylint:disable=no-member + if namespace.script_url or namespace.inline_script: + logger.warning("Ignoring the supplied script as scripts are not used for Windows Restart.") + + +def process_img_tmpl_output_add_namespace(cmd, namespace): + from azure.cli.core.commands.parameters import get_subscription_locations + + outputs = [output for output in [namespace.managed_image, namespace.gallery_image_definition, namespace.is_vhd] if output] # pylint:disable=line-too-long + + if len(outputs) != 1: + err = "Supplied outputs: {}".format(outputs) + logger.debug(err) + raise CLIError("Usage error: must supply exactly one destination type to add. 
Supplied {}".format(len(outputs))) + + if namespace.managed_image: + if not is_valid_resource_id(namespace.managed_image): + namespace.managed_image = resource_id( + subscription=get_subscription_id(cmd.cli_ctx), + resource_group=namespace.resource_group_name, + namespace='Microsoft.Compute', type='images', + name=namespace.managed_image + ) + + if namespace.gallery_image_definition: + if not is_valid_resource_id(namespace.gallery_image_definition): + if not namespace.gallery_name: + raise CLIError("Usage error: gallery image definition is a name and not an ID.") + + namespace.gallery_image_definition = resource_id( + subscription=get_subscription_id(cmd.cli_ctx), resource_group=namespace.resource_group_name, + namespace='Microsoft.Compute', + type='galleries', name=namespace.gallery_name, + child_type_1='images', child_name_1=namespace.gallery_image_definition + ) + + if namespace.is_vhd and not namespace.output_name: + raise CLIError("Usage error: If --is-vhd is used, a run output name must be provided via --output-name.") + + subscription_locations = get_subscription_locations(cmd.cli_ctx) + location_names = [location.name for location in subscription_locations] + location_display_names = [location.display_name for location in subscription_locations] + + if namespace.managed_image_location: + namespace.managed_image_location = _validate_location(namespace.managed_image_location, + location_names, location_display_names) + + if namespace.gallery_replication_regions: + processed_regions = [] + for loc in namespace.gallery_replication_regions: + processed_regions.append(_validate_location(loc, location_names, location_display_names)) + namespace.gallery_replication_regions = processed_regions + + # get default location from resource group + if not any([namespace.managed_image_location, namespace.gallery_replication_regions]) and hasattr(namespace, 'location'): # pylint: disable=line-too-long + # store location in namespace.location for use in custom method. + get_default_location_from_resource_group(cmd, namespace) + + # validate tags. + validate_tags(namespace) + + +# region Custom Commands + +def create_image_template( # pylint: disable=too-many-locals, too-many-branches, too-many-statements, unused-argument + cmd, client, resource_group_name, image_template_name, location=None, + source_dict=None, scripts_list=None, destinations_lists=None, build_timeout=None, tags=None, + source=None, scripts=None, checksum=None, managed_image_destinations=None, + shared_image_destinations=None, no_wait=False, image_template=None, identity=None, + vm_size=None, os_disk_size=None, vnet=None, subnet=None, proxy_vm_size=None, build_vm_identities=None, + staging_resource_group=None, validator=None): + from azure.mgmt.imagebuilder.models import (ImageTemplate, ImageTemplateSharedImageVersionSource, + ImageTemplatePlatformImageSource, ImageTemplateManagedImageSource, + ImageTemplateShellCustomizer, ImageTemplatePowerShellCustomizer, + ImageTemplateManagedImageDistributor, + ImageTemplateSharedImageDistributor, ImageTemplateIdentity, + UserAssignedIdentity, ImageTemplateVmProfile, VirtualNetworkConfig, + ImageTemplatePropertiesValidate, ImageTemplateInVMValidator) + + if image_template is not None: + logger.warning('You are using --image-template. 
All other parameters will be ignored.') + if os.path.exists(image_template): + # Local file + with open(image_template) as f: + content = f.read() + else: + # It should be an URL + msg = '\nusage error: --image-template is not a correct local path or URL' + try: + r = requests.get(image_template) + except Exception: + raise CLIError(traceback.format_exc() + msg) + if r.status_code != 200: + raise CLIError(traceback.format_exc() + msg) + content = r.content + + try: + obj = json.loads(content) + except json.JSONDecodeError: + raise CLIError(traceback.format_exc() + + '\nusage error: Content of --image-template is not a valid JSON string') + content = {} + if 'properties' in obj: + content = obj['properties'] + if 'location' in obj: + content['location'] = obj['location'] + if 'tags' in obj: + content['tags'] = obj['tags'] + if 'identity' in obj: + content['identity'] = obj['identity'] + if 'staging_resource_group' in obj: + content['staging_resource_group'] = obj['staging_resource_group'] + if 'validate' in obj: + content['validate'] = obj['validate'] + return client.virtual_machine_image_templates.begin_create_or_update( + parameters=content, resource_group_name=resource_group_name, image_template_name=image_template_name) + + template_source, template_scripts, template_destinations = None, [], [] + + # create image template source settings + if source_dict['type'] == _SourceType.PLATFORM_IMAGE: + template_source = ImageTemplatePlatformImageSource(**source_dict) + elif source_dict['type'] == _SourceType.ISO_URI: + # It was supported before but is removed in the current service version. + raise CLIError('usage error: Source type ISO URI is not supported.') + elif source_dict['type'] == _SourceType.MANAGED_IMAGE: + template_source = ImageTemplateManagedImageSource(**source_dict) + elif source_dict['type'] == _SourceType.SIG_VERSION: + template_source = ImageTemplateSharedImageVersionSource(**source_dict) + + # create image template customizer settings + # Script structure can be found in _parse_script's function definition + for script in scripts_list: + script.pop("is_url") + script["script_uri"] = script.pop("script") + + if script["type"] == ScriptType.SHELL: + template_scripts.append(ImageTemplateShellCustomizer(**script)) + elif script["type"] == ScriptType.POWERSHELL: + template_scripts.append(ImageTemplatePowerShellCustomizer(**script)) + else: # Should never happen + logger.debug("Script %s has type %s", script["script"], script["type"]) + raise CLIError("Script {} has an invalid type.".format(script["script"])) + + # create image template distribution / destination settings + for dest_type, rid, loc_info in destinations_lists: + parsed = parse_resource_id(rid) + if dest_type == _DestType.MANAGED_IMAGE: + template_destinations.append(ImageTemplateManagedImageDistributor( + image_id=rid, location=loc_info, run_output_name=parsed['name'])) + elif dest_type == _DestType.SHARED_IMAGE_GALLERY: + template_destinations.append(ImageTemplateSharedImageDistributor( + gallery_image_id=rid, replication_regions=loc_info, run_output_name=parsed['child_name_1'])) + else: + logger.info("No applicable destination found for destination %s", str(tuple([dest_type, rid, loc_info]))) + + # Identity + identity_body = None + if identity is not None: + subscription_id = get_subscription_id(cmd.cli_ctx) + user_assigned_identities = {} + for ide in identity: + if not is_valid_resource_id(ide): + ide = resource_id(subscription=subscription_id, resource_group=resource_group_name, + 
namespace='Microsoft.ManagedIdentity', type='userAssignedIdentities', name=ide) + user_assigned_identities[ide] = UserAssignedIdentity() # pylint: disable=line-too-long + identity_body = ImageTemplateIdentity(type='UserAssigned', user_assigned_identities=user_assigned_identities) + + # VM profile + vnet_config = None + if vnet or subnet: + if not is_valid_resource_id(subnet): + subscription_id = get_subscription_id(cmd.cli_ctx) + subnet = resource_id(subscription=subscription_id, resource_group=resource_group_name, + namespace='Microsoft.Network', type='virtualNetworks', name=vnet, + child_type_1='subnets', child_name_1=subnet) + vnet_config = VirtualNetworkConfig(subnet_id=subnet) + if proxy_vm_size is not None: + if subnet is not None: + vnet_config = VirtualNetworkConfig(subnet_id=subnet, proxy_vm_size=proxy_vm_size) + else: + raise RequiredArgumentMissingError( + 'Usage error: --proxy-vm-size is only configurable when --subnet is specified.') + vm_profile = ImageTemplateVmProfile(vm_size=vm_size, os_disk_size_gb=os_disk_size, user_assigned_identities=build_vm_identities, vnet_config=vnet_config) # pylint: disable=line-too-long + + validate = None + if validator: + in_vm_validations = [] + for item in validator: + validator_item = ImageTemplateInVMValidator() + validator_item.type = item + in_vm_validations.append(validator_item) + validate = ImageTemplatePropertiesValidate(in_vm_validations=in_vm_validations) + + image_template = ImageTemplate(source=template_source, distribute=template_destinations, + location=location, build_timeout_in_minutes=build_timeout, tags=(tags or {}), + identity=identity_body, vm_profile=vm_profile, validate=validate, + staging_resource_group=staging_resource_group) + + if len(template_scripts) > 0: + image_template.customize = template_scripts + + return cached_put(cmd, client.virtual_machine_image_templates.begin_create_or_update, parameters=image_template, + resource_group_name=resource_group_name, image_template_name=image_template_name) + + +def assign_template_identity(cmd, client, resource_group_name, image_template_name, user_assigned=None): + from azure.mgmt.imagebuilder.models import (ImageTemplateIdentity, ImageTemplateUpdateParameters, + UserAssignedIdentity) + + from azure.cli.core.commands.arm import assign_identity as assign_identity_helper + + def getter(): + return client.virtual_machine_image_templates.get(resource_group_name, image_template_name) + + def setter(image_template): + existing_user_identities = set() + if image_template.identity is not None: + existing_user_identities = {x.lower() for x in + list((image_template.identity.user_assigned_identities or {}).keys())} + + subscription_id = get_subscription_id(cmd.cli_ctx) + add_user_assigned = set() + for ide in user_assigned: + if not is_valid_resource_id(ide): + ide = resource_id(subscription=subscription_id, resource_group=resource_group_name, + namespace='Microsoft.ManagedIdentity', type='userAssignedIdentities', name=ide) + add_user_assigned.add(ide.lower()) + + updated_user_assigned = list(existing_user_identities.union(add_user_assigned)) + + default_user_identity = UserAssignedIdentity() # pylint: disable=line-too-long + user_assigned_identities = dict.fromkeys(updated_user_assigned, default_user_identity) + + image_template_identity = ImageTemplateIdentity(type='UserAssigned', + user_assigned_identities=user_assigned_identities) + image_template_update = ImageTemplateUpdateParameters() + image_template_update.identity = image_template_identity + return 
patch_image_template(cmd.cli_ctx, resource_group_name, image_template_name, image_template_update) + + image_template = assign_identity_helper(cmd.cli_ctx, getter, setter) + return image_template.identity + + +def remove_template_identity(cmd, client, resource_group_name, image_template_name, user_assigned=None): + from azure.mgmt.imagebuilder.models import ImageTemplateUpdateParameters + + def getter(): + return client.virtual_machine_image_templates.get(resource_group_name, image_template_name) + + def setter(resource_group_name, image_template_name, image_template): + image_template_update = ImageTemplateUpdateParameters(identity=image_template.identity) + return client.virtual_machine_image_templates.begin_update(resource_group_name, image_template_name, + image_template_update) + + return _remove_template_identity(cmd, resource_group_name, image_template_name, user_assigned, getter, setter) + + +def _remove_template_identity(cmd, resource_group_name, image_template_name, user_assigned, getter, setter): + resource = getter() + if resource is None or resource.identity is None: + return None + + user_identities_to_remove = [] + if user_assigned is not None: + existing_user_identities = {x.lower() for x in list((resource.identity.user_assigned_identities or {}).keys())} + # all user assigned identities will be removed if the length of user_assigned is 0, + # otherwise the specified identity + subscription_id = get_subscription_id(cmd.cli_ctx) + user_identities_to_remove = existing_user_identities if len(user_assigned) == 0 else set() + for ide in user_assigned: + if not is_valid_resource_id(ide): + ide = resource_id(subscription=subscription_id, resource_group=resource_group_name, + namespace='Microsoft.ManagedIdentity', type='userAssignedIdentities', name=ide) + user_identities_to_remove.add(ide.lower()) + + non_existing = user_identities_to_remove.difference(existing_user_identities) + if non_existing: + from azure.cli.core.azclierror import InvalidArgumentValueError + raise InvalidArgumentValueError("'{}' are not associated with '{}', please provide existing user managed " + "identities".format(','.join(non_existing), image_template_name)) + + if not list(existing_user_identities - user_identities_to_remove): + resource.identity.type = "None" + resource.identity.user_assigned_identities = None + + if user_identities_to_remove and resource.identity.type != "None": + resource.identity.user_assigned_identities = {} + for identity in user_identities_to_remove: + resource.identity.user_assigned_identities[identity] = None + + result = LongRunningOperation(cmd.cli_ctx)(setter(resource_group_name, image_template_name, resource)) + return result.identity + + +def show_template_identity(client, resource_group_name, image_template_name): + return client.virtual_machine_image_templates.get(resource_group_name, image_template_name).identity + + +def list_image_templates(client, resource_group_name=None): + if resource_group_name: + return client.virtual_machine_image_templates.list_by_resource_group(resource_group_name) + return client.virtual_machine_image_templates.list() + + +def show_build_output(client, resource_group_name, image_template_name, output_name=None): + if output_name: + return client.virtual_machine_image_templates.get_run_output(resource_group_name, image_template_name, output_name) # pylint: disable=line-too-long + return client.virtual_machine_image_templates.list_run_outputs(resource_group_name, image_template_name) + + +def add_template_output(cmd, client, 
resource_group_name, image_template_name, gallery_name=None, location=None, # pylint: disable=line-too-long, unused-argument + output_name=None, is_vhd=None, vhd_uri=None, tags=None, + gallery_image_definition=None, gallery_replication_regions=None, + managed_image=None, managed_image_location=None, versioning=None): # pylint: disable=line-too-long, unused-argument + + _require_defer(cmd) + + from azure.mgmt.imagebuilder.models import (ImageTemplateManagedImageDistributor, ImageTemplateVhdDistributor, + ImageTemplateSharedImageDistributor, DistributeVersioner) + existing_image_template = cached_get(cmd, client.virtual_machine_image_templates.get, + resource_group_name=resource_group_name, + image_template_name=image_template_name) + + distributor = None + + if managed_image: + parsed = parse_resource_id(managed_image) + distributor = ImageTemplateManagedImageDistributor( + run_output_name=output_name or parsed['name'], + image_id=managed_image, location=managed_image_location or location) + elif gallery_image_definition: + parsed = parse_resource_id(gallery_image_definition) + distributor = ImageTemplateSharedImageDistributor( + run_output_name=output_name or parsed['child_name_1'], gallery_image_id=gallery_image_definition, + replication_regions=gallery_replication_regions or [location]) + if versioning: + versioner = DistributeVersioner() + versioner.scheme = versioning + distributor.versioning = versioner + elif is_vhd: + distributor = ImageTemplateVhdDistributor(run_output_name=output_name, uri=vhd_uri) + + if distributor: + distributor.artifact_tags = tags or {} + + if existing_image_template.distribute is None: + existing_image_template.distribute = [] + else: + for existing_distributor in existing_image_template.distribute: + if existing_distributor.run_output_name == distributor.run_output_name: + raise CLIError("Output with output name {} already exists in image template {}." 
+ .format(distributor.run_output_name.lower(), image_template_name)) + + existing_image_template.distribute.append(distributor) + + return cached_put(cmd, client.virtual_machine_image_templates.begin_create_or_update, parameters=existing_image_template, # pylint: disable=line-too-long + resource_group_name=resource_group_name, image_template_name=image_template_name) + + +def remove_template_output(cmd, client, resource_group_name, image_template_name, output_name): + _require_defer(cmd) + + existing_image_template = cached_get(cmd, client.virtual_machine_image_templates.get, + resource_group_name=resource_group_name, + image_template_name=image_template_name) + if not existing_image_template.distribute: + raise CLIError("No outputs to remove.") + + new_distribute = [] + for existing_distributor in existing_image_template.distribute: + if existing_distributor.run_output_name.lower() == output_name.lower(): + continue + new_distribute.append(existing_distributor) + + if len(new_distribute) == len(existing_image_template.distribute): + raise CLIError("Output with output name {} not in image template distribute list.".format(output_name)) + + existing_image_template.distribute = new_distribute + + return cached_put(cmd, client.virtual_machine_image_templates.begin_create_or_update, parameters=existing_image_template, # pylint: disable=line-too-long + resource_group_name=resource_group_name, image_template_name=image_template_name) + + +def clear_template_output(cmd, client, resource_group_name, image_template_name): + _require_defer(cmd) + + existing_image_template = cached_get(cmd, client.virtual_machine_image_templates.get, + resource_group_name=resource_group_name, + image_template_name=image_template_name) + if not existing_image_template.distribute: + raise CLIError("No outputs to remove.") + + existing_image_template.distribute = [] + + return cached_put(cmd, client.virtual_machine_image_templates.begin_create_or_update, parameters=existing_image_template, # pylint: disable=line-too-long + resource_group_name=resource_group_name, image_template_name=image_template_name) + + +def set_template_output_versioning(cmd, client, resource_group_name, image_template_name, output_name, scheme, major=None): # pylint: disable=line-too-long, unused-argument + + _require_defer(cmd) + + from azure.mgmt.imagebuilder.models import DistributeVersionerLatest, DistributeVersionerSource + existing_image_template = cached_get(cmd, client.virtual_machine_image_templates.get, + resource_group_name=resource_group_name, + image_template_name=image_template_name) + + distribute = [distribute for distribute in existing_image_template.distribute + if distribute.run_output_name.lower() == output_name.lower()] \ + if existing_image_template.distribute else [] + + if not distribute: + raise CLIError("Output with output name {} not in image template distribute list.".format(output_name)) + + if scheme == 'Latest': + distribute[0].versioning = DistributeVersionerLatest(scheme=scheme, major=major) + else: + distribute[0].versioning = DistributeVersionerSource(scheme=scheme) + + return cached_put(cmd, client.virtual_machine_image_templates.begin_create_or_update, parameters=existing_image_template, # pylint: disable=line-too-long + resource_group_name=resource_group_name, image_template_name=image_template_name) + + +def remove_template_output_versioning(cmd, client, resource_group_name, image_template_name, output_name): + _require_defer(cmd) + + existing_image_template = cached_get(cmd, 
client.virtual_machine_image_templates.get, + resource_group_name=resource_group_name, + image_template_name=image_template_name) + + distribute = [distribute for distribute in existing_image_template.distribute + if distribute.run_output_name.lower() == output_name.lower()] \ + if existing_image_template.distribute else [] + + if not distribute: + raise CLIError("Output with output name {} not in image template distribute list.".format(output_name)) + + distribute[0].versioning = None + + return cached_put(cmd, client.virtual_machine_image_templates.begin_create_or_update, parameters=existing_image_template, # pylint: disable=line-too-long + resource_group_name=resource_group_name, image_template_name=image_template_name) + + +def show_template_output_versioning(cmd, client, resource_group_name, image_template_name, output_name): + _require_defer(cmd) + + existing_image_template = cached_get(cmd, client.virtual_machine_image_templates.get, + resource_group_name=resource_group_name, + image_template_name=image_template_name) + distribute = [distribute for distribute in existing_image_template.distribute + if distribute.run_output_name.lower() == output_name.lower()] \ + if existing_image_template.distribute else [] + + if not distribute: + raise CLIError("Output with output name {} not in image template distribute list.".format(output_name)) + + return distribute[0].versioning + + +def add_template_customizer(cmd, client, resource_group_name, image_template_name, customizer_name, customizer_type, + script_url=None, inline_script=None, valid_exit_codes=None, + restart_command=None, restart_check_command=None, restart_timeout=None, + file_source=None, dest_path=None, search_criteria=None, filters=None, update_limit=None): + _require_defer(cmd) + + from azure.mgmt.imagebuilder.models import (ImageTemplateShellCustomizer, ImageTemplatePowerShellCustomizer, + ImageTemplateRestartCustomizer, ImageTemplateFileCustomizer, + ImageTemplateWindowsUpdateCustomizer) + + existing_image_template = cached_get(cmd, client.virtual_machine_image_templates.get, + resource_group_name=resource_group_name, + image_template_name=image_template_name) + + if existing_image_template.customize is None: + existing_image_template.customize = [] + else: + for existing_customizer in existing_image_template.customize: + if existing_customizer.name == customizer_name: + raise CLIError("Output with output name {} already exists in image template {}." 
+ .format(customizer_name, image_template_name)) + + new_customizer = None + + if customizer_type.lower() == ScriptType.SHELL.value.lower(): # pylint:disable=no-member + new_customizer = ImageTemplateShellCustomizer(name=customizer_name, script_uri=script_url, inline=inline_script) + elif customizer_type.lower() == ScriptType.POWERSHELL.value.lower(): # pylint:disable=no-member + new_customizer = ImageTemplatePowerShellCustomizer(name=customizer_name, script_uri=script_url, + inline=inline_script, valid_exit_codes=valid_exit_codes) + elif customizer_type.lower() == ScriptType.WINDOWS_RESTART.value.lower(): # pylint:disable=no-member + new_customizer = ImageTemplateRestartCustomizer(name=customizer_name, restart_command=restart_command, + restart_check_command=restart_check_command, + restart_timeout=restart_timeout) + elif customizer_type.lower() == ScriptType.FILE.value.lower(): # pylint:disable=no-member + new_customizer = ImageTemplateFileCustomizer(name=customizer_name, source_uri=file_source, + destination=dest_path) + elif customizer_type.lower() == ScriptType.WINDOWS_UPDATE.value.lower(): + new_customizer = ImageTemplateWindowsUpdateCustomizer(name=customizer_name, search_criteria=search_criteria, + filters=filters, update_limit=update_limit) + + if not new_customizer: + raise CLIError("Cannot determine customizer from type {}.".format(customizer_type)) + + existing_image_template.customize.append(new_customizer) + + return cached_put(cmd, client.virtual_machine_image_templates.begin_create_or_update, parameters=existing_image_template, # pylint: disable=line-too-long + resource_group_name=resource_group_name, image_template_name=image_template_name) + + +def remove_template_customizer(cmd, client, resource_group_name, image_template_name, customizer_name): + existing_image_template = cached_get(cmd, client.virtual_machine_image_templates.get, + resource_group_name=resource_group_name, + image_template_name=image_template_name) + _require_defer(cmd) + + if not existing_image_template.customize: + raise CLIError("No customizers to remove.") + + new_customize = [] + for existing_customizer in existing_image_template.customize: + if existing_customizer.name == customizer_name: + continue + new_customize.append(existing_customizer) + + if len(new_customize) == len(existing_image_template.customize): + raise CLIError("Customizer with name {} not in image template customizer list.".format(customizer_name)) + + existing_image_template.customize = new_customize + + return cached_put(cmd, client.virtual_machine_image_templates.begin_create_or_update, parameters=existing_image_template, # pylint: disable=line-too-long + resource_group_name=resource_group_name, image_template_name=image_template_name) + + +def clear_template_customizer(cmd, client, resource_group_name, image_template_name): + _require_defer(cmd) + + existing_image_template = cached_get(cmd, client.virtual_machine_image_templates.get, + resource_group_name=resource_group_name, + image_template_name=image_template_name) + + if not existing_image_template.customize: + raise CLIError("No customizers to remove.") + + existing_image_template.customize = [] + + return cached_put(cmd, client.virtual_machine_image_templates.begin_create_or_update, parameters=existing_image_template, # pylint: disable=line-too-long + resource_group_name=resource_group_name, image_template_name=image_template_name) + + +def add_template_validator(cmd, client, resource_group_name, image_template_name, + dis_on_failure=False, source_validation_only=False): 
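+    # Note: like the other template-editing helpers in this file, this command works
+    # against the local object cache rather than the live resource: _require_defer
+    # enforces that the command is run with `--defer`, cached_get reads the cached
+    # template, and cached_put stores the modified template back in the cache until
+    # it is finally flushed to Azure.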
+ _require_defer(cmd) + from azure.mgmt.imagebuilder.models import ImageTemplatePropertiesValidate + + existing_image_template = cached_get(cmd, client.virtual_machine_image_templates.get, + resource_group_name=resource_group_name, + image_template_name=image_template_name) + if not existing_image_template.validate: + existing_image_template.validate = ImageTemplatePropertiesValidate( + continue_distribute_on_failure=dis_on_failure, source_validation_only=source_validation_only) + else: + existing_image_template.validate.continue_distribute_on_failure = dis_on_failure + existing_image_template.validate.source_validation_only = source_validation_only + + return cached_put(cmd, client.virtual_machine_image_templates.begin_create_or_update, + parameters=existing_image_template, resource_group_name=resource_group_name, + image_template_name=image_template_name) + + +def remove_template_validator(cmd, client, resource_group_name, image_template_name): + _require_defer(cmd) + existing_image_template = cached_get(cmd, client.virtual_machine_image_templates.get, + resource_group_name=resource_group_name, + image_template_name=image_template_name) + + if not existing_image_template.validate: + raise ResourceNotFoundError("No validate existing in this image template, no need to remove.") + + existing_image_template.validate = None + + return cached_put(cmd, client.virtual_machine_image_templates.begin_create_or_update, parameters=existing_image_template, # pylint: disable=line-too-long + resource_group_name=resource_group_name, image_template_name=image_template_name) + + +def show_template_validator(cmd, client, resource_group_name, image_template_name): + _require_defer(cmd) + + existing_image_template = cached_get(cmd, client.virtual_machine_image_templates.get, + resource_group_name=resource_group_name, + image_template_name=image_template_name) + return existing_image_template.validate + + +def add_or_update_template_optimizer(cmd, client, resource_group_name, image_template_name, enable_vm_boot=None): + _require_defer(cmd) + existing_image_template = cached_get(cmd, client.virtual_machine_image_templates.get, + resource_group_name=resource_group_name, + image_template_name=image_template_name) + + from azure.mgmt.imagebuilder.models import ImageTemplatePropertiesOptimize, ImageTemplatePropertiesOptimizeVmBoot + image_template_properties_optimize = existing_image_template.optimize or ImageTemplatePropertiesOptimize() + + if enable_vm_boot is not None: + state = "Enabled" if enable_vm_boot else "Disabled" + vm_boot = ImageTemplatePropertiesOptimizeVmBoot(state=state) + image_template_properties_optimize.vm_boot = vm_boot + existing_image_template.optimize = image_template_properties_optimize + + return cached_put(cmd, client.virtual_machine_image_templates.begin_create_or_update, + parameters=existing_image_template, resource_group_name=resource_group_name, + image_template_name=image_template_name) + + +def remove_template_optimizer(cmd, client, resource_group_name, image_template_name): + _require_defer(cmd) + existing_image_template = cached_get(cmd, client.virtual_machine_image_templates.get, + resource_group_name=resource_group_name, + image_template_name=image_template_name) + + if not existing_image_template.optimize: + raise ResourceNotFoundError("No optimize existing in this image template, no need to clear.") + + existing_image_template.optimize = None + + return cached_put(cmd, client.virtual_machine_image_templates.begin_create_or_update, parameters=existing_image_template, # pylint: 
disable=line-too-long + resource_group_name=resource_group_name, image_template_name=image_template_name) + + +def show_template_optimizer(cmd, client, resource_group_name, image_template_name): + _require_defer(cmd) + + existing_image_template = cached_get(cmd, client.virtual_machine_image_templates.get, + resource_group_name=resource_group_name, + image_template_name=image_template_name) + return existing_image_template.optimize + + +def add_template_error_handler(cmd, client, resource_group_name, image_template_name, + on_customizer_error=None, on_validation_error=None): + _require_defer(cmd) + from azure.mgmt.imagebuilder.models import ImageTemplatePropertiesErrorHandling + + existing_image_template = cached_get(cmd, client.virtual_machine_image_templates.get, + resource_group_name=resource_group_name, + image_template_name=image_template_name) + if not existing_image_template.error_handling: + existing_image_template.error_handling = ImageTemplatePropertiesErrorHandling( + on_customizer_error=on_customizer_error, on_validation_error=on_validation_error + ) + else: + existing_image_template.error_handling.on_customizer_error = on_customizer_error + existing_image_template.error_handling.on_validation_error = on_validation_error + + return cached_put(cmd, client.virtual_machine_image_templates.begin_create_or_update, + parameters=existing_image_template, resource_group_name=resource_group_name, + image_template_name=image_template_name) + + +def remove_template_error_handler(cmd, client, resource_group_name, image_template_name): + _require_defer(cmd) + existing_image_template = cached_get(cmd, client.virtual_machine_image_templates.get, + resource_group_name=resource_group_name, + image_template_name=image_template_name) + + if not existing_image_template.error_handling: + raise ResourceNotFoundError("No error handler existing in this image template, no need to clear.") + + existing_image_template.error_handling = None + + return cached_put(cmd, client.virtual_machine_image_templates.begin_create_or_update, + parameters=existing_image_template, resource_group_name=resource_group_name, + image_template_name=image_template_name) + + +def show_template_error_handler(cmd, client, resource_group_name, image_template_name): + _require_defer(cmd) + + existing_image_template = cached_get(cmd, client.virtual_machine_image_templates.get, + resource_group_name=resource_group_name, + image_template_name=image_template_name) + return existing_image_template.error_handling +# endregion diff --git a/src/azure-cli/azure/cli/command_modules/vm/azure_stack/_params.py b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/_params.py new file mode 100644 index 00000000000..566d3cd4f81 --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/_params.py @@ -0,0 +1,1726 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# -------------------------------------------------------------------------------------------- + +# pylint: disable=line-too-long, too-many-lines +from argcomplete.completers import FilesCompleter + +from knack.arguments import CLIArgumentType +from knack.deprecation import Deprecated + +from azure.cli.core.profiles import ResourceType +from azure.cli.core.commands.parameters import get_datetime_type +from azure.cli.core.commands.validators import ( + get_default_location_from_resource_group, validate_file_or_dict) +from azure.cli.core.commands.parameters import ( + get_location_type, get_resource_name_completion_list, tags_type, get_three_state_flag, + file_type, get_enum_type, zone_type, zones_type) +from azure.cli.command_modules.vm.azure_stack._actions import _resource_not_exists +from azure.cli.command_modules.vm.azure_stack._completers import ( + get_urn_aliases_completion_list, get_vm_size_completion_list, get_vm_run_command_completion_list) +from azure.cli.command_modules.vm.azure_stack._constants import COMPATIBLE_SECURITY_TYPE_VALUE +from azure.cli.command_modules.vm.azure_stack._validators import ( + validate_nsg_name, validate_vm_nics, validate_vm_nic, validate_vmss_disk, + validate_asg_names_or_ids, validate_keyvault, _validate_proximity_placement_group, + validate_vm_name_for_monitor_metrics, validate_secure_vm_guest_state_sas) + +from azure.cli.command_modules.vm.azure_stack._vm_utils import MSI_LOCAL_ID +from azure.cli.command_modules.vm.azure_stack._image_builder import ScriptType + +from azure.cli.command_modules.monitor.validators import validate_metric_dimension +from azure.cli.command_modules.monitor.actions import get_period_type + + +# pylint: disable=too-many-statements, too-many-branches, too-many-locals, too-many-lines +def load_arguments(self, _): + # Model imports + DiskStorageAccountTypes = self.get_models('DiskStorageAccountTypes', operation_group='disks') + SnapshotStorageAccountTypes = self.get_models('SnapshotStorageAccountTypes', operation_group='snapshots') + UpgradeMode, CachingTypes, OperatingSystemTypes = self.get_models('UpgradeMode', 'CachingTypes', 'OperatingSystemTypes') + HyperVGenerationTypes = self.get_models('HyperVGenerationTypes') + DedicatedHostLicenseTypes = self.get_models('DedicatedHostLicenseTypes') + OrchestrationServiceNames, OrchestrationServiceStateAction = self.get_models('OrchestrationServiceNames', 'OrchestrationServiceStateAction', operation_group='virtual_machine_scale_sets') + RebootSetting, VMGuestPatchClassificationWindows, VMGuestPatchClassificationLinux = self.get_models('VMGuestPatchRebootSetting', 'VMGuestPatchClassificationWindows', 'VMGuestPatchClassificationLinux') + GallerySharingPermissionTypes = self.get_models('GallerySharingPermissionTypes', operation_group='shared_galleries') + ReplicationMode = self.get_models('ReplicationMode', operation_group='gallery_image_versions') + DiskControllerTypes = self.get_models('DiskControllerTypes', operation_group='virtual_machines') + + # REUSABLE ARGUMENT DEFINITIONS + name_arg_type = CLIArgumentType(options_list=['--name', '-n'], metavar='NAME') + multi_ids_type = CLIArgumentType(nargs='+') + existing_vm_name = CLIArgumentType(overrides=name_arg_type, + configured_default='vm', + help="The name of the Virtual Machine. 
You can configure the default using `az configure --defaults vm=`", + completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines'), id_part='name') + existing_disk_name = CLIArgumentType(overrides=name_arg_type, help='The name of the managed disk', completer=get_resource_name_completion_list('Microsoft.Compute/disks'), id_part='name') + existing_snapshot_name = CLIArgumentType(overrides=name_arg_type, help='The name of the snapshot', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots'), id_part='name') + vmss_name_type = CLIArgumentType(name_arg_type, + configured_default='vmss', + completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), + help="Scale set name. You can configure the default using `az configure --defaults vmss=`", + id_part='name') + + extension_instance_name_type = CLIArgumentType(help="Name of extension instance, which can be customized. Default: name of the extension.") + image_template_name_type = CLIArgumentType(overrides=name_arg_type, id_part='name') + disk_encryption_set_name = CLIArgumentType(overrides=name_arg_type, help='Name of disk encryption set.', id_part='name') + ephemeral_placement_type = CLIArgumentType(options_list=['--ephemeral-os-disk-placement', '--ephemeral-placement'], arg_type=get_enum_type(self.get_models('DiffDiskPlacement')), min_api='2019-12-01') + + license_type = CLIArgumentType( + help="Specifies that the Windows image or disk was licensed on-premises. To enable Azure Hybrid Benefit for " + "Windows Server, use 'Windows_Server'. To enable Multi-tenant Hosting Rights for Windows 10, " + "use 'Windows_Client'. For more information see the Azure Windows VM online docs.", + arg_type=get_enum_type(['Windows_Server', 'Windows_Client', 'RHEL_BYOS', 'SLES_BYOS', 'RHEL_BASE', + 'RHEL_SAPAPPS', 'RHEL_SAPHA', 'RHEL_EUS', 'RHEL_BASESAPAPPS', 'RHEL_BASESAPHA', 'SLES_STANDARD', 'SLES', 'SLES_SAP', 'SLES_HPC', + 'None', 'RHEL_ELS_6', 'UBUNTU_PRO', 'UBUNTU'])) + + # StorageAccountTypes renamed to DiskStorageAccountTypes in 2018_06_01 of azure-mgmt-compute + DiskStorageAccountTypes = DiskStorageAccountTypes or self.get_models('StorageAccountTypes') + + if DiskStorageAccountTypes: + disk_sku = CLIArgumentType(arg_type=get_enum_type(DiskStorageAccountTypes)) + else: + # StorageAccountTypes introduced in api version 2016_04_30_preview of Resource.MGMT.Compute package.. + # However, 2017-03-09-profile targets version 2016-03-30 of compute package. + disk_sku = CLIArgumentType(arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS'])) + + if SnapshotStorageAccountTypes: + snapshot_sku = CLIArgumentType(arg_type=get_enum_type(SnapshotStorageAccountTypes)) + else: + # SnapshotStorageAccountTypes introduced in api version 2018_04_01 of Resource.MGMT.Compute package.. + # However, 2017-03-09-profile targets version 2016-03-30 of compute package. 
+ snapshot_sku = CLIArgumentType(arg_type=get_enum_type(['Premium_LRS', 'Standard_LRS'])) + + # special case for `network nic scale-set list` command alias + with self.argument_context('network nic scale-set list') as c: + c.argument('virtual_machine_scale_set_name', options_list=['--vmss-name'], completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets'), id_part='name') + + HyperVGenerationTypes = HyperVGenerationTypes or self.get_models('HyperVGeneration', operation_group='disks') + if HyperVGenerationTypes: + hyper_v_gen_sku = CLIArgumentType(arg_type=get_enum_type(HyperVGenerationTypes, default="V1")) + else: + hyper_v_gen_sku = CLIArgumentType(arg_type=get_enum_type(["V1", "V2"], default="V1")) + disk_snapshot_hyper_v_gen_sku = get_enum_type(HyperVGenerationTypes) if HyperVGenerationTypes else get_enum_type(["V1", "V2"]) + + ultra_ssd_enabled_type = CLIArgumentType( + arg_type=get_three_state_flag(), min_api='2018-06-01', + help='Enables or disables the capability to have 1 or more managed data disks with UltraSSD_LRS storage account') + + scale_in_policy_type = CLIArgumentType( + nargs='+', arg_type=get_enum_type(self.get_models('VirtualMachineScaleSetScaleInRules')), + help='Specify the scale-in policy (space delimited) that decides which virtual machines are chosen for removal when a Virtual Machine Scale Set is scaled-in.' + ) + + edge_zone_type = CLIArgumentType( + help='The name of edge zone.', + min_api='2020-12-01' + ) + + t_shared_to = self.get_models('SharedToValues', operation_group='shared_galleries') + shared_to_type = CLIArgumentType( + arg_type=get_enum_type(t_shared_to), + help='The query parameter to decide what shared galleries to fetch when doing listing operations. ' + 'If not specified, list by subscription id.' + ) + + marker_type = CLIArgumentType( + help='A string value that identifies the portion of the list of containers to be ' + 'returned with the next listing operation. The operation returns the NextMarker value within ' + 'the response body if the listing operation did not return all containers remaining to be listed ' + 'with the current page. If specified, this generator will begin returning results from the point ' + 'where the previous generator stopped.') + + enable_vtpm_type = CLIArgumentType(arg_type=get_three_state_flag(), min_api='2020-12-01', help='Enable vTPM.') + enable_secure_boot_type = CLIArgumentType(arg_type=get_three_state_flag(), min_api='2020-12-01', help='Enable secure boot.') + # The `Standard` is used for backward compatibility to allow customers to keep their current behavior after changing the default values to Trusted Launch VMs in the future. 
+ t_security = [x.value for x in self.get_models('SecurityTypes') or []] + [COMPATIBLE_SECURITY_TYPE_VALUE] + security_type = CLIArgumentType(arg_type=get_enum_type(t_security), min_api='2020-12-01', help='Specify the security type of the virtual machine.') + enable_auto_os_upgrade_type = CLIArgumentType(arg_type=get_three_state_flag(), min_api='2018-10-01', + help='Indicate whether OS upgrades should automatically be applied to scale set instances in a rolling fashion when a newer version of the OS image becomes available.') + gallery_image_name_type = CLIArgumentType(options_list=['--gallery-image-definition', '-i'], help='The name of the community gallery image definition from which the image versions are to be listed.', id_part='child_name_2') + gallery_image_name_version_type = CLIArgumentType(options_list=['--gallery-image-version', '-e'], help='The name of the gallery image version to be created. Needs to follow semantic version name pattern: The allowed characters are digit and period. Digits must be within the range of a 32-bit integer. Format: <MajorVersion>.<MinorVersion>.<Patch>', id_part='child_name_3') + public_gallery_name_type = CLIArgumentType(help='The public name of community gallery.', id_part='child_name_1') + disk_controller_type = CLIArgumentType(help='Specify the disk controller type configured for the VM or VMSS.', arg_type=get_enum_type(DiskControllerTypes), arg_group='Storage', is_preview=True) + + # region MixedScopes + for scope in ['vm', 'disk', 'snapshot', 'image', 'sig']: + with self.argument_context(scope) as c: + c.argument('tags', tags_type) + + for scope in ['disk', 'snapshot']: + with self.argument_context(scope) as c: + c.ignore('source_blob_uri', 'source_disk', 'source_snapshot', 'source_restore_point') + c.argument('source_storage_account_id', help='used when source blob is in a different subscription') + c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int) + c.argument('duration_in_seconds', help='Time duration in seconds until the SAS access expires', type=int) + if self.supported_api_version(min_api='2018-09-30', operation_group='disks'): + c.argument('access_level', arg_type=get_enum_type(['Read', 'Write']), default='Read', help='access level') + c.argument('hyper_v_generation', arg_type=disk_snapshot_hyper_v_gen_sku, help='The hypervisor generation of the Virtual Machine. Applicable to OS disks only.') + else: + c.ignore('access_level', 'for_upload', 'hyper_v_generation') + c.argument('encryption_type', min_api='2019-07-01', arg_type=get_enum_type(self.get_models('EncryptionType', operation_group='disks')), + help='Encryption type. EncryptionAtRestWithPlatformKey: Disk is encrypted with XStore managed key at rest. It is the default encryption type. EncryptionAtRestWithCustomerKey: Disk is encrypted with Customer managed key at rest.') + c.argument('disk_encryption_set', min_api='2019-07-01', help='Name or ID of disk encryption set that is used to encrypt the disk.') + c.argument('location', help='Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=<location>`.
If location is not specified and no default location is specified, the location will be automatically set to that of the resource group.') + operation_group = 'disks' if scope == 'disk' else 'snapshots' + c.argument('network_access_policy', min_api='2020-05-01', help='Policy for accessing the disk via network.', arg_type=get_enum_type(self.get_models('NetworkAccessPolicy', operation_group=operation_group))) + c.argument('disk_access', min_api='2020-05-01', help='Name or ID of the disk access resource for using private endpoints on disks.') + c.argument('enable_bursting', arg_type=get_three_state_flag(), help='Enable on-demand bursting beyond the provisioned performance target of the disk. On-demand bursting is disabled by default, and it does not apply to Ultra disks.') + c.argument('public_network_access', arg_type=get_enum_type(['Disabled', 'Enabled']), min_api='2021-04-01', is_preview=True, help='Customers can set on Managed Disks or Snapshots to control the export policy on the disk.') + c.argument('accelerated_network', arg_type=get_three_state_flag(), min_api='2021-04-01', is_preview=True, help='Customers can set on Managed Disks or Snapshots to enable accelerated networking if the OS disk image supports it.') + + for scope in ['disk create', 'snapshot create']: + with self.argument_context(scope) as c: + c.argument('source', help='source to create the disk/snapshot from, including unmanaged blob uri, managed disk id or name, or snapshot id or name') + c.argument('secure_vm_disk_encryption_set', min_api='2021-08-01', help='Name or ID of disk encryption set created with ConfidentialVmEncryptedWithCustomerKey encryption type.') + # endregion + + # region Disks + with self.argument_context('disk grant-access', resource_type=ResourceType.MGMT_COMPUTE, operation_group='disks') as c: + c.argument('secure_vm_guest_state_sas', options_list=['--secure-vm-guest-state-sas', '-s'], min_api='2022-03-02', + action='store_true', validator=validate_secure_vm_guest_state_sas, + help="Get SAS on managed disk with VM guest state. It will be used by default when the create option of disk is 'secureOSUpload'") + # endregion + + # region Disks + with self.argument_context('disk', resource_type=ResourceType.MGMT_COMPUTE, operation_group='disks') as c: + # The `Standard` is used for backward compatibility to allow customers to keep their current behavior after changing the default values to Trusted Launch VMs in the future. + t_disk_security = [x.value for x in self.get_models('DiskSecurityTypes', operation_group='disks') or []] + [COMPATIBLE_SECURITY_TYPE_VALUE] + + c.argument('zone', zone_type, min_api='2017-03-30', options_list=['--zone']) # TODO: --size-gb currently has claimed -z. We can do a breaking change later if we want to. + c.argument('disk_name', existing_disk_name, completer=get_resource_name_completion_list('Microsoft.Compute/disks')) + c.argument('name', arg_type=name_arg_type) + c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU') + c.argument('os_type', arg_type=get_enum_type(OperatingSystemTypes), help='The Operating System type of the Disk.') + c.argument('disk_iops_read_write', type=int, min_api='2018-06-01', help='The number of IOPS allowed for this disk. Only settable for UltraSSD disks. One operation can transfer between 4k and 256k bytes') + c.argument('disk_mbps_read_write', type=int, min_api='2018-06-01', help="The bandwidth allowed for this disk. Only settable for UltraSSD disks. MBps means millions of bytes per second with ISO notation of powers of 10") + c.argument('upload_size_bytes', type=int, min_api='2019-03-01', + help='The size (in bytes) of the contents of the upload including the VHD footer. Min value: 20972032. Max value: 35183298347520. This parameter is required if --upload-type is specified') + c.argument('max_shares', type=int, help='The maximum number of VMs that can attach to the disk at the same time. Value greater than one indicates a disk that can be mounted on multiple VMs at the same time') + c.argument('disk_iops_read_only', type=int, help='The total number of IOPS that will be allowed across all VMs mounting the shared disk as ReadOnly. One operation can transfer between 4k and 256k bytes') + c.argument('disk_mbps_read_only', type=int, help='The total throughput (MBps) that will be allowed across all VMs mounting the shared disk as ReadOnly. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of 10') + c.argument('image_reference', help='ID or URN (publisher:offer:sku:version) of the image from which to create a disk') + c.argument('image_reference_lun', type=int, help='If the disk is created from an image\'s data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null') + c.argument('gallery_image_reference', help='ID of the Compute, Shared or Community Gallery image version from which to create a disk. For details about valid format, please refer to the help sample') + c.ignore('gallery_image_reference_type') + c.argument('gallery_image_reference_lun', type=int, help='If the disk is created from an image\'s data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null') + c.argument('logical_sector_size', type=int, help='Logical sector size in bytes for Ultra disks. Supported values are 512 and 4096. 4096 is the default.') + c.argument('tier', help='Performance tier of the disk (e.g., P4, S10) as described here: https://azure.microsoft.com/pricing/details/managed-disks/. Does not apply to Ultra disks.') + c.argument('edge_zone', edge_zone_type) + c.argument('security_type', arg_type=get_enum_type(t_disk_security), help='The security type of the VM. Applicable for OS disks only.', min_api='2020-12-01') + c.argument('support_hibernation', arg_type=get_three_state_flag(), help='Indicate the OS on a disk supports hibernation.', min_api='2020-12-01') + c.argument('architecture', arg_type=get_enum_type(self.get_models('Architecture', operation_group='disks')), min_api='2021-12-01', help='CPU architecture.') + c.argument('data_access_auth_mode', arg_type=get_enum_type(['AzureActiveDirectory', 'None']), min_api='2021-12-01', help='Specify the auth mode when exporting or uploading to a disk or snapshot.') + c.argument('optimized_for_frequent_attach', arg_type=get_three_state_flag(), min_api='2023-04-02', + help='Setting this property to true improves reliability and performance of data disks that are frequently (more than 5 times a day) being detached from one virtual machine and attached to another. ' + 'This property should not be set for disks that are not detached and attached frequently as it causes the disks to not align with the fault domain of the virtual machine.') + # endregion
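+ # A hedged usage sketch (resource names assumed) exercising the disk arguments registered above:
+ #   az disk create -g my-rg -n my-disk --size-gb 128 --sku Premium_LRS --hyper-v-generation V2
+ #   az disk grant-access -g my-rg -n my-disk --access-level Read --duration-in-seconds 3600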
+ + # region Disks + with self.argument_context('disk create', resource_type=ResourceType.MGMT_COMPUTE, operation_group='disks') as c: + c.argument('security_data_uri', min_api='2022-03-02', help='Please specify the blob URI of VHD to be imported into VM guest state') + c.argument('for_upload', arg_type=get_three_state_flag(), min_api='2018-09-30', + deprecate_info=c.deprecate(target='--for-upload', redirect='--upload-type Upload', hide=True), + help='Create the disk for uploading blobs. Replaced by "--upload-type Upload"') + c.argument('upload_type', arg_type=get_enum_type(['Upload', 'UploadWithSecurityData']), min_api='2018-09-30', + help="Create the disk for upload scenario. 'Upload' is for standard disk upload only. 'UploadWithSecurityData' is for OS Disk upload along with VM Guest State. Please note that 'UploadWithSecurityData' is not valid for data disk upload; it is only to be used for OS Disk upload at present.") + c.argument('performance_plus', arg_type=get_three_state_flag(), min_api='2022-07-02', help='Set this flag to true to get a boost on the performance target of the disk deployed. This flag can only be set at disk creation time and cannot be disabled after being enabled') + # endregion
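+ # A hedged sketch (names assumed) of the upload flow that --upload-type above enables:
+ #   az disk create -g my-rg -n upload-disk --upload-type Upload --upload-size-bytes 20972032 --sku Standard_LRS
+ #   az disk grant-access -g my-rg -n upload-disk --access-level Write --duration-in-seconds 86400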
+ + # region Snapshots + with self.argument_context('snapshot', resource_type=ResourceType.MGMT_COMPUTE, operation_group='snapshots') as c: + c.argument('snapshot_name', existing_snapshot_name, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/snapshots')) + c.argument('name', arg_type=name_arg_type) + c.argument('sku', arg_type=snapshot_sku) + c.argument('incremental', arg_type=get_three_state_flag(), min_api='2019-03-01', + help='Whether a snapshot is incremental. Incremental snapshots on the same disk occupy less space than full snapshots and can be diffed') + c.argument('edge_zone', edge_zone_type) + c.argument('copy_start', arg_type=get_three_state_flag(), min_api='2021-04-01', + help='Create snapshot by using a deep copy process, where the resource creation is considered complete only after all data has been copied from the source.') + c.argument('architecture', arg_type=get_enum_type(self.get_models('Architecture', operation_group='snapshots')), min_api='2021-12-01', help='CPU architecture.') + c.argument('for_upload', arg_type=get_three_state_flag(), min_api='2018-09-30', + help='Create the snapshot for uploading blobs later on through storage commands. Run "az snapshot grant-access --access-level Write" to retrieve the snapshot\'s SAS token.') + c.argument('elastic_san_resource_id', min_api='2023-04-02', + options_list=['--elastic-san-resource-id', '--elastic-san-id'], + help='This is the ARM id of the source elastic san volume snapshot.') + c.argument('bandwidth_copy_speed', min_api='2023-10-02', + help='If this field is set on a snapshot and createOption is CopyStart, the snapshot will be copied at a quicker speed.', + arg_type=get_enum_type(["None", "Enhanced"])) + + with self.argument_context('snapshot grant-access', resource_type=ResourceType.MGMT_COMPUTE, operation_group='snapshots') as c: + c.argument('file_format', arg_type=get_enum_type(self.get_models('FileFormat', operation_group='snapshots')), help='Used to specify the file format when making request for SAS on a VHDX file format snapshot.') + # endregion + + # region Images + with self.argument_context('image') as c: + c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux'])) + c.argument('image_name', arg_type=name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/images')) + c.argument('tags', tags_type) + + with self.argument_context('image create') as c: + # here we collapse all the different image sources under 2 common arguments: --os-disk-source and --data-disk-sources + c.argument('name', arg_type=name_arg_type, help='new image name') + c.argument('source', help='OS disk source from the same region, including a virtual machine ID or name, OS disk blob URI, managed OS disk ID or name, or OS snapshot ID or name') + c.argument('data_disk_sources', nargs='+', help='Space-separated list of data disk sources, including unmanaged blob URI, managed disk ID or name, or snapshot ID or name') + c.argument('zone_resilient', min_api='2017-12-01', arg_type=get_three_state_flag(), help='Specifies whether an image is zone resilient or not. ' + 'Default is false. Zone resilient images can be created only in regions that provide Zone Redundant Storage') + c.argument('storage_sku', arg_type=disk_sku, help='The SKU of the storage account with which to create the VM image. Unused if source VM is specified.') + c.argument('os_disk_caching', arg_type=get_enum_type(CachingTypes), help="Storage caching type for the image's OS disk.") + c.argument('data_disk_caching', arg_type=get_enum_type(CachingTypes), + help="Storage caching type for the image's data disk.") + c.argument('hyper_v_generation', arg_type=hyper_v_gen_sku, min_api="2019-03-01", help='The hypervisor generation of the Virtual Machine created from the image.') + c.ignore('source_virtual_machine', 'os_blob_uri', 'os_disk', 'os_snapshot', 'data_blob_uris', 'data_disks', 'data_snapshots') + c.argument('edge_zone', edge_zone_type) + # endregion + + # region Image Templates + with self.argument_context('image builder') as c: + ib_output_name_help = "Name of the image builder run output." + + c.argument('location', get_location_type(self.cli_ctx)) + c.argument('scripts', nargs='+', help="Space-separated list of shell or powershell scripts to customize the image with. Each script must be a publicly accessible URL." + " Infers type of script from file extension ('.sh' or '.ps1') or from source type. For more customizer options and flexibility, see: 'az image template customizer add'") + c.argument('source', options_list=["--image-source", "-i"], help="The base image to customize.
Must be a valid platform image URN, platform image alias, Red Hat ISO image URI, managed image name/ID, or shared image version ID.") + c.argument('image_template_name', image_template_name_type, help="The name of the image template.") + c.argument('checksum', help="The SHA256 checksum of the Red Hat ISO image") + c.argument('managed_image_destinations', nargs='+', help='Managed image output distributor information. Space-separated list of key-value pairs. E.g "image_1=westus2 image_2=westus". Each key is the name or resource ID of the managed image to be created. Each value is the location of the image.') + c.argument('shared_image_destinations', nargs='+', help='Shared image gallery (sig) output distributor information. Space-separated list of key-value pairs. E.g "my_gallery_1/image_def_1=eastus,westus my_gallery_2/image_def_2=uksouth,canadaeast,francesouth." ' + 'Each key is the sig image definition ID or sig gallery name and sig image definition delimited by a "/". Each value is a comma-delimited list of replica locations.') + c.argument('output_name', help=ib_output_name_help) + c.ignore('destinations_lists', 'scripts_list', 'source_dict') + + with self.argument_context('image builder create') as c: + ib_source_type = CLIArgumentType(arg_group="Image Source") + ib_customizer_type = CLIArgumentType(arg_group="Customizer") + ib_cutput_type = CLIArgumentType(arg_group="Output") + + c.argument('build_timeout', type=int, help="The Maximum duration to wait while building the image template, in minutes. Default is 60.") + c.argument('image_template', help='Local path or URL to an image template file. When using --image-template, all other parameters are ignored except -g and -n. Reference: https://docs.microsoft.com/azure/virtual-machines/linux/image-builder-json') + c.argument('identity', nargs='+', help='List of user assigned identities (name or ID, space delimited) of the image template.') + c.argument('staging_resource_group', min_api='2022-02-14', help='The staging resource group id in the same subscription as the image template that will be used to build the image.') + + # VM profile + c.argument('vm_size', help='Size of the virtual machine used to build, customize and capture images. Omit or specify empty string to use the default (Standard_D1_v2)') + c.argument('os_disk_size', type=int, help='Size of the OS disk in GB. Omit or specify 0 to use Azure\'s default OS disk size') + c.argument('vnet', help='Name of VNET to deploy the build virtual machine. You should only specify it when subnet is a name') + c.argument('subnet', help='Name or ID of subnet to deploy the build virtual machine') + c.argument('proxy_vm_size', help='Size of the virtual machine used to build, customize and capture images (Standard_D1_v2 for Gen1 images and Standard_D2ds_v4 for Gen2 images).') + c.argument('build_vm_identities', nargs='+', help='Optional configuration of the virtual network to use to deploy the build virtual machine in. Omit if no specific virtual network needs to be used.') + c.argument('validator', nargs='+', min_api='2022-07-01', + help='The type of validation you want to use on the Image. 
For example, "Shell" can be shell validation.') + + # Image Source Arguments + c.argument('source', arg_type=ib_source_type) + c.argument('checksum', arg_type=ib_source_type) + c.argument('', arg_type=ib_source_type) + + # Image Customizer Arguments + c.argument('scripts', arg_type=ib_customizer_type) + c.argument('', arg_type=ib_customizer_type) + c.argument('', arg_type=ib_customizer_type) + + # Image Output Arguments + c.argument('managed_image_destinations', arg_type=ib_cutput_type) + c.argument('shared_image_destinations', arg_type=ib_cutput_type) + c.argument('output_name', arg_type=ib_cutput_type) + + for scope in ['image builder identity assign', 'image builder identity remove']: + with self.argument_context(scope, min_api='2022-02-14') as c: + c.argument('user_assigned', arg_group='Managed Identity', nargs='*', help='Specify one user assigned identity (name or ID, space delimited) of the image template.') + + with self.argument_context('image builder output') as c: + ib_sig_regions_help = "Space-separated list of regions to replicate the image version into." + ib_img_location_help = "Location where the customized image will be created." + + c.argument('gallery_image_definition', arg_group="Shared Image Gallery", help="Name or ID of the existing SIG image definition to create the customized image version with.") + c.argument('gallery_name', arg_group="Shared Image Gallery", help="Shared image gallery name, if image definition name and not ID was provided.") + c.argument('gallery_replication_regions', arg_group="Shared Image Gallery", nargs='+', help=ib_sig_regions_help) + c.argument('managed_image', arg_group="Managed Image", help="Name or ID of the customized managed image to be created.") + c.argument('managed_image_location', arg_group="Managed Image", help=ib_img_location_help) + + with self.argument_context('image builder output add') as c: + ib_artifact_tags_help = "Tags that will be applied to the output artifact once it has been created by the distributor. " + tags_type.settings['help'] + ib_artifact_tags_type = CLIArgumentType(overrides=tags_type, help=ib_artifact_tags_help, options_list=["--artifact-tags"]) + ib_default_loc_help = " Defaults to resource group's location." + + c.argument('output_name', help=ib_output_name_help + " Defaults to the name of the managed image or sig image definition.") + c.argument('gallery_replication_regions', arg_group="Shared Image Gallery", nargs='+', help=ib_sig_regions_help + ib_default_loc_help) + c.argument('managed_image_location', arg_group="Managed Image", help=ib_img_location_help + ib_default_loc_help) + c.argument('is_vhd', arg_group="VHD", help="The output is a VHD distributor.", action='store_true') + c.argument('vhd_uri', arg_group="VHD", help="Optional Azure Storage URI for the distributed VHD blob. Omit to use the default (empty string) in which case VHD would be published to the storage account in the staging resource group.") + c.argument('versioning', get_enum_type(['Latest', 'Source']), help="Describe how to generate new x.y.z version number for distribution.") + c.argument('tags', arg_type=ib_artifact_tags_type) + c.ignore('location') + + with self.argument_context('image builder output versioning set') as c: + c.argument('scheme', get_enum_type(['Latest', 'Source']), help='Version numbering scheme to be used.') + c.argument('major', type=int, help='Major version for the generated version number. Determine what is "latest" based on versions with this value as the major version. 
-1 is equivalent to leaving it unset.') + + with self.argument_context('image builder customizer') as c: + ib_win_restart_type = CLIArgumentType(arg_group="Windows Restart") + ib_win_update_type = CLIArgumentType(arg_group="Windows Update") + ib_script_type = CLIArgumentType(arg_group="Shell and Powershell") + ib_powershell_type = CLIArgumentType(arg_group="Powershell") + ib_file_customizer_type = CLIArgumentType(arg_group="File") + + c.argument('customizer_name', help="Name of the customizer.") + c.argument('customizer_type', options_list=['--type', '-t'], help="Type of customizer to be added to the image template.", arg_type=get_enum_type(ScriptType)) + + # Script Args + c.argument('script_url', arg_type=ib_script_type, help="URL of script to customize the image with. The URL must be publicly accessible.") + c.argument('inline_script', arg_type=ib_script_type, nargs='+', help="Space-separated list of inline script lines to customize the image with.") + + # Powershell Specific Args + c.argument('valid_exit_codes', options_list=['--exit-codes', '-e'], arg_type=ib_powershell_type, nargs='+', help="Space-separated list of valid exit codes, as integers") + + # Windows Restart Specific Args + c.argument('restart_command', arg_type=ib_win_restart_type, help="Command to execute the restart operation.") + c.argument('restart_check_command', arg_type=ib_win_restart_type, help="Command to verify that restart succeeded.") + c.argument('restart_timeout', arg_type=ib_win_restart_type, help="Restart timeout specified as a string consisting of a magnitude and unit, e.g. '5m' (5 minutes) or '2h' (2 hours)", default="5m") + + # Windows Update Specific Args + c.argument('search_criteria', arg_type=ib_win_update_type, help='Criteria to search updates. Omit or specify empty string to use the default (search all). Refer to above link for examples and detailed description of this field.') + c.argument('filters', arg_type=ib_win_update_type, nargs='+', help='Space delimited filters to select updates to apply. Omit or specify empty array to use the default (no filter)') + c.argument('update_limit', arg_type=ib_win_update_type, help='Maximum number of updates to apply at a time. Omit or specify 0 to use the default (1000)') + + # File Args + c.argument('file_source', arg_type=ib_file_customizer_type, help="The URI of the file to be downloaded into the image. It can be a github link, SAS URI for Azure Storage, etc.") + c.argument('dest_path', arg_type=ib_file_customizer_type, help="The absolute destination path where the file specified in --file-source will be downloaded to in the image") + + with self.argument_context('image builder validator add', min_api='2022-02-14') as c: + c.argument('dis_on_failure', options_list=['--continue-distribute-on-failure', '--dis-on-failure'], arg_type=get_three_state_flag(), help="If validation fails and this parameter is set to false, output image(s) will not be distributed.") + c.argument('source_validation_only', arg_type=get_three_state_flag(), help="If this parameter is set to true, the image specified in the 'source' section will directly be validated. No separate build will be run to generate and then validate a customized image.")
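+ # A hedged sketch (template name and script URL assumed) of staging the customizer and
+ # validator arguments registered above into a deferred template:
+ #   az image builder customizer add -g my-rg -n my-template --customizer-name install-tools -t Shell --script-url https://example.com/install.sh --defer
+ #   az image builder validator add -g my-rg -n my-template --source-validation-only true --defer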
+ + for scope in ['image builder optimizer add', 'image builder optimizer update']: + with self.argument_context(scope, min_api='2022-07-01') as c: + c.argument('enable_vm_boot', arg_type=get_three_state_flag(), help='If this parameter is set to true, VM boot time will be improved by optimizing the final customized image output.') + + with self.argument_context('image builder error-handler add', min_api='2023-07-01') as c: + from azure.mgmt.imagebuilder.models import OnBuildError + c.argument('on_customizer_error', arg_type=get_enum_type(OnBuildError), + help='If there is a customizer error and this field is set to "cleanup", the build VM and associated network resources will be cleaned up. This is the default behavior. ' + 'If there is a customizer error and this field is set to "abort", the build VM will be preserved.') + c.argument('on_validation_error', arg_type=get_enum_type(OnBuildError), + help='If there is a validation error and this field is set to "cleanup", the build VM and associated network resources will be cleaned up. This is the default behavior. ' + 'If there is a validation error and this field is set to "abort", the build VM will be preserved.') + # endregion + + # region AvailabilitySets + with self.argument_context('vm availability-set') as c: + c.argument('availability_set_name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set') + + with self.argument_context('vm availability-set create') as c: + c.argument('availability_set_name', name_arg_type, validator=get_default_location_from_resource_group, help='Name of the availability set') + c.argument('platform_update_domain_count', type=int, help='Update Domain count. If unspecified, the server will pick the most optimal number, such as 5.') + c.argument('platform_fault_domain_count', type=int, help='Fault Domain count.') + c.argument('validate', help='Generate and validate the ARM template without creating any resources.', action='store_true') + c.argument('unmanaged', action='store_true', min_api='2016-04-30-preview', help='Contained VMs should use unmanaged disks') + + with self.argument_context('vm availability-set update') as c: + if self.supported_api_version(max_api='2016-04-30-preview', operation_group='virtual_machines'): + c.argument('name', name_arg_type, id_part='name', completer=get_resource_name_completion_list('Microsoft.Compute/availabilitySets'), help='Name of the availability set') + c.argument('availability_set_name', options_list=['--availability-set-name']) + # endregion + + # region VirtualMachines + with self.argument_context('vm') as c: + c.argument('vm_name', existing_vm_name) + c.argument('size', completer=get_vm_size_completion_list) + c.argument('name', arg_type=name_arg_type) + c.argument('zone', zone_type, min_api='2017-03-30') + c.argument('caching', help='Disk caching policy', arg_type=get_enum_type(CachingTypes)) + c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. Can also reference an existing NSG by ID or specify "" for none.', arg_group='Network') + c.argument('nsg_rule', help='NSG rule to create when creating a new NSG.
Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux.', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH'])) + c.argument('application_security_groups', min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network') + c.argument('workspace', is_preview=True, arg_group='Monitor', help='Name or ID of Log Analytics Workspace. If you specify the workspace through its name, the workspace should be in the same resource group with the vm, otherwise a new workspace will be created.') + with self.argument_context('vm capture') as c: + c.argument('overwrite', action='store_true') + + with self.argument_context('vm update') as c: + c.argument('os_disk', min_api='2017-12-01', help="Managed OS disk ID or name to swap to") + c.argument('write_accelerator', nargs='*', min_api='2017-12-01', + help="enable/disable disk write accelerator. Use singular value 'true/false' to apply across, or specify individual disks, e.g. 'os=true 1=true 2=true' for os disk and data disks with lun of 1 & 2") + c.argument('disk_caching', nargs='*', help="Use singular value to apply across, or specify individual disks, e.g. 'os=ReadWrite 0=None 1=ReadOnly' to update the OS disk and two data disks") + c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type) + c.argument('enable_secure_boot', enable_secure_boot_type) + c.argument('enable_vtpm', enable_vtpm_type) + c.argument('size', help='The new size of the virtual machine. See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.', is_preview=True) + c.argument('ephemeral_os_disk_placement', arg_type=ephemeral_placement_type, + help='Only applicable when used with `--size`. Allows you to choose the Ephemeral OS disk provisioning location.') + c.argument('enable_hibernation', arg_type=get_three_state_flag(), min_api='2021-03-01', help='The flag that enables or disables hibernation capability on the VM.') + c.argument('security_type', arg_type=get_enum_type(["TrustedLaunch"], default=None), min_api='2022-11-01', help='Specify the security type of the virtual machine.') + + with self.argument_context('vm create') as c: + c.argument('name', name_arg_type, validator=_resource_not_exists(self.cli_ctx, 'Microsoft.Compute/virtualMachines')) + c.argument('vm_name', name_arg_type, id_part=None, help='Name of the virtual machine.', completer=None) + c.argument('os_disk_size_gb', type=int, help='the size of the os disk in GB', arg_group='Storage') + c.argument('availability_set', help='Name or ID of an existing availability set to add the VM to. None by default.') + c.argument('vmss', help='Name or ID of an existing virtual machine scale set that the virtual machine should be assigned to. None by default.') + c.argument('nsg', help='The name to use when creating a new Network Security Group (default) or referencing an existing one. Can also reference an existing NSG by ID or specify "" for none (\'""\' in Azure CLI using PowerShell or --% operator).', arg_group='Network') + c.argument('nsg_rule', help='NSG rule to create when creating a new NSG. Defaults to open ports for allowing RDP on Windows and allowing SSH on Linux.
NONE represents no NSG rule', arg_group='Network', arg_type=get_enum_type(['RDP', 'SSH', 'NONE'])) + c.argument('application_security_groups', resource_type=ResourceType.MGMT_NETWORK, min_api='2017-09-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network', validator=validate_asg_names_or_ids) + c.argument('boot_diagnostics_storage', + help='pre-existing storage account name or its blob uri to capture boot diagnostics. Its sku should be one of Standard_GRS, Standard_LRS and Standard_RAGRS') + c.argument('accelerated_networking', resource_type=ResourceType.MGMT_NETWORK, min_api='2016-09-01', arg_type=get_three_state_flag(), arg_group='Network', + help="enable accelerated networking. Unless specified, CLI will enable it based on machine image and size") + if self.supported_api_version(min_api='2019-03-01', resource_type=ResourceType.MGMT_COMPUTE): + VirtualMachineEvictionPolicyTypes = self.get_models('VirtualMachineEvictionPolicyTypes', resource_type=ResourceType.MGMT_COMPUTE) + c.argument('eviction_policy', resource_type=ResourceType.MGMT_COMPUTE, min_api='2019-03-01', + arg_type=get_enum_type(VirtualMachineEvictionPolicyTypes, default=None), + help="The eviction policy for the Spot priority virtual machine. Default eviction policy is Deallocate for a Spot priority virtual machine") + c.argument('enable_agent', arg_type=get_three_state_flag(), min_api='2018-06-01', + help='Indicates whether virtual machine agent should be provisioned on the virtual machine. When this property is not specified, default behavior is to set it to true. This will ensure that VM Agent is installed on the VM so that extensions can be added to the VM later') + c.argument('enable_auto_update', arg_type=get_three_state_flag(), min_api='2020-06-01', + help='Indicate whether Automatic Updates is enabled for the Windows virtual machine') + c.argument('patch_mode', arg_type=get_enum_type(['AutomaticByOS', 'AutomaticByPlatform', 'Manual', 'ImageDefault']), min_api='2020-12-01', + help='Mode of in-guest patching to IaaS virtual machine. Allowed values for Windows VM: AutomaticByOS, AutomaticByPlatform, Manual. Allowed values for Linux VM: AutomaticByPlatform, ImageDefault. Manual - You control the application of patches to a virtual machine. You do this by applying patches manually inside the VM. In this mode, automatic updates are disabled; the parameter --enable-auto-update must be false. AutomaticByOS - The virtual machine will automatically be updated by the OS. The parameter --enable-auto-update must be true. AutomaticByPlatform - The virtual machine will automatically be updated by the platform. ImageDefault - The virtual machine\'s default patching configuration is used. The parameters --enable-agent and --enable-auto-update must be true') + c.argument('ssh_key_name', help='Use it as public key in virtual machine. It should be an existing SSH key resource in Azure.') + c.argument('enable_hotpatching', arg_type=get_three_state_flag(), help='Patch VMs without requiring a reboot. --enable-agent must be set and --patch-mode must be set to AutomaticByPlatform', min_api='2020-12-01') + c.argument('platform_fault_domain', min_api='2020-06-01', + help='Specify the scale set logical fault domain into which the virtual machine will be created. By default, the virtual machine will be automatically assigned to a fault domain that best maintains balance across available fault domains.
This is applicable only if the virtualMachineScaleSet property of this virtual machine is set. The virtual machine scale set that is referenced, must have platform fault domain count. This property cannot be updated once the virtual machine is created. Fault domain assignment can be viewed in the virtual machine instance view') + c.argument('count', type=int, is_preview=True, + help='Number of virtual machines to create. Value range is [2, 250], inclusive. Don\'t specify this parameter if you want to create a normal single VM. The VMs are created in parallel. The output of this command is an array of VMs instead of one single VM. Each VM has its own public IP, NIC. VNET and NSG are shared. It is recommended that no existing public IP, NIC, VNET and NSG are in resource group. When --count is specified, --attach-data-disks, --attach-os-disk, --boot-diagnostics-storage, --computer-name, --host, --host-group, --nics, --os-disk-name, --private-ip-address, --public-ip-address, --public-ip-address-dns-name, --storage-account, --storage-container-name, --subnet, --use-unmanaged-disk, --vnet-name are not allowed.') + c.argument('security_type', security_type) + c.argument('enable_secure_boot', enable_secure_boot_type) + c.argument('enable_vtpm', enable_vtpm_type) + c.argument('user_data', help='UserData for the VM. It can be passed in as file or string.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01') + c.argument('enable_hibernation', arg_type=get_three_state_flag(), min_api='2021-03-01', help='The flag that enable or disable hibernation capability on the VM.') + + with self.argument_context('vm create', arg_group='Storage') as c: + c.argument('attach_os_disk', help='Attach an existing OS disk to the VM. Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.') + c.argument('attach_data_disks', nargs='+', help='Attach existing data disks to the VM. Can use the name or ID of a managed disk or the URI to an unmanaged disk VHD.') + c.argument('os_disk_delete_option', arg_type=get_enum_type(self.get_models('DiskDeleteOptionTypes')), min_api='2021-03-01', help='Specify the behavior of the managed disk when the VM gets deleted i.e whether the managed disk is deleted or detached.') + c.argument('data_disk_delete_option', options_list=['--data-disk-delete-option', self.deprecate(target='--data-delete-option', redirect='--data-disk-delete-option', hide=True)], nargs='+', min_api='2021-03-01', help='Specify whether data disk should be deleted or detached upon VM deletion. If a single data disk is attached, the allowed values are Delete and Detach. For multiple data disks are attached, please use "=Delete =Detach" to configure each disk') + c.argument('source_snapshots_or_disks', options_list=['--source-snapshots-or-disks', '--source-resource'], nargs='+', min_api='2024-03-01', help='Create a data disk from a snapshot or another disk. Can use the ID of a disk or snapshot.') + c.argument('source_snapshots_or_disks_size_gb', options_list=['--source-snapshots-or-disks-size-gb', '--source-resource-size'], nargs='+', type=int, min_api='2024-03-01', help='The size of the source disk in GB') + c.argument('source_disk_restore_point', options_list=['--source-disk-restore-point', '--source-disk-rp'], nargs='+', min_api='2024-03-01', help='create a data disk from a disk restore point. 
Can use the ID of a disk restore point.') + c.argument('source_disk_restore_point_size_gb', options_list=['--source-disk-restore-point-size-gb', '--source-rp-size'], nargs='+', type=int, min_api='2024-03-01', help='The size of the source disk restore point in GB') + + with self.argument_context('vm create', arg_group='Dedicated Host', min_api='2019-03-01') as c: + c.argument('dedicated_host_group', options_list=['--host-group'], is_preview=True, help="Name or resource ID of the dedicated host group that the VM will reside in. --host and --host-group can't be used together.") + c.argument('dedicated_host', options_list=['--host'], is_preview=True, help="Resource ID of the dedicated host that the VM will reside in. --host and --host-group can't be used together.") + + with self.argument_context('vm update', arg_group='Dedicated Host', min_api='2019-03-01') as c: + c.argument('dedicated_host_group', options_list=['--host-group'], is_preview=True, help="Name or resource ID of the dedicated host group that the VM will reside in. --host and --host-group can't be used together. You should deallocate the VM before update, and start the VM after update. Please check out help for more examples.") + c.argument('dedicated_host', options_list=['--host'], is_preview=True, help="Resource ID of the dedicated host that the VM will reside in. --host and --host-group can't be used together. You should deallocate the VM before update, and start the VM after update. Please check out help for more examples.") + + with self.argument_context('vm open-port') as c: + c.argument('vm_name', name_arg_type, help='The name of the virtual machine to open inbound traffic on.') + c.argument('network_security_group_name', options_list=('--nsg-name',), help='The name of the network security group to create if one does not exist. Ignored if an NSG already exists.', validator=validate_nsg_name) + c.argument('apply_to_subnet', help='Allow inbound traffic on the subnet instead of the NIC', action='store_true') + c.argument('port', help="The port or port range (ex: 80-100) to open inbound traffic to. Use '*' to allow traffic to all ports. Use comma separated values to specify more than one port or port range.") + c.argument('priority', help='Rule priority, between 100 (highest priority) and 4096 (lowest priority). Must be unique for each rule in the collection.', type=int) + + with self.argument_context('vm list') as c: + c.argument('vmss', min_api='2021-11-01', help='List VM instances in a specific VMSS. Please specify the VMSS id or VMSS name') + + for scope in ['vm show', 'vm list']: + with self.argument_context(scope) as c: + c.argument('show_details', action='store_true', options_list=['--show-details', '-d'], help='show public ip address, FQDN, and power states. 
The command will run slowly.') + + for scope in ['vm show', 'vmss show']: + with self.argument_context(scope) as c: + c.argument('include_user_data', action='store_true', options_list=['--include-user-data', '-u'], help='Include the user data properties in the query result.', min_api='2021-03-01') + + for scope in ['vm get-instance-view', 'vm wait', 'vmss wait']: + with self.argument_context(scope) as c: + c.ignore('include_user_data') + + with self.argument_context('vm diagnostics') as c: + c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name']) + + with self.argument_context('vm diagnostics set') as c: + c.argument('storage_account', completer=get_resource_name_completion_list('Microsoft.Storage/storageAccounts')) + + with self.argument_context('vm install-patches') as c: + c.argument('maximum_duration', type=str, help='Specify the maximum amount of time that the operation will run. It must be an ISO 8601-compliant duration string such as PT4H (4 hours)') + c.argument('reboot_setting', arg_type=get_enum_type(RebootSetting), help='Define when it is acceptable to reboot a VM during a software update operation.') + c.argument('classifications_to_include_win', nargs='+', arg_type=get_enum_type(VMGuestPatchClassificationWindows), help='Space-separated list of classifications to include for Windows VM.') + c.argument('classifications_to_include_linux', nargs='+', arg_type=get_enum_type(VMGuestPatchClassificationLinux), help='Space-separated list of classifications to include for Linux VM.') + c.argument('kb_numbers_to_include', nargs='+', help='Space-separated list of KBs to include in the patch operation. Applicable to Windows VM only') + c.argument('kb_numbers_to_exclude', nargs='+', help='Space-separated list of KBs to exclude in the patch operation. Applicable to Windows VM only') + c.argument('exclude_kbs_requiring_reboot', arg_type=get_three_state_flag(), help="Filter out KBs that don't have a reboot behavior of 'NeverReboots' when this is set. Applicable to Windows VM only") + c.argument('package_name_masks_to_include', nargs='+', help='Space-separated list of packages to include in the patch operation. Format: packageName_packageVersion. Applicable to Linux VM only') + c.argument('package_name_masks_to_exclude', nargs='+', help='Space-separated list of packages to exclude in the patch operation. Format: packageName_packageVersion. Applicable to Linux VM only') + c.argument('max_patch_publish_date', arg_type=get_datetime_type(help='ISO 8601 time value for installing patches that were published on or before this given max published date.')) + + with self.argument_context('vm disk') as c: + c.argument('vm_name', options_list=['--vm-name'], id_part=None, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines')) + c.argument('new', action='store_true', help='create a new disk') + c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU') + c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. Max size: 4095 GB (certain preview disks can be larger).', type=int) + c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.')
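+ # A hedged usage sketch (resource names assumed) for the attach/detach arguments below:
+ #   az vm disk attach -g my-rg --vm-name my-vm --name my-data-disk
+ #   az vm disk detach -g my-rg --vm-name my-vm --name my-data-disk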
+ + with self.argument_context('vm disk attach') as c: + c.argument('enable_write_accelerator', min_api='2017-12-01', action='store_true', help='enable write accelerator') + c.argument('disk', options_list=['--name', '-n', c.deprecate(target='--disk', redirect='--name', hide=True)], + help="The name or ID of the managed disk", id_part='name', + completer=get_resource_name_completion_list('Microsoft.Compute/disks')) + c.argument('disks', nargs='*', help="One or more names or IDs of the managed disk (space-delimited).", + completer=get_resource_name_completion_list('Microsoft.Compute/disks')) + c.argument('ids', deprecate_info=c.deprecate(target='--ids', redirect='--disks', hide=True)) + c.argument('disk_ids', nargs='+', min_api='2024-03-01', help='The disk IDs of the managed disk (space-delimited).') + + with self.argument_context('vm disk detach') as c: + c.argument('disk_name', arg_type=name_arg_type, help='The data disk name.') + c.argument('force_detach', action='store_true', min_api='2020-12-01', help='Force detach managed data disks from a VM.') + c.argument('disk_ids', nargs='+', min_api='2024-03-01', help='The disk IDs of the managed disk (space-delimited).') + + with self.argument_context('vm encryption enable') as c: + c.argument('encrypt_format_all', action='store_true', help='Encrypt-formats data disks instead of encrypting them. Encrypt-formatting is a lot faster than in-place encryption but wipes out the partition getting encrypt-formatted. (Only supported for Linux virtual machines.)') + # Place aad arguments in their own group + aad_arguments = 'Azure Active Directory' + c.argument('aad_client_id', arg_group=aad_arguments) + c.argument('aad_client_secret', arg_group=aad_arguments) + c.argument('aad_client_cert_thumbprint', arg_group=aad_arguments) + + with self.argument_context('vm extension') as c: + c.argument('vm_extension_name', name_arg_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines/extensions'), help='Name of the extension.', id_part='child_name_1') + c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part='name') + c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(expiration='3.0.0', hide=True)) + + with self.argument_context('vm extension list') as c: + c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name']) + + with self.argument_context('vm extension show') as c: + c.argument('instance_view', action='store_true', help='The instance view of a virtual machine extension.') + + with self.argument_context('vm secret') as c: + c.argument('secrets', multi_ids_type, options_list=['--secrets', '-s'], help='Space-separated list of key vault secret URIs. For example, as produced by \'az keyvault secret list-versions --vault-name vaultname -n cert1 --query "[?attributes.enabled].id" -o tsv\'') + c.argument('keyvault', help='Name or ID of the key vault.', validator=validate_keyvault) + c.argument('certificate', help='key vault certificate name or its full secret URL') + c.argument('certificate_store', help='Windows certificate store names.
Default: My') + + with self.argument_context('vm secret list') as c: + c.argument('vm_name', arg_type=existing_vm_name, id_part=None) + + with self.argument_context('vm image') as c: + c.argument('publisher_name', options_list=['--publisher', '-p'], help='image publisher') + c.argument('publisher', options_list=['--publisher', '-p'], help='image publisher') + c.argument('offer', options_list=['--offer', '-f'], help='image offer') + c.argument('plan', help='image billing plan') + c.argument('sku', options_list=['--sku', '-s'], help='image sku') + c.argument('version', help="image sku's version") + c.argument('urn', help="URN, in the format of 'publisher:offer:sku:version' or 'publisher:offer:sku:edge_zone:version'. If specified, other argument values can be omitted") + + with self.argument_context('vm image list') as c: + c.argument('image_location', get_location_type(self.cli_ctx)) + c.argument('edge_zone', edge_zone_type) + c.argument('architecture', help='The architecture name.', arg_type=get_enum_type(["x64", "Arm64"])) + + with self.argument_context('vm image list-offers') as c: + c.argument('edge_zone', edge_zone_type) + + with self.argument_context('vm image list-skus') as c: + c.argument('edge_zone', edge_zone_type) + + with self.argument_context('vm image list-publishers') as c: + c.argument('edge_zone', edge_zone_type) + + with self.argument_context('vm image show') as c: + c.argument('skus', options_list=['--sku', '-s']) + c.argument('edge_zone', edge_zone_type) + + with self.argument_context('vm image terms') as c: + c.argument('urn', help='URN, in the format of \'publisher:offer:sku:version\'. If specified, other argument values can be omitted') + c.argument('publisher', help='Image publisher') + c.argument('offer', help='Image offer') + c.argument('plan', help='Image billing plan') + + with self.argument_context('vm nic') as c: + c.argument('vm_name', existing_vm_name, options_list=['--vm-name'], id_part=None) + c.argument('nics', nargs='+', help='Names or IDs of NICs.', validator=validate_vm_nics) + c.argument('primary_nic', help='Name or ID of the primary NIC. If missing, the first NIC in the list will be the primary.') + + with self.argument_context('vm nic show') as c: + c.argument('nic', help='NIC name or ID.', validator=validate_vm_nic) + + with self.argument_context('vm unmanaged-disk') as c: + c.argument('new', action='store_true', help='Create a new disk.') + c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine size.') + c.argument('vhd_uri', help="Virtual hard disk URI. For example: https://mystorage.blob.core.windows.net/vhds/d1.vhd") + + with self.argument_context('vm unmanaged-disk attach') as c: + c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.')
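+ # Illustrative usage (hypothetical names): az vm unmanaged-disk attach -g MyRG --vm-name MyVM -n MyDataDisk --new --size-gb 64 + c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. 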
Max size: 4095 GB (certain preview disks can be larger).', type=int) + + with self.argument_context('vm unmanaged-disk detach') as c: + c.argument('disk_name', options_list=['--name', '-n'], help='The data disk name.') + + for scope in ['vm unmanaged-disk attach', 'vm unmanaged-disk detach']: + with self.argument_context(scope) as c: + c.argument('vm_name', arg_type=existing_vm_name, options_list=['--vm-name'], id_part=None) + + with self.argument_context('vm unmanaged-disk list') as c: + c.argument('vm_name', options_list=['--vm-name', '--name', '-n'], arg_type=existing_vm_name, id_part=None) + + with self.argument_context('vm user') as c: + c.argument('username', options_list=['--username', '-u'], help='The user name') + c.argument('password', options_list=['--password', '-p'], help='The user password') + + with self.argument_context('vm list-skus') as c: + c.argument('size', options_list=['--size', '-s'], help="size name, partial name is accepted") + c.argument('zone', options_list=['--zone', '-z'], arg_type=get_three_state_flag(), help="show skus supporting availability zones") + c.argument('show_all', options_list=['--all'], arg_type=get_three_state_flag(), + help="show all information including vm sizes not available under the current subscription") + c.argument('resource_type', options_list=['--resource-type', '-r'], help='resource types e.g. "availabilitySets", "snapshots", "disks", etc') + + with self.argument_context('vm restart') as c: + c.argument('force', action='store_true', help='Force the VM to restart by redeploying it. Use if the VM is unresponsive.') + + with self.argument_context('vm host') as c: + c.argument('host_group_name', options_list=['--host-group'], id_part='name', help="Name of the Dedicated Host Group") + c.argument('host_name', name_arg_type, id_part='child_name_1', help="Name of the Dedicated Host") + c.ignore('expand') + + with self.argument_context('vm host create') as c: + c.argument('platform_fault_domain', options_list=['--platform-fault-domain', '-d'], type=int, + help="Fault domain of the host within a group. Allowed values: 0, 1, 2") + c.argument('auto_replace_on_failure', options_list=['--auto-replace'], arg_type=get_three_state_flag(), + help="Replace the host automatically if a failure occurs") + c.argument('license_type', arg_type=get_enum_type(DedicatedHostLicenseTypes), + help="The software license type that will be applied to the VMs deployed on the dedicated host.") + c.argument('sku', help="SKU of the dedicated host. Available SKUs: https://azure.microsoft.com/pricing/details/virtual-machines/dedicated-host/") + + with self.argument_context('vm host group') as c: + c.argument('host_group_name', name_arg_type, id_part='name', help="Name of the Dedicated Host Group") + c.argument('automatic_placement', arg_type=get_three_state_flag(), min_api='2020-06-01', + help='Specify whether virtual machines or virtual machine scale sets can be placed automatically ' + 'on the dedicated host group. Automatic placement means resources are allocated on dedicated ' + 'hosts, that are chosen by Azure, under the dedicated host group. 
The value is defaulted to ' + 'false when not provided.') + + with self.argument_context('vm host group create') as c: + c.argument('platform_fault_domain_count', options_list=["--platform-fault-domain-count", "-c"], type=int, + help="Number of fault domains that the host group can span.") + c.argument('zones', zone_type) + c.argument('ultra_ssd_enabled', arg_type=get_three_state_flag(), min_api='2022-03-01', help='Enable a capability to have UltraSSD Enabled Virtual Machines on Dedicated Hosts of the Dedicated Host Group.') + + for scope in ["vm host", "vm host group"]: + with self.argument_context("{} create".format(scope)) as c: + location_type = get_location_type(self.cli_ctx) + custom_location_msg = " Otherwise, location will default to the resource group's location" + custom_location_type = CLIArgumentType(overrides=location_type, + help=location_type.settings["help"] + custom_location_msg) + c.argument('location', arg_type=custom_location_type) + # endregion + + # region VMSS + scaleset_name_aliases = ['vm_scale_set_name', 'virtual_machine_scale_set_name', 'name'] + + with self.argument_context('vmss') as c: + c.argument('zones', zones_type, min_api='2017-03-30') + c.argument('instance_id', id_part='child_name_1') + c.argument('instance_ids', multi_ids_type, help='Space-separated list of IDs (ex: 1 2 3 ...) or * for all instances. If not provided, the action will be applied on the scaleset itself') + c.argument('tags', tags_type) + c.argument('caching', help='Disk caching policy', arg_type=get_enum_type(CachingTypes)) + for dest in scaleset_name_aliases: + c.argument(dest, vmss_name_type) + c.argument('host_group', min_api='2020-06-01', + help='Name or ID of dedicated host group that the virtual machine scale set resides in') + + for scope in ['vmss deallocate', 'vmss delete-instances', 'vmss restart', 'vmss start', 'vmss stop', 'vmss show', 'vmss update-instances', 'vmss simulate-eviction']: + with self.argument_context(scope) as c: + for dest in scaleset_name_aliases: + c.argument(dest, vmss_name_type, id_part=None) # due to instance-ids parameter + + with self.argument_context('vmss deallocate', operation_group='virtual_machine_scale_sets') as c: + c.argument('hibernate', arg_type=get_three_state_flag(), help='Hibernate a virtual machine from the VM scale set. Available for VMSS with Flexible OrchestrationMode only.', min_api='2023-03-01') + + with self.argument_context('vmss reimage') as c: + c.argument('instance_ids', nargs='+', + help='Space-separated list of VM instance ID. If missing, reimage all instances.', + options_list=['--instance-ids', c.deprecate(target='--instance-id', redirect='--instance-ids', hide=True)]) + c.argument('force_update_os_disk_for_ephemeral', options_list=['--force-update-os-disk-for-ephemeral', '--update-os-disk'], arg_type=get_three_state_flag(), min_api='2024-03-01', help='Force update ephemeral OS disk for a virtual machine scale set VM.') + + with self.argument_context('vmss create', operation_group='virtual_machine_scale_sets') as c: + VirtualMachineEvictionPolicyTypes = self.get_models('VirtualMachineEvictionPolicyTypes', resource_type=ResourceType.MGMT_COMPUTE) + + c.argument('name', name_arg_type) + c.argument('nat_backend_port', default=None, help='Backend port to open with NAT rules. Defaults to 22 on Linux and 3389 on Windows.') + c.argument('single_placement_group', arg_type=get_three_state_flag(), help="Limit the scale set to a single placement group." 
+ " See https://docs.microsoft.com/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-placement-groups for details.") + c.argument('platform_fault_domain_count', type=int, help='Fault Domain count for each placement group in the availability zone', min_api='2017-12-01') + c.argument('vmss_name', name_arg_type, id_part=None, help='Name of the virtual machine scale set.') + c.argument('instance_count', help='Number of VMs in the scale set.', type=int) + c.argument('disable_overprovision', help='Overprovision option (see https://azure.microsoft.com/documentation/articles/virtual-machine-scale-sets-overview/ for details).', action='store_true') + c.argument('health_probe', help='Probe name from the existing load balancer, mainly used for rolling upgrade or automatic repairs') + c.argument('vm_sku', help='Size of VMs in the scale set. Default to "Standard_DS1_v2". See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.') + c.argument('nsg', help='Name or ID of an existing Network Security Group.', arg_group='Network') + c.argument('eviction_policy', resource_type=ResourceType.MGMT_COMPUTE, min_api='2017-12-01', arg_type=get_enum_type(VirtualMachineEvictionPolicyTypes, default=None), + help="The eviction policy for virtual machines in a Spot priority scale set. Default eviction policy is Deallocate for a Spot priority scale set") + c.argument('application_security_groups', resource_type=ResourceType.MGMT_COMPUTE, min_api='2018-06-01', nargs='+', options_list=['--asgs'], help='Space-separated list of existing application security groups to associate with the VM.', arg_group='Network', validator=validate_asg_names_or_ids) + c.argument('computer_name_prefix', help='Computer name prefix for all of the virtual machines in the scale set. Computer name prefixes must be 1 to 15 characters long') + c.argument('orchestration_mode', help='Choose how virtual machines are managed by the scale set. In Uniform mode, you define a virtual machine model and Azure will generate identical instances based on that model. In Flexible mode, you manually create and add a virtual machine of any configuration to the scale set or generate identical instances based on virtual machine model defined for the scale set.', + arg_type=get_enum_type(['Uniform', 'Flexible']), default='Flexible', min_api='2020-12-01') + c.argument('orchestration_mode', help='Choose how virtual machines are managed by the scale set. In Uniform mode, you define a virtual machine model and Azure will generate identical instances based on that model.', + arg_type=get_enum_type(['Uniform']), default='Uniform', max_api='2020-09-30') + c.argument('scale_in_policy', scale_in_policy_type) + c.argument('automatic_repairs_grace_period', min_api='2018-10-01', + help='The amount of time (in minutes, between 30 and 90) for which automatic repairs are suspended due to a state change on VM.') + c.argument('automatic_repairs_action', arg_type=get_enum_type(['Replace', 'Restart', 'Reimage']), min_api='2021-11-01', help='Type of repair action that will be used for repairing unhealthy virtual machines in the scale set.') + c.argument('user_data', help='UserData for the virtual machines in the scale set. 
It can be passed in as file or string.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01') + c.argument('network_api_version', min_api='2021-03-01', + help="Specify the Microsoft.Network API version used when creating networking resources in the Network " + "Interface Configurations for Virtual Machine Scale Set with orchestration mode 'Flexible'. Default " + "value is 2020-11-01.") + c.argument('enable_spot_restore', arg_type=get_three_state_flag(), min_api='2021-04-01', help='Enable the Spot-Try-Restore feature where evicted VMSS SPOT instances will be tried to be restored opportunistically based on capacity availability and pricing constraints') + c.argument('spot_restore_timeout', min_api='2021-04-01', help='Timeout value expressed as an ISO 8601 time duration after which the platform will not try to restore the VMSS SPOT instances') + c.argument('enable_agent', arg_type=get_three_state_flag(), min_api='2018-06-01', + help='Indicate whether virtual machine agent should be provisioned on the virtual machine. When this property is not specified, default behavior is to set it to true. This will ensure that VM Agent is installed on the VM so that extensions can be added to the VM later') + c.argument('enable_auto_update', arg_type=get_three_state_flag(), min_api='2020-06-01', + help='Indicate whether Automatic Updates is enabled for the Windows virtual machine') + c.argument('patch_mode', arg_type=get_enum_type(['AutomaticByOS', 'AutomaticByPlatform', 'Manual', 'ImageDefault']), min_api='2020-12-01', + help='Mode of in-guest patching to IaaS virtual machine. Allowed values for Windows VM: AutomaticByOS, AutomaticByPlatform, Manual. Allowed values for Linux VM: AutomaticByPlatform, ImageDefault. Manual - You control the application of patches to a virtual machine. You do this by applying patches manually inside the VM. In this mode, automatic updates are disabled; the parameter --enable-auto-update must be false. AutomaticByOS - The virtual machine will automatically be updated by the OS. The parameter --enable-auto-update must be true. AutomaticByPlatform - The virtual machine will automatically be updated by the platform. ImageDefault - The virtual machine\'s default patching configuration is used. 
The parameters --enable-agent and --enable-auto-update must be true') + c.argument('enable_hibernation', arg_type=get_three_state_flag(), min_api='2021-03-01', help='The flag that enables or disables hibernation capability on the VMSS.') + c.argument('security_type', security_type) + c.argument('enable_secure_boot', enable_secure_boot_type) + c.argument('enable_vtpm', enable_vtpm_type) + c.argument('os_disk_delete_option', arg_type=get_enum_type(self.get_models('DiskDeleteOptionTypes')), min_api='2022-03-01', arg_group='Storage', help='Specify whether OS disk should be deleted or detached upon VMSS Flex deletion (This feature is only for VMSS with flexible orchestration mode).') + c.argument('data_disk_delete_option', arg_type=get_enum_type(self.get_models('DiskDeleteOptionTypes')), min_api='2022-03-01', arg_group='Storage', help='Specify whether data disk should be deleted or detached upon VMSS Flex deletion (This feature is only for VMSS with flexible orchestration mode)') + c.argument('security_posture_reference_id', min_api='2023-03-01', + options_list=['--security-posture-reference-id', '--security-posture-id'], + help='The security posture reference id in the form of /CommunityGalleries/{communityGalleryName}/securityPostures/{securityPostureName}/versions/{major.minor.patch}|{major.*}|latest') + c.argument('security_posture_reference_exclude_extensions', min_api='2023-03-01', type=validate_file_or_dict, + options_list=['--security-posture-reference-exclude-extensions', '--exclude-extensions'], + help='List of virtual machine extensions to exclude when applying the Security Posture. Either a Json string or a file path is acceptable. ' + 'Please refer to https://docs.microsoft.com/rest/api/compute/virtualmachinescalesets/get#virtualmachineextension for the data format.') + c.argument('skuprofile_vmsizes', nargs='+', min_api='2024-07-01', help='A list of VM sizes in the scale set. See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.') + c.argument('skuprofile_allostrat', options_list=['--skuprofile-allocation-strategy', '--sku-allocat-strat'], arg_type=get_enum_type(['LowestPrice', 'CapacityOptimized']), min_api='2024-07-01', help='Allocation strategy for VM sizes in SKU profile.') + + with self.argument_context('vmss create', arg_group='Network Balancer') as c: + c.argument('application_gateway', help='Name to use when creating a new application gateway (default) or referencing an existing one. Can also reference an existing application gateway by ID or specify "" for none.', options_list=['--app-gateway']) + c.argument('app_gateway_capacity', help='The number of instances to use when creating a new application gateway.') + c.argument('app_gateway_sku', help='SKU when creating a new application gateway.') + c.argument('app_gateway_subnet_address_prefix', help='The subnet IP address prefix to use when creating a new application gateway in CIDR format.') + c.argument('backend_pool_name', help='Name to use for the backend pool when creating a new load balancer or application gateway.') + c.argument('backend_port', help='When creating a new load balancer, backend port to open with NAT rules (Defaults to 22 on Linux and 3389 on Windows). When creating an application gateway, the backend port to use for the backend HTTP settings.', type=int) + c.argument('load_balancer', help='Name to use when creating a new load balancer (default) or referencing an existing one. 
Can also reference an existing load balancer by ID or specify "" for none.', options_list=['--load-balancer', '--lb']) + c.argument('load_balancer_sku', resource_type=ResourceType.MGMT_NETWORK, min_api='2017-08-01', max_api='2021-02-01', options_list=['--lb-sku'], arg_type=get_enum_type(['Basic', 'Standard']), + help="Sku of the Load Balancer to create. Defaults to 'Standard' when single placement group is turned off; otherwise, defaults to 'Basic'. The public IP is supported to be created on edge zone only when it is 'Standard'") + c.argument('load_balancer_sku', resource_type=ResourceType.MGMT_NETWORK, min_api='2021-02-01', options_list=['--lb-sku'], arg_type=get_enum_type(['Basic', 'Standard', 'Gateway'], default='Standard'), + help="Sku of the Load Balancer to create. The public IP is supported to be created on edge zone only when it is 'Standard'") + c.argument('nat_pool_name', help='Name to use for the NAT pool when creating a new load balancer.', options_list=['--lb-nat-pool-name', '--nat-pool-name'], deprecate_info=c.deprecate(target='--nat-pool-name', redirect='--nat-rule-name', hide=True)) + c.argument('nat_rule_name', help='Name to use for the NAT rule v2 when creating a new load balancer. (NAT rule V2 is used to replace NAT pool)', options_list=['--lb-nat-rule-name', '--nat-rule-name']) + + with self.argument_context('vmss create', min_api='2017-03-30', arg_group='Network') as c: + c.argument('public_ip_per_vm', action='store_true', help="Each VM instance will have a public ip. For security, you can use '--nsg' to apply appropriate rules") + c.argument('vm_domain_name', help="domain name of VM instances, once configured, the FQDN is `vm<vm-index>.<vm-domain-name>.<..rest..>`") + c.argument('dns_servers', nargs='+', help="space-separated IP addresses of DNS servers, e.g. 10.0.0.5 10.0.0.6") + c.argument('accelerated_networking', arg_type=get_three_state_flag(), + help="enable accelerated networking. Unless specified, CLI will enable it based on machine image and size") + + with self.argument_context('vmss update') as c: + protection_policy_type = CLIArgumentType(overrides=get_three_state_flag(), arg_group="Protection Policy", min_api='2019-03-01') + c.argument('protect_from_scale_in', arg_type=protection_policy_type, help="Protect the VM instance from scale-in operations.") + c.argument('protect_from_scale_set_actions', arg_type=protection_policy_type, help="Protect the VM instance from scale set actions (including scale-in).") + c.argument('enable_terminate_notification', min_api='2019-03-01', arg_type=get_three_state_flag(), + help='Enable terminate notification') + c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type) + c.argument('scale_in_policy', scale_in_policy_type) + c.argument('force_deletion', action='store_true', is_preview=True, help='This property allows you to specify if virtual machines chosen for removal have to be force deleted when a virtual machine scale set is being scaled-in.')
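+ # Illustrative usage (hypothetical names): az vmss update -g MyRG -n MyVmss --instance-id 0 --protect-from-scale-in true + c.argument('user_data', help='UserData for the virtual machines in the scale set. It can be passed in as file or string. 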
If empty string is passed in, the existing value will be deleted.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01') + c.argument('enable_spot_restore', arg_type=get_three_state_flag(), min_api='2021-04-01', + help='Enable the Spot-Try-Restore feature where evicted VMSS SPOT instances will be tried to be restored opportunistically based on capacity availability and pricing constraints') + c.argument('spot_restore_timeout', min_api='2021-04-01', + help='Timeout value expressed as an ISO 8601 time duration after which the platform will not try to restore the VMSS SPOT instances') + c.argument('vm_sku', help='The new size of the virtual machine instances in the scale set. Defaults to "Standard_DS1_v2". See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.', is_preview=True) + c.argument('ephemeral_os_disk_placement', arg_type=ephemeral_placement_type, + help='Only applicable when used with `--vm-sku`. Allows you to choose the Ephemeral OS disk provisioning location.') + c.argument('enable_hibernation', arg_type=get_three_state_flag(), min_api='2021-03-01', help='The flag that enables or disables hibernation capability on the VMSS.') + c.argument('enable_secure_boot', enable_secure_boot_type) + c.argument('enable_vtpm', enable_vtpm_type) + c.argument('custom_data', help='Custom init script file or text (cloud-init, cloud-config, etc..)', completer=FilesCompleter(), type=file_type) + c.argument('security_type', arg_type=get_enum_type(["TrustedLaunch", "Standard"]), min_api='2020-06-01', help='Specify the security type of the virtual machine scale set.') + c.argument('security_posture_reference_id', min_api='2023-03-01', + options_list=['--security-posture-reference-id', '--security-posture-id'], + help='The security posture reference id in the form of /CommunityGalleries/{communityGalleryName}/securityPostures/{securityPostureName}/versions/{major.minor.patch}|{major.*}|latest') + c.argument('security_posture_reference_exclude_extensions', min_api='2023-03-01', type=validate_file_or_dict, + options_list=['--security-posture-reference-exclude-extensions', '--exclude-extensions'], + help='List of virtual machine extensions to exclude when applying the Security Posture. Either a Json string or a file path is acceptable. ' + 'Please refer to https://docs.microsoft.com/rest/api/compute/virtualmachinescalesets/get#virtualmachineextension for the data format.') + c.argument('ephemeral_os_disk', arg_type=get_three_state_flag(), min_api='2024-03-01', help='Allows you to specify the ephemeral disk settings for the operating system disk. Specify false to set the ephemeral disk setting to empty and migrate it to a non-ephemeral OS disk') + c.argument('ephemeral_os_disk_option', options_list=['--ephemeral-os-disk-option', '--ephemeral-option'], arg_type=get_enum_type(self.get_models('DiffDiskOptions')), min_api='2024-03-01', help='Specify the ephemeral disk settings for operating system disk.') + c.argument('zones', zones_type, min_api='2023-03-01') + c.argument('skuprofile_vmsizes', nargs='+', min_api='2024-07-01', help='A list of VM sizes in the scale set. 
See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.') + c.argument('skuprofile_allostrat', options_list=['--skuprofile-allocation-strategy', '--sku-allocat-strat'], arg_type=get_enum_type(['LowestPrice', 'CapacityOptimized']), min_api='2024-07-01', help='Allocation strategy for VM sizes in SKU profile.') + + with self.argument_context('vmss update', min_api='2018-10-01', arg_group='Automatic Repairs') as c: + c.argument('enable_automatic_repairs', arg_type=get_three_state_flag(), help='Enable automatic repairs') + c.argument( + 'automatic_repairs_grace_period', + help='The amount of time (in minutes, between 30 and 90) for which automatic repairs are suspended due to a state change on VM.' + ) + c.argument('automatic_repairs_action', arg_type=get_enum_type(['Replace', 'Restart', 'Reimage']), min_api='2021-11-01', help='Type of repair action that will be used for repairing unhealthy virtual machines in the scale set.') + + for scope in ['vmss create', 'vmss update']: + with self.argument_context(scope) as c: + c.argument('terminate_notification_time', min_api='2019-03-01', + help='Length of time (in minutes, between 5 and 15) that a notification is sent to the VM on the instance metadata server before the VM gets deleted') + c.argument('max_batch_instance_percent', type=int, min_api='2020-12-01', + help='The maximum percent of total virtual machine instances that will be upgraded simultaneously by the rolling upgrade in one batch. Default: 20%') + c.argument('max_unhealthy_instance_percent', type=int, min_api='2020-12-01', + help='The maximum percentage of the total virtual machine instances in the scale set that can be simultaneously unhealthy. Default: 20%') + c.argument('max_unhealthy_upgraded_instance_percent', type=int, min_api='2020-12-01', + help='The maximum percentage of upgraded virtual machine instances that can be found to be in an unhealthy state. Default: 20%') + c.argument('pause_time_between_batches', min_api='2020-12-01', + help='The wait time between completing the update for all virtual machines in one batch and starting the next batch. Default: 0 seconds') + c.argument('enable_cross_zone_upgrade', arg_type=get_three_state_flag(), min_api='2020-12-01', + help='Setting this Boolean property will allow VMSS to ignore AZ boundaries when constructing upgrade batches, and only consider Update Domain and maxBatchInstancePercent to determine the batch size') + c.argument('prioritize_unhealthy_instances', arg_type=get_three_state_flag(), min_api='2020-12-01', + help='Setting this Boolean property will lead to all unhealthy instances in a scale set getting upgraded before any healthy instances') + c.argument('max_surge', arg_type=get_three_state_flag(), min_api='2022-11-01', is_preview=True, + help='Specify it to create new virtual machines to upgrade the scale set, rather than updating the existing virtual machines.') + c.argument('regular_priority_count', type=int, min_api='2022-08-01', is_preview=True, help='The base number of regular priority VMs that will be created in this scale set as it scales out. Must be greater than 0.') + c.argument('regular_priority_percentage', type=int, min_api='2022-08-01', is_preview=True, help='The percentage of VM instances, after the base regular priority count has been reached, that are expected to use regular priority. 
Must be between 0 and 100.') + c.argument('enable_osimage_notification', arg_type=get_three_state_flag(), min_api='2022-11-01', help='Specify whether the OS Image Scheduled event is enabled or disabled.') + c.argument('enable_resilient_creation', arg_type=get_three_state_flag(), min_api='2023-09-01', help='Automatically recover customers from OS Provisioning Timeout and VM Start Timeout errors experienced during a VM Create operation by deleting and recreating the affected VM.') + c.argument('enable_resilient_deletion', arg_type=get_three_state_flag(), min_api='2023-09-01', help='Retry VM Delete requests asynchronously in the event of a failed delete operation.') + c.argument('additional_scheduled_events', options_list=['--additional-scheduled-events', '--scheduled-event-additional-publishing-target-event-grid-and-resource-graph', '--additional-events'], arg_type=get_three_state_flag(), min_api='2024-03-01', help='The configuration parameter used while creating event grid and resource graph scheduled event setting.') + c.argument('enable_user_reboot_scheduled_events', options_list=['--enable-user-reboot-scheduled-events', '--enable-reboot'], arg_type=get_three_state_flag(), min_api='2024-03-01', help='The configuration parameter used while publishing scheduled events additional publishing targets.') + c.argument('enable_user_redeploy_scheduled_events', options_list=['--enable-user-redeploy-scheduled-events', '--enable-redeploy'], arg_type=get_three_state_flag(), min_api='2024-03-01', help='The configuration parameter used while creating user initiated redeploy scheduled event setting.') + c.argument('enable_auto_os_upgrade', enable_auto_os_upgrade_type) + c.argument('upgrade_policy_mode', help='Specify the mode of an upgrade to virtual machines in the scale set.', arg_type=get_enum_type(UpgradeMode)) + + with self.argument_context('vmss update') as c: + c.argument('instance_id', id_part='child_name_1', help="Update the VM instance with this ID. If missing, update the VMSS.") + with self.argument_context('vmss wait') as c: + c.argument('instance_id', id_part='child_name_1', help="Wait on the VM instance with this ID. If missing, wait on the VMSS.") + + for scope in ['vmss update-instances', 'vmss delete-instances']: + with self.argument_context(scope) as c: + c.argument('instance_ids', multi_ids_type, help='Space-separated list of IDs (ex: 1 2 3 ...) or * for all instances.') + + with self.argument_context('vmss diagnostics') as c: + c.argument('vmss_name', id_part=None, help='Scale set name') + + with self.argument_context('vmss disk') as c: + options_list = ['--vmss-name'] + [c.deprecate(target=opt, redirect='--vmss-name', hide=True) for opt in name_arg_type.settings['options_list']] + new_vmss_name_type = CLIArgumentType(overrides=vmss_name_type, options_list=options_list) + + c.argument('lun', type=int, help='0-based logical unit number (LUN). Max value depends on the Virtual Machine instance size.')
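+ # Illustrative usage (hypothetical names): az vmss disk attach -g MyRG --vmss-name MyVmss --size-gb 50 + c.argument('size_gb', options_list=['--size-gb', '-z'], help='size in GB. 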
Max size: 4095 GB (certain preview disks can be larger).', type=int) + c.argument('vmss_name', new_vmss_name_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets')) + c.argument('disk', validator=validate_vmss_disk, help='existing disk name or ID to attach or detach from VM instances', + min_api='2017-12-01', completer=get_resource_name_completion_list('Microsoft.Compute/disks')) + c.argument('instance_id', help='Scale set VM instance ID', min_api='2017-12-01') + c.argument('sku', arg_type=disk_sku, help='Underlying storage SKU') + + with self.argument_context('vmss encryption') as c: + c.argument('vmss_name', vmss_name_type, completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachineScaleSets')) + + with self.argument_context('vmss extension') as c: + c.argument('extension_name', name_arg_type, help='Name of the extension.') + c.argument('vmss_name', vmss_name_type, options_list=['--vmss-name'], id_part=None) + + with self.argument_context('vmss set-orchestration-service-state') as c: + c.argument('service_name', arg_type=get_enum_type(OrchestrationServiceNames), help='The name of the orchestration service.') + c.argument('action', arg_type=get_enum_type(OrchestrationServiceStateAction), help='The action to be performed.') + # endregion + + # region VM & VMSS Shared + for scope in ['vm', 'vmss']: + with self.argument_context(scope) as c: + c.argument('no_auto_upgrade', + options_list=['--no-auto-upgrade-minor-version', c.deprecate(target='--no-auto-upgrade', redirect='--no-auto-upgrade-minor-version')], + arg_type=get_three_state_flag(), + help='If set, the extension service will not automatically pick or upgrade to the latest minor version, even if the extension is redeployed.') + + for scope in ['vm', 'vmss']: + with self.argument_context('{} run-command'.format(scope)) as c: + c.argument('command_id', completer=get_vm_run_command_completion_list, help="The command id. Use 'az {} run-command list' to get the list".format(scope)) + if scope == 'vmss': + c.argument('vmss_name', vmss_name_type) + + for scope in ['vm', 'vmss']: + with self.argument_context('{} run-command invoke'.format(scope)) as c: + c.argument('parameters', nargs='+', help="space-separated parameters in the format of '[name=]value'") + c.argument('scripts', nargs='+', help="Space-separated script lines. Use @{file} to load script from a file") + + for scope in ['vm', 'vmss']: + with self.argument_context('{} stop'.format(scope)) as c: + c.argument('skip_shutdown', action='store_true', help='Skip shutdown and power-off immediately.', min_api='2019-03-01') + + run_cmd_name_type = CLIArgumentType(options_list=['--name', '--run-command-name'], help='The name of the virtual machine run command.') + run_cmd_vm_name = CLIArgumentType(options_list=['--vm-name'], help='The name of the virtual machine.') + for scope in ['create', 'update']: + with self.argument_context('vm run-command {}'.format(scope)) as c: + c.argument('vm_name', run_cmd_vm_name) + c.argument('run_command_name', run_cmd_name_type) + c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False, + validator=get_default_location_from_resource_group) + c.argument('tags', tags_type) + c.argument('script', help='The PowerShell or bash script to execute on the VM.')
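+ # Illustrative usage (hypothetical names): az vm run-command create -g MyRG --vm-name MyVM --run-command-name MyCommand --script "echo hello" + c.argument('script_uri', help='URI of the script to execute on the VM. The URI can be any link accessible from the VM or a storage blob without SAS. 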
If subscription has access to the storage blob, then SAS will be auto-generated. ') + c.argument('command_id', help='Specify the command ID of a predefined script. All command IDs can be listed using the "list" command.') + c.argument('parameters', nargs='+', help='Set custom parameters in a name-value pair.') + c.argument('protected_parameters', nargs='+', help='Set custom parameters in a name-value pair. These parameters will be encrypted during transmission and will not be logged.') + c.argument('async_execution', arg_type=get_three_state_flag(), help='Optional. If set to true, provisioning ' + 'will complete as soon as the script starts and will not wait for script to complete.') + c.argument('run_as_user', help='By default script process runs under system/root user. Specify custom user to host the process.') + c.argument('run_as_password', help='Password if needed for using run-as-user parameter. It will be encrypted and not logged. ') + c.argument('timeout_in_seconds', type=int, help='The timeout in seconds to execute the run command.') + c.argument('output_blob_uri', help='Specify the Azure storage blob (SAS URI) where script output stream will be uploaded.') + c.argument('error_blob_uri', help='Specify the Azure storage blob where script error stream will be uploaded.') + + with self.argument_context('vm run-command delete') as c: + c.argument('vm_name', run_cmd_vm_name) + c.argument('run_command_name', run_cmd_name_type) + + with self.argument_context('vm run-command list') as c: + c.argument('vm_name', run_cmd_vm_name, id_part=None) + c.argument('expand', help='The expand expression to apply on the operation.') + c.argument('location', arg_type=get_location_type(self.cli_ctx)) + + with self.argument_context('vm run-command show') as c: + c.argument('vm_name', run_cmd_vm_name) + c.argument('run_command_name', run_cmd_name_type) + c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(hide=True)) + c.argument('instance_view', action='store_true', help='The instance view of a run command.') + c.argument('location', arg_type=get_location_type(self.cli_ctx)) + c.argument('command_id', help='The command ID.') + + with self.argument_context('vm run-command wait') as c: + c.argument('vm_name', run_cmd_vm_name) + c.argument('run_command_name', run_cmd_name_type) + c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(hide=True)) + c.argument('instance_view', action='store_true', help='The instance view of a run command.') + c.argument('location', arg_type=get_location_type(self.cli_ctx)) + c.argument('command_id', help='The command ID.') + + run_cmd_vmss_name = CLIArgumentType(options_list=['--vmss-name'], help='The name of the VM scale set.') + for scope in ['create', 'update']: + with self.argument_context('vmss run-command {}'.format(scope)) as c: + c.argument('vmss_name', run_cmd_vmss_name) + c.argument('instance_id', help='The instance ID of the virtual machine.') + c.argument('run_command_name', run_cmd_name_type) + c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False, + validator=get_default_location_from_resource_group) + c.argument('tags', tags_type) + c.argument('script', help='The PowerShell or bash script to execute on the VM.') + c.argument('script_uri', + help='URI of the script to execute on the VM. The URI can be any link accessible from the VM or a storage blob without SAS. 
If subscription has access to the storage blob, then SAS will be auto-generated. ') + c.argument('command_id', + help='Specify the command ID of a predefined script. All command IDs can be listed using the "list" command.') + c.argument('parameters', nargs='+', help='Set custom parameters in a name-value pair.') + c.argument('protected_parameters', nargs='+', + help='Set custom parameters in a name-value pair. These parameters will be encrypted during transmission and will not be logged.') + c.argument('async_execution', arg_type=get_three_state_flag(), help='Optional. If set to true, provisioning ' + 'will complete as soon as the script starts and will not wait for script to complete.') + c.argument('run_as_user', + help='By default script process runs under system/root user. Specify custom user to host the process.') + c.argument('run_as_password', + help='Password if needed for using run-as-user parameter. It will be encrypted and not logged. ') + c.argument('timeout_in_seconds', type=int, help='The timeout in seconds to execute the run command.') + c.argument('output_blob_uri', help='URI (without SAS) to an append blob where the script output will be uploaded.') + c.argument('error_blob_uri', help='URI (without SAS) to an append blob where the script error stream will be uploaded.') + + with self.argument_context('vmss run-command delete') as c: + c.argument('vmss_name', run_cmd_vmss_name) + c.argument('instance_id', help='The instance ID of the virtual machine.') + c.argument('run_command_name', run_cmd_name_type) + + with self.argument_context('vmss run-command list') as c: + c.argument('vmss_name', run_cmd_vmss_name, id_part=None) + c.argument('instance_id', help='The instance ID of the virtual machine.') + c.argument('expand', help='The expand expression to apply on the operation.') + + with self.argument_context('vmss run-command show') as c: + c.argument('vmss_name', run_cmd_vmss_name) + c.argument('instance_id', help='The instance ID of the virtual machine.') + c.argument('run_command_name', run_cmd_name_type) + c.argument('expand', help='The expand expression to apply on the operation.', deprecate_info=c.deprecate(hide=True)) + c.argument('instance_view', action='store_true', help='The instance view of a run command.') + + for scope in ['vm identity assign', 'vmss identity assign']: + with self.argument_context(scope) as c: + c.argument('assign_identity', options_list=['--identities'], nargs='*', help="Space-separated identities to assign. Use '{0}' to refer to the system assigned identity. Default: '{0}'".format(MSI_LOCAL_ID)) + c.argument('vm_name', existing_vm_name) + c.argument('vmss_name', vmss_name_type) + + for scope in ['vm identity remove', 'vmss identity remove']: + with self.argument_context(scope) as c:
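+ # Illustrative usage (hypothetical names; '[system]' is the MSI_LOCAL_ID sentinel): az vm identity remove -g MyRG -n MyVM --identities [system] + c.argument('identities', nargs='+', help="Space-separated identities to remove. Use '{0}' to refer to the system assigned identity. 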
Default: '{0}'".format(MSI_LOCAL_ID)) + c.argument('vm_name', existing_vm_name) + c.argument('vmss_name', vmss_name_type) + + for scope in ['vm identity show', 'vmss identity show']: + with self.argument_context(scope) as c: + c.argument('vm_name', existing_vm_name) + c.argument('vmss_name', vmss_name_type) + + for scope in ['vm application set', 'vmss application set']: + with self.argument_context(scope) as c: + c.argument('vm', existing_vm_name) + c.argument('vmss_name', vmss_name_type) + c.argument('application_version_ids', options_list=['--app-version-ids'], nargs='*', help="Space-separated application version ids to set to VM.") + c.argument('order_applications', action='store_true', help='Whether to set order index at each gallery application. If specified, the first app version id gets specified an order = 1, then the next one 2, and so on. This parameter is meant to be used when the VMApplications specified by app version ids must be installed in a particular order; the lowest order is installed first.') + c.argument('application_configuration_overrides', options_list=['--app-config-overrides'], nargs='*', + help='Space-separated application configuration overrides for each application version ids. ' + 'It should have the same number of items as the application version ids. Null is available for a application ' + 'which does not have a configuration override.') + c.argument('treat_deployment_as_failure', nargs='*', help="Space-separated list of true or false corresponding to the application version ids. If set to true, failure to install or update gallery application version operation will fail this operation") + + for scope in ['vm application list', 'vmss application list']: + with self.argument_context(scope) as c: + c.argument('vm_name', options_list=['--vm-name', '--name', '-n'], arg_type=existing_vm_name, id_part=None) + c.argument('vmss_name', vmss_name_type, id_part=None) + + for scope in ['vm create', 'vmss create']: + with self.argument_context(scope) as c: + c.argument('location', get_location_type(self.cli_ctx), help='Location in which to create VM and related resources. If default location is not configured, will default to the resource group\'s location') + c.argument('tags', tags_type) + c.argument('no_wait', help='Do not wait for the long-running operation to finish.') + c.argument('validate', options_list=['--validate'], help='Generate and validate the ARM template without creating any resources.', action='store_true') + c.argument('size', help='The VM size to be created. See https://azure.microsoft.com/pricing/details/virtual-machines/ for size info.') + c.argument('image', completer=get_urn_aliases_completion_list) + c.argument('custom_data', help='Custom init script file or text (cloud-init, cloud-config, etc..)', completer=FilesCompleter(), type=file_type) + c.argument('secrets', multi_ids_type, help='One or many Key Vault secrets as JSON strings or files via `@{path}` containing `[{ "sourceVault": { "id": "value" }, "vaultCertificates": [{ "certificateUrl": "value", "certificateStore": "cert store name (only on windows)"}] }]`', type=file_type, completer=FilesCompleter()) + c.argument('assign_identity', nargs='*', arg_group='Managed Service Identity', help="accept system or user assigned identities separated by spaces. Use '[system]' to refer system assigned identity, or a resource id to refer user assigned identity. 
Check out help for more examples") + c.ignore('aux_subscriptions') + c.argument('edge_zone', edge_zone_type) + c.argument('accept_term', action='store_true', help="Accept the license agreement and privacy statement.") + c.argument('disable_integrity_monitoring', action='store_true', min_api='2020-12-01', help='Disable installing guest attestation extension and enabling System Assigned Identity for Trusted Launch enabled VMs and VMSS. This will become the default behavior, so this flag will become unnecessary', deprecate_info=c.deprecate(hide=True)) + c.argument('enable_integrity_monitoring', action='store_true', min_api='2020-12-01', help='Enable installing the Microsoft proprietary, non-security-supported guest attestation extension and enabling System Assigned Identity for Trusted Launch enabled VMs and VMSS.') + c.argument('os_disk_security_encryption_type', arg_type=get_enum_type(self.get_models('SecurityEncryptionTypes')), min_api='2021-11-01', help='Specify the encryption type of the OS managed disk.') + c.argument('os_disk_secure_vm_disk_encryption_set', min_api='2021-11-01', help='Specify the customer managed disk encryption set resource ID or name for the managed disk that is used for customer managed key encrypted Confidential VM OS disk and VM guest blob.') + c.argument('disable_integrity_monitoring_autoupgrade', action='store_true', min_api='2020-12-01', help='Disable auto upgrade of guest attestation extension for Trusted Launch enabled VMs and VMSS.') + + for scope in ['vm create', 'vmss create']: + with self.argument_context(scope, arg_group='Authentication') as c: + c.argument('generate_ssh_keys', action='store_true', help='Generate SSH public and private key files if missing. The keys will be stored in the ~/.ssh directory') + c.argument('ssh_key_type', arg_type=get_enum_type(['RSA', 'Ed25519']), default='RSA', min_api='2023-09-01', help='Specify the type of SSH public and private key files to be generated if missing.') + c.argument('admin_username', help='Username for the VM. Default value is current username of OS. If the default value is system reserved, then default value will be set to azureuser. Please refer to https://docs.microsoft.com/rest/api/compute/virtualmachines/createorupdate#osprofile to get a full list of reserved values.') + c.argument('admin_password', help="Password for the VM if authentication type is 'Password'.") + c.argument('ssh_key_value', options_list=['--ssh-key-values'], completer=FilesCompleter(), type=file_type, nargs='+') + c.argument('ssh_dest_key_path', help='Destination file path on the VM for the SSH key. If the file already exists, the specified key(s) are appended to the file. Destination path for SSH public keys is currently limited to its default value "/home/username/.ssh/authorized_keys" due to a known issue in Linux provisioning agent.') + c.argument('authentication_type', help='Type of authentication to use with the VM. Defaults to password for Windows and SSH public key for Linux. "all" enables both ssh and password authentication. ', arg_type=get_enum_type(['ssh', 'password', 'all'])) + + for scope in ['vm create', 'vmss create']: + with self.argument_context(scope, arg_group='Storage') as c: + if DiskStorageAccountTypes: + allowed_values = ", ".join([sku.value for sku in DiskStorageAccountTypes]) + else: + allowed_values = ", ".join(['Premium_LRS', 'Standard_LRS']) + + usage = 'Usage: [--storage-sku SKU | --storage-sku ID=SKU ID=SKU ID=SKU...], where each ID is "os" or a 0-indexed lun.' 
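+ # For example (illustrative): '--storage-sku Premium_LRS' applies one SKU to all disks, while + # '--storage-sku os=Premium_LRS 0=StandardSSD_LRS' sets the OS disk and LUN 0 individually.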
+ allowed_values = 'Allowed values: {}.'.format(allowed_values) + storage_sku_help = 'The SKU of the storage account with which to persist VM. Use a singular sku that would be applied across all disks, ' \ + 'or specify individual disks. {} {}'.format(usage, allowed_values) + + c.argument('os_disk_name', help='The name of the new VM OS disk.') + c.argument('os_type', help='Type of OS installed on a custom VHD. Do not use when specifying a URN or URN alias.', arg_type=get_enum_type(['windows', 'linux'])) + c.argument('storage_account', help="Only applicable when used with `--use-unmanaged-disk`. The name to use when creating a new storage account or referencing an existing one. If omitted, an appropriate storage account in the same resource group and location will be used, or a new one will be created.") + c.argument('storage_sku', nargs='+', help=storage_sku_help) + c.argument('storage_container_name', help="Only applicable when used with `--use-unmanaged-disk`. Name of the storage container for the VM OS disk. Default: vhds") + c.ignore('os_publisher', 'os_offer', 'os_sku', 'os_version', 'storage_profile') + c.argument('use_unmanaged_disk', action='store_true', help='Do not use managed disk to persist VM') + c.argument('os_disk_size_gb', type=int, help='OS disk size in GB to create.') + c.argument('data_disk_sizes_gb', nargs='+', type=int, help='space-separated empty managed data disk sizes in GB to create') + c.ignore('disk_info', 'storage_account_type', 'public_ip_address_type', 'nsg_type', 'nic_type', 'vnet_type', 'load_balancer_type', 'app_gateway_type') + c.argument('os_caching', options_list=[self.deprecate(target='--storage-caching', redirect='--os-disk-caching', hide=True), '--os-disk-caching'], help='Storage caching type for the VM OS disk. Default: ReadWrite', arg_type=get_enum_type(CachingTypes)) + c.argument('data_caching', options_list=['--data-disk-caching'], nargs='+', + help="storage caching type for data disk(s), including 'None', 'ReadOnly', 'ReadWrite', etc. Use a singular value to apply on all disks, or use `<lun>=<value1> <lun>=<value2>` to configure individual disks") + c.argument('ultra_ssd_enabled', ultra_ssd_enabled_type) + c.argument('ephemeral_os_disk', arg_type=get_three_state_flag(), min_api='2018-06-01', + help='Allows you to create an OS disk directly on the host node, providing local disk performance and faster VM/VMSS reimage time.') + c.argument('ephemeral_os_disk_placement', arg_type=ephemeral_placement_type, + help='Only applicable when used with `--ephemeral-os-disk`. Allows you to choose the Ephemeral OS disk provisioning location.') + c.argument('os_disk_encryption_set', min_api='2019-07-01', help='Name or ID of disk encryption set for OS disk.') + c.argument('data_disk_encryption_sets', nargs='+', min_api='2019-07-01', + help='Names or IDs (space delimited) of disk encryption sets for data disks.') + c.argument('data_disk_iops', min_api='2019-07-01', nargs='+', type=int, help='Specify the Read-Write IOPS (space delimited) for the managed disk. Should be used only when StorageAccountType is UltraSSD_LRS. If not specified, a default value would be assigned based on diskSizeGB.') + c.argument('data_disk_mbps', min_api='2019-07-01', nargs='+', type=int, help='Specify the bandwidth in MB per second (space delimited) for the managed disk. Should be used only when StorageAccountType is UltraSSD_LRS. 
If not specified, a default value would be assigned based on diskSizeGB.') + c.argument('specialized', arg_type=get_three_state_flag(), help='Indicate whether the source image is specialized.') + c.argument('encryption_at_host', arg_type=get_three_state_flag(), help='Enable Host Encryption for the VM or VMSS. This will enable the encryption for all the disks including Resource/Temp disk at host itself.') + + for scope in ['vm create', 'vmss create']: + with self.argument_context(scope, arg_group='Network') as c: + c.argument('vnet_name', help='Name of the virtual network when creating a new one or referencing an existing one.') + c.argument('vnet_address_prefix', help='The IP address prefix to use when creating a new VNet in CIDR format.') + c.argument('subnet', help='The name of the subnet when creating a new VNet or referencing an existing one. Can also reference an existing subnet by ID. If both vnet-name and subnet are omitted, an appropriate VNet and subnet will be selected automatically, or a new one will be created.') + c.argument('subnet_address_prefix', help='The subnet IP address prefix to use when creating a new VNet in CIDR format.') + c.argument('nics', nargs='+', help='Names or IDs of existing NICs to attach to the VM. The first NIC will be designated as primary. If omitted, a new NIC will be created. If an existing NIC is specified, do not specify subnet, VNet, public IP or NSG.') + c.argument('private_ip_address', help='Static private IP address (e.g. 10.0.0.5).') + c.argument('public_ip_address', help='Name of the public IP address when creating one (default) or referencing an existing one. Can also reference an existing public IP by ID or specify "" for None (\'""\' in Azure CLI using PowerShell or --% operator). For Azure CLI using powershell core edition 7.3.4, specify '' or "" (--public-ip-address '' or --public-ip-address "")') + c.argument('public_ip_address_allocation', help=None, default=None, arg_type=get_enum_type(['dynamic', 'static'])) + c.argument('public_ip_address_dns_name', help='Globally unique DNS name for a newly created public IP.') + + if self.supported_api_version(min_api='2017-08-01', resource_type=ResourceType.MGMT_NETWORK): + c.argument('public_ip_sku', help='Public IP SKU. The public IP is supported to be created on edge zone only when it is \'Standard\'', + default='Standard', arg_type=get_enum_type(['Basic', 'Standard'])) + + c.argument('nic_delete_option', nargs='+', min_api='2021-03-01', + help='Specify what happens to the network interface when the VM is deleted. Use a singular ' + 'value to apply on all resources, or use `<Name>=<Value>` to configure ' + 'the delete behavior for individual resources. Possible options are Delete and Detach.') + + for scope in ['vm create', 'vmss create']: + with self.argument_context(scope, arg_group='Marketplace Image Plan') as c: + c.argument('plan_name', help='plan name') + c.argument('plan_product', help='plan product') + c.argument('plan_publisher', help='plan publisher') + c.argument('plan_promotion_code', help='plan promotion code') + + for scope in ['vm create', 'vmss create', 'vm identity assign', 'vmss identity assign']: + with self.argument_context(scope) as c: + arg_group = 'Managed Service Identity' if scope.split()[-1] == 'create' else None + c.argument('identity_scope', options_list=['--scope'], arg_group=arg_group, + help="Scope that the system assigned identity can access. 
") + c.ignore('identity_role_id') + + for scope in ['vm create', 'vmss create']: + with self.argument_context(scope) as c: + c.argument('identity_role', options_list=['--role'], arg_group='Managed Service Identity', + help='Role name or id the system assigned identity will have. ') + + for scope in ['vm identity assign', 'vmss identity assign']: + with self.argument_context(scope) as c: + c.argument('identity_role', options_list=['--role'], + help='Role name or id the system assigned identity will have.') + + with self.argument_context('vm auto-shutdown') as c: + c.argument('off', action='store_true', help='Turn off auto-shutdown for VM. Configuration will be cleared.') + c.argument('email', help='The email recipient to send notifications to (can be a list of semi-colon separated email addresses)') + c.argument('time', help='The UTC time of day the schedule will occur every day. Format: hhmm. Example: 1730') + c.argument('webhook', help='The webhook URL to which the notification will be sent') + c.argument('location', validator=get_default_location_from_resource_group) + + for scope in ['vm diagnostics', 'vmss diagnostics']: + with self.argument_context(scope) as c: + c.argument('version', help='version of the diagnostics extension. Will use the latest if not specfied') + c.argument('settings', help='json string or a file path, which defines data to be collected.', type=validate_file_or_dict, completer=FilesCompleter()) + c.argument('protected_settings', help='json string or a file path containing private configurations such as storage account keys, etc.', type=validate_file_or_dict, completer=FilesCompleter()) + c.argument('is_windows_os', action='store_true', help='for Windows VMs') + + for scope in ['vm encryption', 'vmss encryption']: + with self.argument_context(scope) as c: + c.argument('volume_type', help='Type of volume that the encryption operation is performed on', arg_type=get_enum_type(['DATA', 'OS', 'ALL'])) + c.argument('force', action='store_true', help='continue by ignoring client side validation errors') + c.argument('disk_encryption_keyvault', help='Name or ID of the key vault where the generated encryption key will be placed.') + c.argument('key_encryption_key', help='Key vault key name or URL used to encrypt the disk encryption key.') + c.argument('key_encryption_keyvault', help='Name or ID of the key vault containing the key encryption key used to encrypt the disk encryption key. If missing, CLI will use `--disk-encryption-keyvault`.') + + for scope in ['vm extension', 'vmss extension']: + with self.argument_context(scope) as c: + c.argument('publisher', help='The name of the extension publisher.') + c.argument('settings', type=validate_file_or_dict, help='Extension settings in JSON format. A JSON file path is also accepted.') + c.argument('protected_settings', type=validate_file_or_dict, help='Protected settings in JSON format for sensitive information like credentials. A JSON file path is also accepted.') + c.argument('version', help='The version of the extension. 
To pin extension version to this value, please specify --no-auto-upgrade-minor-version.') + c.argument('enable_auto_upgrade', arg_type=get_three_state_flag(), + help='Indicate the extension should be automatically upgraded by the platform if there is a newer version of the extension available.') + + with self.argument_context('vm extension set') as c: + c.argument('vm_extension_name', name_arg_type, + completer=get_resource_name_completion_list('Microsoft.Compute/virtualMachines/extensions'), + help='Name of the extension.', id_part=None) + c.argument('force_update', action='store_true', help='Force the update even if the extension configuration has not changed.') + c.argument('extension_instance_name', extension_instance_name_type) + + with self.argument_context('vmss extension set', min_api='2017-12-01') as c: + c.argument('force_update', action='store_true', help='Force the update even if the extension configuration has not changed.') + c.argument('extension_instance_name', extension_instance_name_type) + c.argument('provision_after_extensions', nargs='+', help='Space-separated list of extension names after which this extension should be provisioned. These extensions must already be set on the VM.') + + for scope in ['vm extension image', 'vmss extension image']: + with self.argument_context(scope) as c: + c.argument('image_location', options_list=['--location', '-l'], help='Image location.') + c.argument('name', help='Image name', id_part=None) + c.argument('publisher_name', options_list=['--publisher', '-p'], help='Image publisher name') + c.argument('type', options_list=['--name', '-n'], help='Name of the extension') + c.argument('latest', action='store_true', help='Show the latest version only.') + c.argument('version', help='Extension version') + c.argument('orderby', help="The $orderby OData query option") + c.argument('top', help='The $top OData query option') + + for scope in ['vm create', 'vm update', 'vmss create', 'vmss update']: + with self.argument_context(scope) as c: + c.argument('license_type', license_type) + c.argument('priority', resource_type=ResourceType.MGMT_COMPUTE, min_api='2019-03-01', + arg_type=get_enum_type(self.get_models('VirtualMachinePriorityTypes'), default=None), + help="Priority. Use 'Spot' to run short-lived workloads in a cost-effective way. The 'Low' enum will be deprecated in the future. Please use 'Spot' to deploy Azure spot VM and/or VMSS. Defaults to Regular.") + c.argument('max_price', min_api='2019-03-01', type=float, is_preview=True, + help='The maximum price (in US Dollars) you are willing to pay for a Spot VM/VMSS. -1 indicates that the Spot VM/VMSS should not be evicted for price reasons') + c.argument('capacity_reservation_group', options_list=['--capacity-reservation-group', '--crg'], + help='The ID or name of the capacity reservation group that is used to allocate. Pass in "None" to disassociate the capacity reservation group. Please note that if you want to delete a VM/VMSS that has been associated with capacity reservation group, you need to disassociate the capacity reservation group first.', + min_api='2021-04-01', is_preview=True) + c.argument('v_cpus_available', type=int, min_api='2021-11-01', help='Specify the number of vCPUs available') + c.argument('v_cpus_per_core', type=int, min_api='2021-11-01', help='Specify the ratio of vCPU to physical core. 
Setting this property to 1 also means that hyper-threading is disabled.') + c.argument('disk_controller_type', disk_controller_type) + c.argument('enable_proxy_agent', arg_type=get_three_state_flag(), min_api='2023-09-01', help='Specify whether proxy agent feature should be enabled on the virtual machine or virtual machine scale set.') + c.argument('proxy_agent_mode', arg_type=get_enum_type(self.get_models('Mode')), min_api='2023-09-01', help='Specify the mode that proxy agent will execute on if the feature is enabled.') + + with self.argument_context('vm update') as c: + c.argument('license_type', license_type) + c.argument('user_data', help='UserData for the VM. It can be passed in as file or string. If empty string is passed in, the existing value will be deleted.', completer=FilesCompleter(), type=file_type, min_api='2021-03-01') + + with self.argument_context('vmss create') as c: + c.argument('priority', resource_type=ResourceType.MGMT_COMPUTE, min_api='2017-12-01', + arg_type=get_enum_type(self.get_models('VirtualMachinePriorityTypes'), default=None), + help="Priority. Use 'Spot' to run short-lived workloads in a cost-effective way. The 'Low' enum will be deprecated in the future. Please use 'Spot' to deploy Azure spot VM and/or VMSS. Defaults to Regular.") + + with self.argument_context('sig') as c: + c.argument('gallery_name', options_list=['--gallery-name', '-r'], help='gallery name') + c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], help='gallery image definition') + c.argument('gallery_image_version', options_list=['--gallery-image-version', '-e'], help='gallery image version') + + with self.argument_context('sig show') as c: + c.argument('gallery_name', options_list=['--gallery-name', '-r'], id_part='name', help='gallery name') + c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], id_part='child_name_1', help='gallery image definition') + c.argument('select', help='The select expression to apply on the operation.') + c.argument('sharing_groups', action='store_true', help='The expand query option to query shared gallery groups') + + with self.argument_context('sig list-shared') as c: + c.argument('location', arg_type=get_location_type(self.cli_ctx)) + c.argument('shared_to', shared_to_type) + + for scope in ['sig share add', 'sig share remove']: + with self.argument_context(scope) as c: + c.argument('gallery_name', type=str, help='The name of the Shared Image Gallery.', id_part='name') + c.argument('subscription_ids', nargs='+', help='A list of subscription ids to share the gallery.') + c.argument('tenant_ids', nargs='+', help='A list of tenant ids to share the gallery.') + + with self.argument_context('sig share add') as c: + c.argument('op_type', default='Add', deprecate_info=c.deprecate(hide=True), + help='Distinguish between the add and remove operations') + + with self.argument_context('sig share remove') as c: + c.argument('op_type', default='Remove', deprecate_info=c.deprecate(hide=True), + help='Distinguish between the add and remove operations') + + with self.argument_context('sig share reset') as c: + c.argument('gallery_name', type=str, help='The name of the Shared Image Gallery.', id_part='name') + + with self.argument_context('sig image-definition create') as c: + c.argument('offer', options_list=['--offer', '-f'], help='image offer') + c.argument('sku', options_list=['--sku', '-s'], help='image sku') + c.argument('publisher', options_list=['--publisher', '-p'], 
help='image publisher') + c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']), help='the type of the OS that is included in the disk if creating a VM from user-image or a specialized VHD') + c.argument('os_state', arg_type=get_enum_type(self.get_models('OperatingSystemStateTypes')), help="This property allows the user to specify whether the virtual machines created under this image are 'Generalized' or 'Specialized'.") + c.argument('hyper_v_generation', arg_type=get_enum_type(self.get_models('HyperVGenerationTypes')), help='The hypervisor generation of the Virtual Machine. Applicable to OS disks only.') + c.argument('minimum_cpu_core', type=int, arg_group='Recommendation', help='minimum cpu cores') + c.argument('maximum_cpu_core', type=int, arg_group='Recommendation', help='maximum cpu cores') + c.argument('minimum_memory', type=int, arg_group='Recommendation', help='minimum memory in MB') + c.argument('maximum_memory', type=int, arg_group='Recommendation', help='maximum memory in MB') + + c.argument('plan_publisher', help='plan publisher', arg_group='Purchase plan') + c.argument('plan_name', help='plan name', arg_group='Purchase plan') + c.argument('plan_product', help='plan product', arg_group='Purchase plan') + + c.argument('eula', help='The Eula agreement for the gallery image') + c.argument('privacy_statement_uri', help='The privacy statement uri') + c.argument('release_note_uri', help='The release note uri') + c.argument('end_of_life_date', help="the end of life date, e.g. '2020-12-31'") + c.argument('disallowed_disk_types', nargs='*', help='disk types which would not work with the image, e.g., Standard_LRS') + c.argument('features', help='A list of gallery image features. E.g. "IsSecureBootSupported=true IsMeasuredBootSupported=false"') + c.argument('architecture', arg_type=get_enum_type(self.get_models('Architecture', operation_group='gallery_images')), min_api='2021-10-01', help='CPU architecture.') + + with self.argument_context('sig image-definition list-shared') as c: + c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name') + c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.', + id_part='child_name_1') + c.argument('shared_to', shared_to_type) + c.argument('marker', arg_type=marker_type) + c.argument('show_next_marker', action='store_true', help='Show nextMarker in result when specified.') + + with self.argument_context('sig create') as c: + c.argument('description', help='the description of the gallery') + + with self.argument_context('sig update') as c: + c.ignore('gallery') + + for scope in ['sig create', 'sig update']: + with self.argument_context(scope) as c: + c.argument('permissions', arg_type=get_enum_type(GallerySharingPermissionTypes), + arg_group='Sharing Profile', + min_api='2020-09-30', + help='This property allows you to specify the permission of sharing gallery.') + c.argument('soft_delete', arg_type=get_three_state_flag(), min_api='2021-03-01', + deprecate_info=Deprecated(self.cli_ctx, hide=True, message_func=lambda x: "Argument '--soft-delete' is in preview and under development. 
Reference and support levels: https://aka.ms/CLI_refstatus"), + help='Enable soft-deletion for resources in this gallery, ' + 'allowing them to be recovered within retention time.') + c.argument('publisher_uri', help='Community gallery publisher uri.') + c.argument('publisher_contact', options_list=['--publisher-email'], + help='Community gallery publisher contact email.') + c.argument('eula', help='Community gallery publisher eula.') + c.argument('public_name_prefix', help='Community gallery public name prefix.') + + with self.argument_context('sig image-definition create') as c: + c.argument('description', help='the description of the gallery image definition') + with self.argument_context('sig image-definition update') as c: + c.ignore('gallery_image') + + with self.argument_context('sig image-version') as c: + deprecated_option = c.deprecate(target='--gallery-image-version-name', redirect='--gallery-image-version', hide=True, expiration="3.0.0") + c.argument('gallery_image_version_name', options_list=['--gallery-image-version', '-e', deprecated_option], + help='Gallery image version in semantic version pattern. The allowed characters are digit and period. Digits must be within the range of a 32-bit integer, e.g. `<MajorVersion>.<MinorVersion>.<Patch>`') + + for scope in ['sig image-version create', 'sig image-version undelete']: + with self.argument_context(scope, resource_type=ResourceType.MGMT_COMPUTE, operation_group='gallery_image_versions') as c: + c.argument('gallery_image_version', options_list=['--gallery-image-version', '-e'], + help='Gallery image version in semantic version pattern. The allowed characters are digit and period. Digits must be within the range of a 32-bit integer, e.g. `<MajorVersion>.<MinorVersion>.<Patch>`') + + with self.argument_context('sig image-version create', resource_type=ResourceType.MGMT_COMPUTE, operation_group='gallery_image_versions') as c: + c.argument('description', help='the description of the gallery image version') + c.argument('managed_image', help='image name (if in the same resource group) or resource id') + c.argument('os_snapshot', help='Name or ID of OS disk snapshot') + c.argument('data_snapshots', nargs='+', help='Names or IDs (space-delimited) of data disk snapshots') + c.argument('data_snapshot_luns', nargs='+', help='Logical unit numbers (space-delimited) of data disk snapshots') + c.argument('exclude_from_latest', arg_type=get_three_state_flag(), help='The flag means that if it is set to true, people deploying VMs with version omitted will not use this version.') + c.argument('version', help='image version') + c.argument('end_of_life_date', help="the end of life date, e.g. '2020-12-31'") + c.argument('storage_account_type', help="The default storage account type to be used per region. To set regional storage account types, use --target-regions", + arg_type=get_enum_type(["Standard_LRS", "Standard_ZRS", "Premium_LRS"]), min_api='2019-03-01') + c.argument('target_region_encryption', nargs='+', + help='Space-separated list of customer managed keys for encrypting the OS and data disks in the gallery artifact for each region. Format for each region: `<os_des>,<lun1>,<lun1_des>,<lun2>,<lun2_des>`. 
Use "null" as a placeholder.') + c.argument('os_vhd_uri', help='Source VHD URI of OS disk') + c.argument('os_vhd_storage_account', help='Name or ID of storage account of source VHD URI of OS disk') + c.argument('data_vhds_uris', nargs='+', help='Source VHD URIs (space-delimited) of data disks') + c.argument('data_vhds_luns', nargs='+', help='Logical unit numbers (space-delimited) of source VHD URIs of data disks') + c.argument('data_vhds_storage_accounts', options_list=['--data-vhds-storage-accounts', '--data-vhds-sa'], nargs='+', help='Names or IDs (space-delimited) of storage accounts of source VHD URIs of data disks') + c.argument('replication_mode', min_api='2021-07-01', arg_type=get_enum_type(ReplicationMode), help='Optional parameter which specifies the mode to be used for replication. This property is not updatable.') + c.argument('target_region_cvm_encryption', nargs='+', min_api='2021-10-01', help='Space-separated list of customer managed keys for Confidential VM encrypting the OS disk in the gallery artifact for each region. Format for each region: `<os_cvm_encryption_type>,<os_cvm_des>`. The valid values for os_cvm_encryption_type are EncryptedVMGuestStateOnlyWithPmk, EncryptedWithPmk, EncryptedWithCmk.') + c.argument('virtual_machine', help='Resource id of VM source') + c.argument('image_version', help='Resource id of gallery image version source') + c.argument('target_zone_encryption', nargs='+', min_api='2022-01-03', + options_list=['--target-edge-zone-encryption', '--zone-encryption'], + help='Space-separated list of customer managed keys for encrypting the OS and data disks in the gallery artifact for each region. ' + 'Format for each edge zone: `<edge zone>,<os_des>,<lun1>,<lun1_des>,<lun2>,<lun2_des>`.') + + with self.argument_context('sig image-version list-shared') as c: + c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name') + c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.', + id_part='child_name_1') + c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], type=str, help='The name ' + 'of the Shared Gallery Image Definition from which the Image Versions are to be listed.', + id_part='child_name_2') + c.argument('shared_to', shared_to_type) + c.argument('marker', arg_type=marker_type) + c.argument('show_next_marker', action='store_true', help='Show nextMarker in result when specified.') + + with self.argument_context('sig image-version show') as c: + c.argument('expand', help="The expand expression to apply on the operation, e.g. 'ReplicationStatus'") + + with self.argument_context('sig image-version show-shared') as c: + c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name') + c.argument('gallery_unique_name', type=str, help='The unique name of the Shared Gallery.', + id_part='child_name_1') + c.argument('gallery_image_name', options_list=['--gallery-image-definition', '-i'], type=str, help='The name ' + 'of the Shared Gallery Image Definition from which the Image Versions are to be listed.', + id_part='child_name_2') + c.argument('gallery_image_version_name', options_list=['--gallery-image-version', '-e'], type=str, help='The ' + 'name of the gallery image version to be shown. Needs to follow semantic version name pattern: ' + 'The allowed characters are digit and period. Digits must be within the range of a 32-bit integer. 
' + 'Format: `<MajorVersion>.<MinorVersion>.<Patch>`', id_part='child_name_3') + + for scope in ['sig image-version create', 'sig image-version update']: + with self.argument_context(scope, operation_group='gallery_image_versions') as c: + c.argument('target_regions', nargs='*', + help='Space-separated list of regions and their replica counts. Use `<region>[=<replica count>][=<storage account type>]` to optionally set the replica count and/or storage account type for each region. ' + 'If a replica count is not specified, the default replica count will be used. If a storage account type is not specified, the default storage account type will be used') + c.argument('replica_count', help='The default number of replicas to be created per region. To set regional replication counts, use --target-regions', type=int) + c.argument('target_edge_zones', nargs='*', min_api='2022-01-03', + help='Space-separated list of regions, edge zones, replica counts and storage types. Use `<region>=<edge zone>[=<replica count>][=<storage account type>]` to optionally set the replica count and/or storage account type for each region. ' + 'If a replica count is not specified, the default replica count will be used. If a storage account type is not specified, the default storage account type will be used. ' + 'If "--target-edge-zones None" is specified, the target extended locations will be cleared.') + + for scope in ['sig image-version create', 'sig image-version update', 'sig image-version undelete']: + with self.argument_context(scope, operation_group='gallery_image_versions') as c: + c.argument('allow_replicated_location_deletion', arg_type=get_three_state_flag(), min_api='2022-03-03', help='Indicate whether or not removing this gallery image version from replicated regions is allowed.') + + with self.argument_context('sig list-community') as c: + c.argument('location', arg_type=get_location_type(self.cli_ctx)) + c.argument('marker', arg_type=marker_type) + c.argument('show_next_marker', action='store_true', help='Show nextMarker in result when specified.') + + with self.argument_context('sig image-definition show-community') as c: + c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name') + c.argument('public_gallery_name', public_gallery_name_type) + c.argument('gallery_image_name', gallery_image_name_type) + + with self.argument_context('sig image-definition list-community') as c: + c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name') + c.argument('public_gallery_name', public_gallery_name_type) + c.argument('marker', arg_type=marker_type) + c.argument('show_next_marker', action='store_true', help='Show nextMarker in result when specified.') + + with self.argument_context('sig image-version show-community') as c: + c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name') + c.argument('public_gallery_name', public_gallery_name_type) + c.argument('gallery_image_name', gallery_image_name_type) + c.argument('gallery_image_version_name', gallery_image_name_version_type) + + with self.argument_context('sig image-version list-community') as c: + c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name') + c.argument('public_gallery_name', public_gallery_name_type) + c.argument('gallery_image_name', gallery_image_name_type) + c.argument('marker', arg_type=marker_type) + c.argument('show_next_marker', action='store_true', help='Show nextMarker in result when specified.') + + with self.argument_context('sig share enable-community') as c: + c.argument('gallery_name', type=str, help='The name of the Shared Image Gallery.', id_part='name') + 
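# Like 'sig share add' / 'sig share remove' above, the hidden 'op_type' argument below routes this call to the EnableCommunity operation. +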
c.argument('subscription_ids', nargs='+', help='A list of subscription ids to share the gallery.') + c.argument('tenant_ids', nargs='+', help='A list of tenant ids to share the gallery.') + c.argument('op_type', default='EnableCommunity', deprecate_info=c.deprecate(hide=True), + help='Distinguish between the add and remove operations') + + # endregion + + # region Gallery applications + with self.argument_context('sig gallery-application') as c: + c.argument('gallery_application_name', options_list=['--name', '-n', '--application-name'], + help='The name of the gallery Application') + + with self.argument_context('sig gallery-application create') as c: + c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False, + validator=get_default_location_from_resource_group) + c.argument('description', help='The description of this gallery Application Definition resource. ' + 'This property is updatable.') + c.argument('os_type', arg_type=get_enum_type(['Windows', 'Linux']), help='This property allows you ' + 'to specify the supported type of the OS that the application is built for.
<br><br> Possible values ' + 'are: <br><br> **Windows** <br><br> **Linux**') + + with self.argument_context('sig gallery-application update') as c: + c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False, + validator=get_default_location_from_resource_group) + c.argument('description', help='The description of this gallery Application Definition resource. ' + 'This property is updatable.') + + with self.argument_context('sig gallery-application version') as c: + c.argument('gallery_application_name', options_list=['--application-name'], + help='The name of the gallery Application') + c.argument('gallery_application_version_name', options_list=['--name', '-n', '--version-name'], + help='The name of the gallery Application Version') + + with self.argument_context('sig gallery-application version create') as c: + c.argument('package_file_name', help='The name to assign the downloaded package file on the VM. This is limited to 4096 characters. ' + 'If not specified, the package file will be named the same as the Gallery Application name.') + c.argument('config_file_name', help='The name to assign the downloaded config file on the VM. This is limited to 4096 characters. ' + 'If not specified, the config file will be named the Gallery Application name appended with "_config"') + + for scope in ['create', 'update']: + with self.argument_context('sig gallery-application version {}'.format(scope)) as c: + c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False, + validator=get_default_location_from_resource_group) + c.argument('tags', tags_type) + c.argument('package_file_link', help='The mediaLink of the artifact, must be a readable storage page blob.') + c.argument('install_command', help='The path and arguments to install the gallery application.') + c.argument('remove_command', help='The path and arguments to remove the gallery application.') + c.argument('update_command', help='The path and arguments to update the gallery application. If not present,' + ' then update operation will invoke remove command on the previous version' + ' and install command on the current version of the gallery application.') + c.argument('target_regions', type=validate_file_or_dict, help='The target regions where the Image Version is ' + 'going to be replicated to. This property is updatable. Expected value: ' + 'json-string/json-file/@json-file.') + c.argument('default_file_link', help='The default configuration link of the artifact, must be a readable storage page blob.') + c.argument('exclude_from', arg_type=get_three_state_flag(), help='If set to true, Virtual Machines ' + 'deployed from the latest version of the Image Definition won\'t use this Image Version.', + arg_group='Publishing Profile') + c.argument('end_of_life_date', help='The end of life date of the gallery image version. This property can be ' + 'used for decommissioning purposes. 
This property is updatable.', arg_group='Publishing Profile') + # endregion + + # region Proximity Placement Group + with self.argument_context('ppg', min_api='2018-04-01') as c: + c.argument('proximity_placement_group_name', arg_type=name_arg_type, help="The name of the proximity placement group.") + + with self.argument_context('ppg create') as c: + c.argument('tags', tags_type, min_api='2018-04-01') + c.argument('zone', zone_type, min_api='2021-11-01') + + for scope in ['ppg create', 'ppg update']: + with self.argument_context(scope) as c: + c.argument('ppg_type', options_list=['--type', '-t'], arg_type=get_enum_type(self.get_models('ProximityPlacementGroupType')), min_api='2018-04-01', help="The type of the proximity placement group.") + c.argument('intent_vm_sizes', nargs='*', min_api='2021-11-01', help="Specify possible sizes of virtual machines that can be created in the proximity placement group.") + + with self.argument_context('vm create', min_api='2018-04-01') as c: + c.argument('proximity_placement_group', options_list=['--ppg'], + help="The name or ID of the proximity placement group the VM should be associated with.", + validator=_validate_proximity_placement_group) + + with self.argument_context('vmss create', min_api='2018-04-01') as c: + c.argument('proximity_placement_group', options_list=['--ppg'], + help="The name or ID of the proximity placement group the VMSS should be associated with.", + validator=_validate_proximity_placement_group) + + with self.argument_context('vm availability-set create', min_api='2018-04-01') as c: + c.argument('proximity_placement_group', options_list=['--ppg'], + help="The name or ID of the proximity placement group the availability set should be associated with.", + validator=_validate_proximity_placement_group) + + with self.argument_context('vm update', min_api='2018-04-01') as c: + c.argument('proximity_placement_group', options_list=['--ppg'], + help="The name or ID of the proximity placement group the VM should be associated with.", + validator=_validate_proximity_placement_group) + + with self.argument_context('vmss update', min_api='2018-04-01') as c: + c.argument('proximity_placement_group', options_list=['--ppg'], + help="The name or ID of the proximity placement group the VMSS should be associated with.", + validator=_validate_proximity_placement_group) + + with self.argument_context('vm availability-set update', min_api='2018-04-01') as c: + c.argument('proximity_placement_group', options_list=['--ppg'], + help="The name or ID of the proximity placement group the availability set should be associated with.", + validator=_validate_proximity_placement_group) + # endregion + + # region VM Monitor + with self.argument_context('vm monitor log show') as c: + c.argument('analytics_query', options_list=['--analytics-query', '-q'], help="Query to execute over Log Analytics data.") + c.argument('timespan', help="Timespan over which to query. 
Defaults to querying all available data.") + + with self.argument_context('vm monitor metrics') as c: + c.argument('metricnamespace', options_list=['--namespace'], + help='Namespace to query metric definitions for.') + + with self.argument_context('vm monitor metrics tail') as c: + from azure.mgmt.monitor.models import AggregationType + c.extra('resource_group_name', required=True) + c.argument('resource', arg_type=existing_vm_name, help='Name or ID of a virtual machine', validator=validate_vm_name_for_monitor_metrics, id_part=None) + c.argument('metadata', action='store_true') + c.argument('dimension', nargs='*', validator=validate_metric_dimension) + c.argument('aggregation', arg_type=get_enum_type(t for t in AggregationType if t.name != 'none'), nargs='*') + c.argument('metrics', nargs='*') + c.argument('orderby', + help='Aggregation to use for sorting results and the direction of the sort. Only one order can be specified. Examples: sum asc') + c.argument('top', help='Max number of records to retrieve. Valid only if --filter is used.') + c.argument('filters', options_list=['--filter']) + c.argument('metric_namespace', options_list=['--namespace']) + + with self.argument_context('vm monitor metrics tail', arg_group='Time') as c: + c.argument('start_time', arg_type=get_datetime_type(help='Start time of the query.')) + c.argument('end_time', arg_type=get_datetime_type(help='End time of the query. Defaults to the current time.')) + c.argument('offset', type=get_period_type(as_timedelta=True)) + c.argument('interval', arg_group='Time', type=get_period_type()) + + with self.argument_context('vm monitor metrics list-definitions') as c: + c.extra('resource_group_name', required=True) + c.argument('resource_uri', arg_type=existing_vm_name, help='Name or ID of a virtual machine', validator=validate_vm_name_for_monitor_metrics, id_part=None) + # endregion + + # region disk encryption set + with self.argument_context('disk-encryption-set') as c: + c.argument('disk_encryption_set_name', disk_encryption_set_name) + c.argument('key_url', help='URL pointing to a key or secret in KeyVault.') + c.argument('source_vault', help='Name or ID of the KeyVault containing the key or secret.') + c.argument('encryption_type', arg_type=get_enum_type(['EncryptionAtRestWithPlatformKey', 'EncryptionAtRestWithCustomerKey', 'EncryptionAtRestWithPlatformAndCustomerKeys', 'ConfidentialVmEncryptedWithCustomerKey']), + help='The type of key used to encrypt the data of the disk. EncryptionAtRestWithPlatformKey: Disk is encrypted at rest with Platform managed key. It is the default encryption type. EncryptionAtRestWithCustomerKey: Disk is encrypted at rest with Customer managed key that can be changed and revoked by a customer. EncryptionAtRestWithPlatformAndCustomerKeys: Disk is encrypted at rest with 2 layers of encryption. One of the keys is Customer managed and the other key is Platform managed. ConfidentialVmEncryptedWithCustomerKey: An additional encryption type accepted for confidential VM. 
Disk is encrypted at rest with Customer managed key.') + c.argument('location', validator=get_default_location_from_resource_group) + c.argument('tags', tags_type) + c.argument('enable_auto_key_rotation', arg_type=get_three_state_flag(), min_api='2020-12-01', + options_list=['--enable-auto-key-rotation', '--auto-rotation'], + help='Enable automatic rotation of keys.') + + with self.argument_context('disk-encryption-set create', operation_group='disk_encryption_sets', + min_api='2022-03-02') as c: + c.argument('federated_client_id', help='The federated client id used in a cross-tenant scenario.') + c.argument('mi_system_assigned', arg_group='Managed Identity', arg_type=get_three_state_flag(), + help='Provide this flag to use system assigned identity. Check out help for more examples') + c.argument('mi_user_assigned', arg_group='Managed Identity', nargs='+', + help='User Assigned Identity ids to be used for disk encryption set. ' + 'Check out help for more examples') + + with self.argument_context('disk-encryption-set update', operation_group='disk_encryption_sets', + min_api='2022-03-02') as c: + c.argument('federated_client_id', help='The federated client id used in a cross-tenant scenario.') + + with self.argument_context('disk-encryption-set identity', operation_group='disk_encryption_sets', + min_api='2022-03-02') as c: + c.argument('mi_system_assigned', options_list=['--system-assigned'], + arg_group='Managed Identity', arg_type=get_three_state_flag(), + help='Provide this flag to use system assigned identity for disk encryption set. ' + 'Check out help for more examples') + c.argument('mi_user_assigned', options_list=['--user-assigned'], arg_group='Managed Identity', nargs='*', + help='User Assigned Identity ids to be used for disk encryption set. ' + 'Check out help for more examples') + # endregion + + # region DiskAccess + with self.argument_context('disk-access', resource_type=ResourceType.MGMT_COMPUTE, operation_group='disk_accesses') as c: + c.argument('disk_access_name', arg_type=name_arg_type, help='Name of the disk access resource.', id_part='name') + c.argument('location', validator=get_default_location_from_resource_group) + c.argument('tags', tags_type) + # endregion + + # region Capacity + with self.argument_context('capacity reservation group') as c: + c.argument('location', arg_type=get_location_type(self.cli_ctx), validator=get_default_location_from_resource_group) + c.argument('capacity_reservation_group_name', options_list=['--capacity-reservation-group', '-n'], + help='The name of the capacity reservation group.') + c.argument('tags', tags_type) + c.argument('sharing_profile', nargs='*', help='Space-separated subscription resource IDs or nothing. Specify the settings to enable sharing across subscriptions for the capacity reservation group resource. Specify it with no value to stop sharing.') + + with self.argument_context('capacity reservation group create') as c: + c.argument('zones', zones_type, help='Availability Zones to use for this capacity reservation group. If not provided, the group supports only regional resources in the region. 
If provided, enforces each capacity reservation in the group to be in one of the zones.') + + with self.argument_context('capacity reservation group show') as c: + c.argument('instance_view', action='store_true', options_list=['--instance-view', '-i'], help='Retrieve the list of instance views of the capacity reservations under the capacity reservation group, which is a snapshot of the runtime properties of a capacity reservation that is managed by the platform and can change outside of control plane operations.') + + with self.argument_context('capacity reservation') as c: + c.argument('location', arg_type=get_location_type(self.cli_ctx), validator=get_default_location_from_resource_group) + c.argument('capacity_reservation_group_name', options_list=['--capacity-reservation-group', '-c'], + help='The name of the capacity reservation group.') + c.argument('capacity_reservation_name', options_list=['--capacity-reservation-name', '-n'], + help='The name of the capacity reservation.') + c.argument('capacity', type=int, help='Specify the number of virtual machines to be reserved.') + c.argument('tags', tags_type) + + with self.argument_context('capacity reservation create') as c: + c.argument('zone', zone_type, help='Availability Zone to use for this capacity reservation. The zone must be a single value and must be one of the zones specified during capacity reservation group creation. If not provided, the reservation supports only non-zonal deployments. If provided, enforces the VM/VMSS using this capacity reservation to be in the same zone.') + c.argument('sku_name', options_list=['--sku', '-s'], required=True, help='The SKU of the resource for which capacity needs to be reserved. Currently, VM SKUs with the capability called "CapacityReservationSupported" set to true are supported. Refer to List Microsoft.Compute SKUs in a region (https://docs.microsoft.com/rest/api/compute/resourceskus/list) for supported values.') + + with self.argument_context('capacity reservation show') as c: + c.argument('instance_view', action='store_true', options_list=['--instance-view', '-i'], help='Retrieve a snapshot of the runtime properties of the capacity reservation that is managed by the platform and can change outside of control plane operations.') + # endregion + + # region Restore point + with self.argument_context('restore-point') as c: + c.argument('restore_point_collection_name', options_list=['--collection-name'], + help='The name of the restore point collection.') + + with self.argument_context('restore-point create') as c: + c.argument('restore_point_name', options_list=['--name', '-n', '--restore-point-name'], + help='The name of the restore point.') + c.argument('exclude_disks', nargs='+', help='List of disk resource ids that the ' + 'customer wishes to exclude from the restore point. If no disks are specified, all disks will be ' + 'included.') + c.argument('source_restore_point', help='Resource Id of the source restore point from which a copy needs to be created') + c.argument('consistency_mode', arg_type=get_enum_type(self.get_models('ConsistencyModeTypes')), is_preview=True, min_api='2021-07-01', help='Consistency mode of the restore point. Can be specified in the input while creating a restore point. For now, only CrashConsistent is accepted as a valid input. 
Please refer to https://aka.ms/RestorePoints for more details.') + c.argument('source_os_resource', help='Resource Id of the source OS disk') + c.argument('os_restore_point_encryption_set', help='Customer managed OS disk encryption set resource id') + c.argument('os_restore_point_encryption_type', arg_type=get_enum_type(self.get_models('RestorePointEncryptionType')), help='The type of key used to encrypt the data of the OS disk restore point.') + c.argument('source_data_disk_resource', nargs='+', help='Resource Id of the source data disk') + c.argument('data_disk_restore_point_encryption_set', nargs='+', help='Customer managed data disk encryption set resource id') + c.argument('data_disk_restore_point_encryption_type', nargs='+', arg_type=get_enum_type(self.get_models('RestorePointEncryptionType')), help='The type of key used to encrypt the data of the data disk restore point.') + + with self.argument_context('restore-point show') as c: + c.argument('restore_point_name', options_list=['--name', '-n', '--restore-point-name'], + help='The name of the restore point.') + c.argument('expand', help='The expand expression to apply on the operation.', + deprecate_info=c.deprecate(hide=True)) + c.argument('instance_view', action='store_true', help='Show the instance view of a restore point.') + + with self.argument_context('restore-point wait') as c: + c.argument('restore_point_name', options_list=['--name', '-n', '--restore-point-name'], + help='The name of the restore point.') + # endregion + + # region Restore point collection + with self.argument_context('restore-point collection create') as c: + c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False, + validator=get_default_location_from_resource_group) + c.argument('tags', tags_type) + c.argument('source_id', help='Resource Id of the source resource used to create this restore point collection', + arg_group='Source') + + with self.argument_context('restore-point collection update') as c: + c.argument('tags', tags_type) + + with self.argument_context('restore-point collection show') as c: + c.argument('expand', help='The expand expression to apply on the operation.', + deprecate_info=c.deprecate(hide=True)) + c.argument('restore_points', action='store_true', help='Show all contained restore points in the restore point collection.') + # endregion diff --git a/src/azure-cli/azure/cli/command_modules/vm/azure_stack/_template_builder.py b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/_template_builder.py new file mode 100644 index 00000000000..6236a6f7d57 --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/_template_builder.py @@ -0,0 +1,1652 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# -------------------------------------------------------------------------------------------- + + +from enum import Enum + +from knack.log import get_logger +from knack.util import CLIError + +from azure.cli.command_modules.vm.azure_stack._vm_utils import get_target_network_api +from azure.cli.core.azclierror import ValidationError, InvalidArgumentValueError +from azure.cli.core.commands.arm import ArmTemplateBuilder +from azure.cli.core.profiles import ResourceType +from azure.cli.core.util import b64encode + +logger = get_logger(__name__) + + +# pylint: disable=too-few-public-methods +class StorageProfile(Enum): + SAPirImage = 1 + SACustomImage = 2 + SASpecializedOSDisk = 3 + ManagedPirImage = 4 # this is the main scenario + ManagedCustomImage = 5 + ManagedSpecializedOSDisk = 6 + SharedGalleryImage = 7 + CommunityGalleryImage = 8 + + +def build_deployment_resource(name, template, dependencies=None): + dependencies = dependencies or [] + deployment = { + 'name': name, + 'type': 'Microsoft.Resources/deployments', + 'apiVersion': '2015-01-01', + 'dependsOn': dependencies, + 'properties': { + 'mode': 'Incremental', + 'template': template, + } + } + return deployment + + +def build_output_deployment_resource(key, property_name, property_provider, property_type, + parent_name=None, output_type='object', path=None): + from azure.cli.core.util import random_string + output_tb = ArmTemplateBuilder() + output_tb.add_output(key, property_name, property_provider, property_type, + output_type=output_type, path=path) + output_template = output_tb.build() + + deployment_name = '{}_{}'.format(property_name, random_string(16)) + deployment = { + 'name': deployment_name, + 'type': 'Microsoft.Resources/deployments', + 'apiVersion': '2015-01-01', + 'properties': { + 'mode': 'Incremental', + 'template': output_template, + } + } + deployment['dependsOn'] = [] if not parent_name \ + else ['Microsoft.Resources/deployments/{}'.format(parent_name)] + + return deployment + + +def build_storage_account_resource(_, name, location, tags, sku, edge_zone=None): + storage_account = { + 'type': 'Microsoft.Storage/storageAccounts', + 'name': name, + 'apiVersion': '2015-06-15', + 'location': location, + 'tags': tags, + 'dependsOn': [], + 'properties': {'accountType': sku} + } + + if edge_zone: + storage_account['apiVersion'] = '2021-04-01' + storage_account['extendedLocation'] = edge_zone + + return storage_account + + +def build_public_ip_resource(cmd, name, location, tags, address_allocation, dns_name, sku, zone, count=None, + edge_zone=None): + public_ip_properties = {'publicIPAllocationMethod': address_allocation} + + if dns_name: + public_ip_properties['dnsSettings'] = {'domainNameLabel': dns_name} + + public_ip = { + 'apiVersion': get_target_network_api(cmd.cli_ctx), + 'type': 'Microsoft.Network/publicIPAddresses', + 'name': name, + 'location': location, + 'tags': tags, + 'dependsOn': [], + 'properties': public_ip_properties + } + + if count: + public_ip['name'] = "[concat('{}', copyIndex())]".format(name) + public_ip['copy'] = { + 'name': 'publicipcopy', + 'mode': 'parallel', + 'count': count + } + + # when multiple zones are provided (through a cross-zone scale set), we don't propagate them to the PIP because it + # doesn't support cross-zone; rather, we rely on the Standard LB to work with such scale sets + if zone and len(zone) == 1: + public_ip['zones'] = zone + + if sku and cmd.supported_api_version(ResourceType.MGMT_NETWORK, min_api='2017-08-01'): + public_ip['sku'] = {'name': sku} + + # The edge zones are only 
built out using Standard SKU Public IPs + if edge_zone and sku.lower() == 'standard': + public_ip['apiVersion'] = '2021-02-01' + public_ip['extendedLocation'] = edge_zone + + return public_ip + + +def build_nic_resource(_, name, location, tags, vm_name, subnet_id, private_ip_address=None, + nsg_id=None, public_ip_id=None, application_security_groups=None, accelerated_networking=None, + count=None, edge_zone=None): + private_ip_allocation = 'Static' if private_ip_address else 'Dynamic' + ip_config_properties = { + 'privateIPAllocationMethod': private_ip_allocation, + 'subnet': {'id': subnet_id} + } + + if private_ip_address: + ip_config_properties['privateIPAddress'] = private_ip_address + + if public_ip_id: + ip_config_properties['publicIPAddress'] = {'id': public_ip_id} + if count: + ip_config_properties['publicIPAddress']['id'] = "[concat('{}', copyIndex())]".format(public_ip_id) + + ipconfig_name = 'ipconfig{}'.format(vm_name) + nic_properties = { + 'ipConfigurations': [ + { + 'name': ipconfig_name, + 'properties': ip_config_properties + } + ] + } + if count: + nic_properties['ipConfigurations'][0]['name'] = "[concat('{}', copyIndex())]".format(ipconfig_name) + + if nsg_id: + nic_properties['networkSecurityGroup'] = {'id': nsg_id} + + api_version = '2015-06-15' + if application_security_groups: + asg_ids = [{'id': x['id']} for x in application_security_groups] + nic_properties['ipConfigurations'][0]['properties']['applicationSecurityGroups'] = asg_ids + api_version = '2017-09-01' + + if accelerated_networking is not None: + nic_properties['enableAcceleratedNetworking'] = accelerated_networking + api_version = '2016-09-01' if api_version < '2016-09-01' else api_version + + nic = { + 'apiVersion': api_version, + 'type': 'Microsoft.Network/networkInterfaces', + 'name': name, + 'location': location, + 'tags': tags, + 'dependsOn': [], + 'properties': nic_properties + } + + if count: + nic['name'] = "[concat('{}', copyIndex())]".format(name) + nic['copy'] = { + 'name': 'niccopy', + 'mode': 'parallel', + 'count': count + } + + if edge_zone: + nic['extendedLocation'] = edge_zone + nic['apiVersion'] = '2021-02-01' + + return nic + + +def build_nsg_resource(_, name, location, tags, nsg_rule): + nsg = { + 'type': 'Microsoft.Network/networkSecurityGroups', + 'name': name, + 'apiVersion': '2015-06-15', + 'location': location, + 'tags': tags, + 'dependsOn': [] + } + + if nsg_rule != 'NONE': + rule_name = 'rdp' if nsg_rule == 'RDP' else 'default-allow-ssh' + rule_dest_port = '3389' if nsg_rule == 'RDP' else '22' + + nsg_properties = { + 'securityRules': [ + { + 'name': rule_name, + 'properties': { + 'protocol': 'Tcp', + 'sourcePortRange': '*', + 'destinationPortRange': rule_dest_port, + 'sourceAddressPrefix': '*', + 'destinationAddressPrefix': '*', + 'access': 'Allow', + 'priority': 1000, + 'direction': 'Inbound' + } + } + ] + } + + nsg['properties'] = nsg_properties + + return nsg + + +def build_vnet_resource(_, name, location, tags, vnet_prefix=None, subnet=None, + subnet_prefix=None, dns_servers=None, edge_zone=None): + vnet = { + 'name': name, + 'type': 'Microsoft.Network/virtualNetworks', + 'location': location, + 'apiVersion': '2015-06-15', + 'dependsOn': [], + 'tags': tags, + 'properties': { + 'addressSpace': {'addressPrefixes': [vnet_prefix]}, + } + } + if dns_servers: + vnet['properties']['dhcpOptions'] = { + 'dnsServers': dns_servers + } + if subnet: + vnet['properties']['subnets'] = [{ + 'name': subnet, + 'properties': { + 'addressPrefix': subnet_prefix + } + }] + if edge_zone: + 
vnet['extendedLocation'] = edge_zone + vnet['apiVersion'] = '2021-02-01' + + return vnet + + +def build_msi_role_assignment(vm_vmss_name, vm_vmss_resource_id, role_definition_id, + role_assignment_guid, identity_scope, is_vm=True): + from azure.mgmt.core.tools import parse_resource_id + result = parse_resource_id(identity_scope) + if result.get('type'): # is a resource id? + name = '{}/Microsoft.Authorization/{}'.format(result['name'], role_assignment_guid) + assignment_type = '{}/{}/providers/roleAssignments'.format(result['namespace'], result['type']) + else: + name = role_assignment_guid + assignment_type = 'Microsoft.Authorization/roleAssignments' + + # pylint: disable=line-too-long + msi_rp_api_version = '2019-07-01' + return { + 'name': name, + 'type': assignment_type, + 'apiVersion': '2015-07-01', # the minimum api-version to create the assignment + 'dependsOn': [ + 'Microsoft.Compute/{}/{}'.format('virtualMachines' if is_vm else 'virtualMachineScaleSets', vm_vmss_name) + ], + 'properties': { + 'roleDefinitionId': role_definition_id, + 'principalId': "[reference('{}', '{}', 'Full').identity.principalId]".format( + vm_vmss_resource_id, msi_rp_api_version), + 'scope': identity_scope + } + } + + +def build_vm_resource( # pylint: disable=too-many-locals, too-many-statements, too-many-branches + cmd, name, location, tags, size, storage_profile, nics, admin_username, + availability_set_id=None, admin_password=None, ssh_key_values=None, ssh_key_path=None, + image_reference=None, os_disk_name=None, custom_image_os_type=None, authentication_type=None, + os_publisher=None, os_offer=None, os_sku=None, os_version=None, os_vhd_uri=None, + attach_os_disk=None, os_disk_size_gb=None, custom_data=None, secrets=None, license_type=None, zone=None, + disk_info=None, boot_diagnostics_storage_uri=None, ultra_ssd_enabled=None, proximity_placement_group=None, + computer_name=None, dedicated_host=None, priority=None, max_price=None, eviction_policy=None, + enable_agent=None, vmss=None, os_disk_encryption_set=None, data_disk_encryption_sets=None, specialized=None, + encryption_at_host=None, dedicated_host_group=None, enable_auto_update=None, patch_mode=None, + enable_hotpatching=None, platform_fault_domain=None, security_type=None, enable_secure_boot=None, + enable_vtpm=None, count=None, edge_zone=None, os_disk_delete_option=None, user_data=None, + capacity_reservation_group=None, enable_hibernation=None, v_cpus_available=None, v_cpus_per_core=None, + os_disk_security_encryption_type=None, os_disk_secure_vm_disk_encryption_set=None, disk_controller_type=None, + enable_proxy_agent=None, proxy_agent_mode=None): + os_caching = disk_info['os'].get('caching') + + def _build_os_profile(): + + special_chars = '`~!@#$%^&*()=+_[]{}\\|;:\'\",<>/?' + + # _computer_name is used to avoid shadowing the 'computer_name' parameter + _computer_name = computer_name or ''.join(filter(lambda x: x not in special_chars, name)) + + os_profile = { + # Use name as computer_name if it's not provided. Remove special characters from name. 
+ 'computerName': _computer_name, + 'adminUsername': admin_username + } + + if count: + os_profile['computerName'] = "[concat('{}', copyIndex())]".format(_computer_name) + + if admin_password: + os_profile['adminPassword'] = "[parameters('adminPassword')]" + + if custom_data: + os_profile['customData'] = b64encode(custom_data) + + if ssh_key_values and ssh_key_path: + os_profile['linuxConfiguration'] = { + 'disablePasswordAuthentication': authentication_type == 'ssh', + 'ssh': { + 'publicKeys': [ + { + 'keyData': ssh_key_value, + 'path': ssh_key_path + } for ssh_key_value in ssh_key_values + ] + } + } + + if enable_agent is not None: + if custom_image_os_type.lower() == 'linux': + if 'linuxConfiguration' not in os_profile: + os_profile['linuxConfiguration'] = {} + os_profile['linuxConfiguration']['provisionVMAgent'] = enable_agent + elif custom_image_os_type.lower() == 'windows': + if 'windowsConfiguration' not in os_profile: + os_profile['windowsConfiguration'] = {} + os_profile['windowsConfiguration']['provisionVMAgent'] = enable_agent + + if secrets: + os_profile['secrets'] = secrets + + if enable_auto_update is not None and custom_image_os_type.lower() == 'windows': + os_profile['windowsConfiguration']['enableAutomaticUpdates'] = enable_auto_update + + # Windows patch settings + if patch_mode is not None and custom_image_os_type.lower() == 'windows': + if patch_mode.lower() not in ['automaticbyos', 'automaticbyplatform', 'manual']: + raise ValidationError( + 'Invalid value of --patch-mode for Windows VM. Valid values are AutomaticByOS, ' + 'AutomaticByPlatform, Manual.') + os_profile['windowsConfiguration']['patchSettings'] = { + 'patchMode': patch_mode, + 'enableHotpatching': enable_hotpatching + } + + # Linux patch settings + if patch_mode is not None and custom_image_os_type.lower() == 'linux': + if patch_mode.lower() not in ['automaticbyplatform', 'imagedefault']: + raise ValidationError( + 'Invalid value of --patch-mode for Linux VM. 
Valid values are AutomaticByPlatform, ImageDefault.') + os_profile['linuxConfiguration']['patchSettings'] = { + 'patchMode': patch_mode + } + + return os_profile + + def _build_storage_profile(): + + storage_profiles = { + 'SACustomImage': { + 'osDisk': { + 'createOption': 'fromImage', + 'name': os_disk_name, + 'caching': os_caching, + 'osType': custom_image_os_type, + 'image': {'uri': image_reference}, + 'vhd': {'uri': os_vhd_uri} + } + }, + 'SAPirImage': { + 'osDisk': { + 'createOption': 'fromImage', + 'name': os_disk_name, + 'caching': os_caching, + 'vhd': {'uri': os_vhd_uri} + }, + 'imageReference': { + 'publisher': os_publisher, + 'offer': os_offer, + 'sku': os_sku, + 'version': os_version + } + }, + 'SASpecializedOSDisk': { + 'osDisk': { + 'createOption': 'attach', + 'osType': custom_image_os_type, + 'name': os_disk_name, + 'vhd': {'uri': attach_os_disk} + } + }, + 'ManagedPirImage': { + 'osDisk': { + 'createOption': 'fromImage', + 'name': os_disk_name, + 'caching': os_caching, + 'managedDisk': { + 'storageAccountType': disk_info['os'].get('storageAccountType'), + } + }, + 'imageReference': { + 'publisher': os_publisher, + 'offer': os_offer, + 'sku': os_sku, + 'version': os_version + } + }, + 'ManagedCustomImage': { + 'osDisk': { + 'createOption': 'fromImage', + 'name': os_disk_name, + 'caching': os_caching, + 'managedDisk': { + 'storageAccountType': disk_info['os'].get('storageAccountType'), + } + }, + "imageReference": { + 'id': image_reference + } + }, + 'ManagedSpecializedOSDisk': { + 'osDisk': { + 'createOption': 'attach', + 'osType': custom_image_os_type, + 'managedDisk': { + 'id': attach_os_disk + } + } + }, + 'SharedGalleryImage': { + "osDisk": { + "caching": os_caching, + "managedDisk": { + "storageAccountType": disk_info['os'].get('storageAccountType'), + }, + "name": os_disk_name, + "createOption": "fromImage" + }, + "imageReference": { + 'sharedGalleryImageId': image_reference + } + }, + 'CommunityGalleryImage': { + "osDisk": { + "caching": os_caching, + "managedDisk": { + "storageAccountType": disk_info['os'].get('storageAccountType'), + }, + "name": os_disk_name, + "createOption": "fromImage" + }, + "imageReference": { + 'communityGalleryImageId': image_reference + } + } + } + if os_disk_encryption_set is not None: + storage_profiles['ManagedPirImage']['osDisk']['managedDisk']['diskEncryptionSet'] = { + 'id': os_disk_encryption_set, + } + storage_profiles['ManagedCustomImage']['osDisk']['managedDisk']['diskEncryptionSet'] = { + 'id': os_disk_encryption_set, + } + storage_profiles['SharedGalleryImage']['osDisk']['managedDisk']['diskEncryptionSet'] = { + 'id': os_disk_encryption_set, + } + storage_profiles['CommunityGalleryImage']['osDisk']['managedDisk']['diskEncryptionSet'] = { + 'id': os_disk_encryption_set, + } + if os_disk_security_encryption_type is not None: + storage_profiles['ManagedPirImage']['osDisk']['managedDisk'].update({ + 'securityProfile': { + 'securityEncryptionType': os_disk_security_encryption_type, + } + }) + storage_profiles['ManagedCustomImage']['osDisk']['managedDisk'].update({ + 'securityProfile': { + 'securityEncryptionType': os_disk_security_encryption_type, + } + }) + storage_profiles['SharedGalleryImage']['osDisk']['managedDisk'].update({ + 'securityProfile': { + 'securityEncryptionType': os_disk_security_encryption_type, + } + }) + storage_profiles['CommunityGalleryImage']['osDisk']['managedDisk'].update({ + 'securityProfile': { + 'securityEncryptionType': os_disk_security_encryption_type, + } + }) + if os_disk_secure_vm_disk_encryption_set 
is not None: + storage_profiles['ManagedPirImage']['osDisk']['managedDisk']['securityProfile'].update({ + 'diskEncryptionSet': { + 'id': os_disk_secure_vm_disk_encryption_set + } + }) + storage_profiles['ManagedCustomImage']['osDisk']['managedDisk']['securityProfile'].update({ + 'diskEncryptionSet': { + 'id': os_disk_secure_vm_disk_encryption_set + } + }) + storage_profiles['SharedGalleryImage']['osDisk']['managedDisk']['securityProfile'].update({ + 'diskEncryptionSet': { + 'id': os_disk_secure_vm_disk_encryption_set + } + }) + storage_profiles['CommunityGalleryImage']['osDisk']['managedDisk']['securityProfile'].update({ + 'diskEncryptionSet': { + 'id': os_disk_secure_vm_disk_encryption_set + } + }) + + profile = storage_profiles[storage_profile.name] + if os_disk_size_gb: + profile['osDisk']['diskSizeGb'] = os_disk_size_gb + if disk_info['os'].get('writeAcceleratorEnabled') is not None: + profile['osDisk']['writeAcceleratorEnabled'] = disk_info['os']['writeAcceleratorEnabled'] + if os_disk_delete_option is not None: + profile['osDisk']['deleteOption'] = os_disk_delete_option + data_disks = [v for k, v in disk_info.items() if k != 'os'] + if data_disk_encryption_sets: + if len(data_disk_encryption_sets) != len(data_disks): + raise CLIError( + 'usage error: The number of --data-disk-encryption-sets does not match the number of data disks.') + for i, data_disk in enumerate(data_disks): + data_disk['managedDisk']['diskEncryptionSet'] = {'id': data_disk_encryption_sets[i]} + if data_disks: + profile['dataDisks'] = data_disks + + if disk_info['os'].get('diffDiskSettings'): + profile['osDisk']['diffDiskSettings'] = disk_info['os']['diffDiskSettings'] + + if disk_controller_type is not None: + profile['diskControllerType'] = disk_controller_type + + return profile + + vm_properties = {'hardwareProfile': {'vmSize': size}, 'networkProfile': {'networkInterfaces': nics}, + 'storageProfile': _build_storage_profile()} + + vm_size_properties = {} + if v_cpus_available is not None: + vm_size_properties['vCPUsAvailable'] = v_cpus_available + + if v_cpus_per_core is not None: + vm_size_properties['vCPUsPerCore'] = v_cpus_per_core + + if vm_size_properties: + vm_properties['hardwareProfile']['vmSizeProperties'] = vm_size_properties + + if availability_set_id: + vm_properties['availabilitySet'] = {'id': availability_set_id} + + # vmss is a resource ID + if vmss is not None: + vm_properties['virtualMachineScaleSet'] = {'id': vmss} + + if not attach_os_disk and not specialized: + vm_properties['osProfile'] = _build_os_profile() + + if license_type: + vm_properties['licenseType'] = license_type + + if boot_diagnostics_storage_uri: + vm_properties['diagnosticsProfile'] = { + 'bootDiagnostics': { + "enabled": True, + "storageUri": boot_diagnostics_storage_uri + } + } + + if any((ultra_ssd_enabled, enable_hibernation)): + vm_properties['additionalCapabilities'] = {} + if ultra_ssd_enabled is not None: + vm_properties['additionalCapabilities']['ultraSSDEnabled'] = ultra_ssd_enabled + + if enable_hibernation is not None: + vm_properties['additionalCapabilities']['hibernationEnabled'] = enable_hibernation + + if proximity_placement_group: + vm_properties['proximityPlacementGroup'] = {'id': proximity_placement_group} + + if dedicated_host: + vm_properties['host'] = {'id': dedicated_host} + + if dedicated_host_group: + vm_properties['hostGroup'] = {'id': dedicated_host_group} + + if priority is not None: + vm_properties['priority'] = priority + + if eviction_policy is not None: + vm_properties['evictionPolicy'] = eviction_policy + + if 
max_price is not None: + vm_properties['billingProfile'] = {'maxPrice': max_price} + + vm_properties['securityProfile'] = {} + + if encryption_at_host is not None: + vm_properties['securityProfile']['encryptionAtHost'] = encryption_at_host + + proxy_agent_settings = {} + if enable_proxy_agent is not None: + proxy_agent_settings['enabled'] = enable_proxy_agent + + if proxy_agent_mode is not None: + proxy_agent_settings['mode'] = proxy_agent_mode + + if proxy_agent_settings: + vm_properties['securityProfile']['proxyAgentSettings'] = proxy_agent_settings + + # The `Standard` is used for backward compatibility to allow customers to keep their current behavior + # after changing the default values to Trusted Launch VMs in the future. + from ._constants import COMPATIBLE_SECURITY_TYPE_VALUE + if security_type is not None and security_type != COMPATIBLE_SECURITY_TYPE_VALUE: + vm_properties['securityProfile']['securityType'] = security_type + + if enable_secure_boot is not None or enable_vtpm is not None: + vm_properties['securityProfile']['uefiSettings'] = { + 'secureBootEnabled': enable_secure_boot, + 'vTpmEnabled': enable_vtpm + } + + # Compatibility of various API versions + if vm_properties['securityProfile'] == {}: + del vm_properties['securityProfile'] + + if platform_fault_domain is not None: + vm_properties['platformFaultDomain'] = platform_fault_domain + + if user_data: + vm_properties['userData'] = b64encode(user_data) + + if capacity_reservation_group: + vm_properties['capacityReservation'] = { + 'capacityReservationGroup': { + 'id': capacity_reservation_group + } + } + + vm = { + 'apiVersion': cmd.get_api_version(ResourceType.MGMT_COMPUTE, operation_group='virtual_machines'), + 'type': 'Microsoft.Compute/virtualMachines', + 'name': name, + 'location': location, + 'tags': tags, + 'dependsOn': [], + 'properties': vm_properties, + } + + if zone: + vm['zones'] = zone + + if count: + vm['copy'] = { + 'name': 'vmcopy', + 'mode': 'parallel', + 'count': count + } + vm['name'] = "[concat('{}', copyIndex())]".format(name) + + if edge_zone: + vm['extendedLocation'] = edge_zone + + return vm + + +def _build_frontend_ip_config(name, public_ip_id=None, private_ip_address=None, + private_ip_allocation=None, subnet_id=None): + frontend_ip_config = { + 'name': name + } + + if public_ip_id: + frontend_ip_config.update({ + 'properties': { + 'publicIPAddress': { + 'id': public_ip_id + } + } + }) + else: + frontend_ip_config.update({ + 'properties': { + 'privateIPAllocationMethod': private_ip_allocation, + 'privateIPAddress': private_ip_address, + 'subnet': { + 'id': subnet_id + } + } + }) + return frontend_ip_config + + +def build_application_gateway_resource(_, name, location, tags, backend_pool_name, backend_port, frontend_ip_name, + public_ip_id, subnet_id, gateway_subnet_id, + private_ip_address, private_ip_allocation, sku, capacity): + frontend_ip_config = _build_frontend_ip_config(frontend_ip_name, public_ip_id, + private_ip_address, private_ip_allocation, + subnet_id) + + def _ag_subresource_id(_type, name): + return "[concat(variables('appGwID'), '/{}/{}')]".format(_type, name) + + frontend_ip_config_id = _ag_subresource_id('frontendIPConfigurations', frontend_ip_name) + frontend_port_id = _ag_subresource_id('frontendPorts', 'appGwFrontendPort') + http_listener_id = _ag_subresource_id('httpListeners', 'appGwHttpListener') + backend_address_pool_id = _ag_subresource_id('backendAddressPools', backend_pool_name) + backend_http_settings_id = _ag_subresource_id( + 'backendHttpSettingsCollection', 
'appGwBackendHttpSettings') + + ag_properties = { + 'backendAddressPools': [ + { + 'name': backend_pool_name + } + ], + 'backendHttpSettingsCollection': [ + { + 'name': 'appGwBackendHttpSettings', + 'properties': { + 'Port': backend_port, + 'Protocol': 'Http', + 'CookieBasedAffinity': 'Disabled' + } + } + ], + 'frontendIPConfigurations': [frontend_ip_config], + 'frontendPorts': [ + { + 'name': 'appGwFrontendPort', + 'properties': { + 'Port': 80 + } + } + ], + 'gatewayIPConfigurations': [ + { + 'name': 'appGwIpConfig', + 'properties': { + 'subnet': {'id': gateway_subnet_id} + } + } + ], + 'httpListeners': [ + { + 'name': 'appGwHttpListener', + 'properties': { + 'FrontendIPConfiguration': {'Id': frontend_ip_config_id}, + 'FrontendPort': {'Id': frontend_port_id}, + 'Protocol': 'Http', + 'SslCertificate': None + } + } + ], + 'sku': { + 'name': sku, + 'tier': sku.split('_')[0], + 'capacity': capacity + }, + 'requestRoutingRules': [ + { + 'Name': 'rule1', + 'properties': { + 'RuleType': 'Basic', + 'httpListener': {'id': http_listener_id}, + 'backendAddressPool': {'id': backend_address_pool_id}, + 'backendHttpSettings': {'id': backend_http_settings_id} + } + } + ] + } + + ag = { + 'type': 'Microsoft.Network/applicationGateways', + 'name': name, + 'location': location, + 'tags': tags, + 'apiVersion': '2015-06-15', + 'dependsOn': [], + 'properties': ag_properties + } + return ag + + +def build_load_balancer_resource(cmd, name, location, tags, backend_pool_name, nat_pool_name, + backend_port, frontend_ip_name, public_ip_id, subnet_id, private_ip_address, + private_ip_allocation, sku, instance_count, disable_overprovision, edge_zone=None): + lb_id = "resourceId('Microsoft.Network/loadBalancers', '{}')".format(name) + + frontend_ip_config = _build_frontend_ip_config(frontend_ip_name, public_ip_id, + private_ip_address, private_ip_allocation, + subnet_id) + + lb_properties = { + 'backendAddressPools': [ + { + 'name': backend_pool_name + } + ], + 'frontendIPConfigurations': [frontend_ip_config] + } + if nat_pool_name: + lb_properties['inboundNatPools'] = [{ + 'name': nat_pool_name, + 'properties': { + 'frontendIPConfiguration': { + 'id': "[concat({}, '/frontendIPConfigurations/', '{}')]".format( + lb_id, frontend_ip_name) + }, + 'protocol': 'tcp', + 'frontendPortRangeStart': '50000', + # keep 50119 as minimum for backward compat, and ensure over-provision is taken care of + 'frontendPortRangeEnd': str(max(50119, 49999 + instance_count * (1 if disable_overprovision else 2))), + 'backendPort': backend_port + } + }] + + lb = { + 'type': 'Microsoft.Network/loadBalancers', + 'name': name, + 'location': location, + 'tags': tags, + 'apiVersion': get_target_network_api(cmd.cli_ctx), + 'dependsOn': [], + 'properties': lb_properties + } + if sku and cmd.supported_api_version(ResourceType.MGMT_NETWORK, min_api='2017-08-01'): + lb['sku'] = {'name': sku} + # LB rule is the way to enable SNAT so outbound connections are possible + if sku.lower() == 'standard': + lb_properties['loadBalancingRules'] = [{ + "name": "LBRule", + "properties": { + "frontendIPConfiguration": { + 'id': "[concat({}, '/frontendIPConfigurations/', '{}')]".format(lb_id, frontend_ip_name) + }, + "backendAddressPool": { + "id": "[concat({}, '/backendAddressPools/', '{}')]".format(lb_id, backend_pool_name) + }, + "protocol": "tcp", + "frontendPort": 80, + "backendPort": 80, + "enableFloatingIP": False, + "idleTimeoutInMinutes": 5, + } + }] + + if edge_zone: + lb['apiVersion'] = '2021-02-01' + lb['extendedLocation'] = edge_zone + + return lb + + 
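+# NOTE: build_nat_rule_v2 below reuses the front-end port-range formula from the
+# inbound NAT pool above. Two ports are reserved per requested instance unless
+# over-provisioning is disabled, leaving headroom for the extra VMs an
+# over-provisioned scale set can transiently run; max(50119, ...) preserves the
+# historical minimum range of 50000-50119. For example, instance_count=3 with
+# over-provisioning gives max(50119, 49999 + 3 * 2) = 50119.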
+def build_nat_rule_v2(cmd, name, location, lb_name, frontend_ip_name, backend_pool_name, backend_port, instance_count, + disable_overprovision): + lb_id = "resourceId('Microsoft.Network/loadBalancers', '{}')".format(lb_name) + + nat_rule = { + "type": "Microsoft.Network/loadBalancers/inboundNatRules", + "apiVersion": get_target_network_api(cmd.cli_ctx), + "name": name, + "location": location, + "properties": { + "frontendIPConfiguration": { + 'id': "[concat({}, '/frontendIPConfigurations/', '{}')]".format(lb_id, frontend_ip_name) + }, + "backendAddressPool": { + "id": "[concat({}, '/backendAddressPools/', '{}')]".format(lb_id, backend_pool_name) + }, + "backendPort": backend_port, + "frontendPortRangeStart": "50000", + # This logic comes from the template of `inboundNatPools` to keep consistent with NAT pool + # keep 50119 as minimum for backward compat, and ensure over-provision is taken care of + "frontendPortRangeEnd": str(max(50119, 49999 + instance_count * (1 if disable_overprovision else 2))), + "protocol": "tcp", + "idleTimeoutInMinutes": 5 + }, + "dependsOn": [ + "[concat('Microsoft.Network/loadBalancers/', '{}')]".format(lb_name) + ] + } + + return nat_rule + + +def build_vmss_storage_account_pool_resource(_, loop_name, location, tags, storage_sku, edge_zone=None): + storage_resource = { + 'type': 'Microsoft.Storage/storageAccounts', + 'name': "[variables('storageAccountNames')[copyIndex()]]", + 'location': location, + 'tags': tags, + 'apiVersion': '2015-06-15', + 'copy': { + 'name': loop_name, + 'count': 5 + }, + 'properties': { + 'accountType': storage_sku + } + } + + if edge_zone: + storage_resource['apiVersion'] = '2021-04-01' + storage_resource['extendedLocation'] = edge_zone + + return storage_resource + + +# pylint: disable=too-many-locals, too-many-branches, too-many-statements, too-many-lines +def build_vmss_resource(cmd, name, computer_name_prefix, location, tags, overprovision, upgrade_policy_mode, + vm_sku, instance_count, ip_config_name, nic_name, subnet_id, + public_ip_per_vm, vm_domain_name, dns_servers, nsg, accelerated_networking, + admin_username, authentication_type, storage_profile, os_disk_name, disk_info, + os_type, image=None, admin_password=None, ssh_key_values=None, + ssh_key_path=None, os_publisher=None, os_offer=None, os_sku=None, os_version=None, + backend_address_pool_id=None, inbound_nat_pool_id=None, health_probe=None, + single_placement_group=None, platform_fault_domain_count=None, custom_data=None, + secrets=None, license_type=None, zones=None, priority=None, eviction_policy=None, + application_security_groups=None, ultra_ssd_enabled=None, proximity_placement_group=None, + terminate_notification_time=None, max_price=None, scale_in_policy=None, + os_disk_encryption_set=None, data_disk_encryption_sets=None, + data_disk_iops=None, data_disk_mbps=None, automatic_repairs_grace_period=None, + specialized=None, os_disk_size_gb=None, encryption_at_host=None, host_group=None, + max_batch_instance_percent=None, max_unhealthy_instance_percent=None, + max_unhealthy_upgraded_instance_percent=None, pause_time_between_batches=None, + enable_cross_zone_upgrade=None, prioritize_unhealthy_instances=None, edge_zone=None, + orchestration_mode=None, user_data=None, network_api_version=None, + enable_spot_restore=None, spot_restore_timeout=None, capacity_reservation_group=None, + enable_auto_update=None, patch_mode=None, enable_agent=None, security_type=None, + enable_secure_boot=None, enable_vtpm=None, automatic_repairs_action=None, v_cpus_available=None, + 
v_cpus_per_core=None, os_disk_security_encryption_type=None, + os_disk_secure_vm_disk_encryption_set=None, os_disk_delete_option=None, + regular_priority_count=None, regular_priority_percentage=None, disk_controller_type=None, + enable_osimage_notification=None, max_surge=None, enable_hibernation=None, + enable_auto_os_upgrade=None, enable_proxy_agent=None, proxy_agent_mode=None, + security_posture_reference_id=None, security_posture_reference_exclude_extensions=None, + enable_resilient_vm_creation=None, enable_resilient_vm_deletion=None, + additional_scheduled_events=None, enable_user_reboot_scheduled_events=None, + enable_user_redeploy_scheduled_events=None, + skuprofile_vmsizes=None, skuprofile_allostrat=None): + # Build IP configuration + ip_configuration = {} + ip_config_properties = {} + + if subnet_id: + ip_config_properties['subnet'] = {'id': subnet_id} + + if public_ip_per_vm: + ip_config_properties['publicipaddressconfiguration'] = { + 'name': 'instancepublicip', + 'properties': { + 'idleTimeoutInMinutes': 10, + } + } + if vm_domain_name: + ip_config_properties['publicipaddressconfiguration']['properties']['dnsSettings'] = { + 'domainNameLabel': vm_domain_name + } + + if backend_address_pool_id: + key = 'loadBalancerBackendAddressPools' if 'loadBalancers' in backend_address_pool_id \ + else 'ApplicationGatewayBackendAddressPools' + ip_config_properties[key] = [ + {'id': backend_address_pool_id} + ] + + if inbound_nat_pool_id: + ip_config_properties['loadBalancerInboundNatPools'] = [ + {'id': inbound_nat_pool_id} + ] + + if application_security_groups and cmd.supported_api_version(min_api='2018-06-01', + operation_group='virtual_machine_scale_sets'): + ip_config_properties['applicationSecurityGroups'] = [{'id': x['id']} for x in application_security_groups] + + if ip_config_properties: + ip_configuration = { + 'name': ip_config_name, + 'properties': ip_config_properties + } + + # Build storage profile + storage_properties = {} + if disk_info: + os_caching = disk_info['os'].get('caching') + + if storage_profile in [StorageProfile.SACustomImage, StorageProfile.SAPirImage]: + storage_properties['osDisk'] = { + 'name': os_disk_name, + 'caching': os_caching, + 'createOption': 'FromImage', + } + + if storage_profile == StorageProfile.SACustomImage: + storage_properties['osDisk'].update({ + 'osType': os_type, + 'image': { + 'uri': image + } + }) + else: + storage_properties['osDisk']['vhdContainers'] = "[variables('vhdContainers')]" + + if os_disk_size_gb is not None: + storage_properties['osDisk']['diskSizeGB'] = os_disk_size_gb + if os_disk_delete_option is not None: + storage_properties['osDisk']['deleteOption'] = os_disk_delete_option + + elif storage_profile in [StorageProfile.ManagedPirImage, StorageProfile.ManagedCustomImage]: + storage_properties['osDisk'] = { + 'createOption': 'FromImage', + 'caching': os_caching, + 'managedDisk': {'storageAccountType': disk_info['os'].get('storageAccountType')} + } + if os_disk_encryption_set is not None: + storage_properties['osDisk']['managedDisk']['diskEncryptionSet'] = { + 'id': os_disk_encryption_set + } + if os_disk_security_encryption_type is not None: + storage_properties['osDisk']['managedDisk'].update({ + 'securityProfile': { + 'securityEncryptionType': os_disk_security_encryption_type + } + }) + if os_disk_secure_vm_disk_encryption_set is not None: + storage_properties['osDisk']['managedDisk']['securityProfile'].update({ + 'diskEncryptionSet': { + 'id': os_disk_secure_vm_disk_encryption_set + } + }) + if disk_info and 
disk_info['os'].get('diffDiskSettings'): + storage_properties['osDisk']['diffDiskSettings'] = disk_info['os']['diffDiskSettings'] + + if os_disk_size_gb is not None: + storage_properties['osDisk']['diskSizeGB'] = os_disk_size_gb + if os_disk_delete_option is not None: + storage_properties['osDisk']['deleteOption'] = os_disk_delete_option + + if storage_profile in [StorageProfile.SAPirImage, StorageProfile.ManagedPirImage]: + storage_properties['imageReference'] = { + 'publisher': os_publisher, + 'offer': os_offer, + 'sku': os_sku, + 'version': os_version + } + if storage_profile == StorageProfile.ManagedCustomImage: + storage_properties['imageReference'] = { + 'id': image + } + if storage_profile == StorageProfile.SharedGalleryImage: + storage_properties['osDisk'] = { + 'caching': os_caching, + 'managedDisk': {'storageAccountType': disk_info['os'].get('storageAccountType')}, + "name": os_disk_name, + "createOption": "fromImage" + } + storage_properties['imageReference'] = { + 'sharedGalleryImageId': image + } + if os_disk_encryption_set is not None: + storage_properties['osDisk']['managedDisk']['diskEncryptionSet'] = { + 'id': os_disk_encryption_set + } + if os_disk_security_encryption_type is not None: + storage_properties['osDisk']['managedDisk'].update({ + 'securityProfile': { + 'securityEncryptionType': os_disk_security_encryption_type, + } + }) + if os_disk_secure_vm_disk_encryption_set is not None: + storage_properties['osDisk']['managedDisk']['securityProfile'].update({ + 'diskEncryptionSet': { + 'id': os_disk_secure_vm_disk_encryption_set + } + }) + if os_disk_delete_option is not None: + storage_properties['osDisk']['deleteOption'] = os_disk_delete_option + if storage_profile == StorageProfile.CommunityGalleryImage: + storage_properties['osDisk'] = { + 'caching': os_caching, + 'managedDisk': {'storageAccountType': disk_info['os'].get('storageAccountType')}, + "name": os_disk_name, + "createOption": "fromImage" + } + storage_properties['imageReference'] = { + 'communityGalleryImageId': image + } + if os_disk_encryption_set is not None: + storage_properties['osDisk']['managedDisk']['diskEncryptionSet'] = { + 'id': os_disk_encryption_set + } + if os_disk_security_encryption_type is not None: + storage_properties['osDisk']['managedDisk'].update({ + 'securityProfile': { + 'securityEncryptionType': os_disk_security_encryption_type, + } + }) + if os_disk_secure_vm_disk_encryption_set is not None: + storage_properties['osDisk']['managedDisk']['securityProfile'].update({ + 'diskEncryptionSet': { + 'id': os_disk_secure_vm_disk_encryption_set + } + }) + if os_disk_delete_option is not None: + storage_properties['osDisk']['deleteOption'] = os_disk_delete_option + + if disk_info: + data_disks = [v for k, v in disk_info.items() if k != 'os'] + else: + data_disks = [] + + if data_disk_encryption_sets: + if len(data_disk_encryption_sets) != len(data_disks): + raise CLIError( + 'usage error: Number of --data-disk-encryption-sets mismatches with number of data disks.') + for i, data_disk in enumerate(data_disks): + data_disk['managedDisk']['diskEncryptionSet'] = {'id': data_disk_encryption_sets[i]} + if data_disk_iops: + if len(data_disk_iops) != len(data_disks): + raise CLIError('usage error: Number of --data-disk-iops mismatches with number of data disks.') + for i, data_disk in enumerate(data_disks): + data_disk['diskIOPSReadWrite'] = data_disk_iops[i] + if data_disk_mbps: + if len(data_disk_mbps) != len(data_disks): + raise CLIError('usage error: Number of --data-disk-mbps mismatches with 
number of data disks.') + for i, data_disk in enumerate(data_disks): + data_disk['diskMBpsReadWrite'] = data_disk_mbps[i] + if data_disks: + storage_properties['dataDisks'] = data_disks + if disk_controller_type is not None: + storage_properties['diskControllerType'] = disk_controller_type + + # Build OS Profile + os_profile = {} + if computer_name_prefix: + os_profile['computerNamePrefix'] = computer_name_prefix + + if admin_username: + os_profile['adminUsername'] = admin_username + + if admin_password: + os_profile['adminPassword'] = "[parameters('adminPassword')]" + + if ssh_key_values and ssh_key_path: + os_profile['linuxConfiguration'] = { + 'disablePasswordAuthentication': authentication_type == 'ssh', + 'ssh': { + 'publicKeys': [ + { + 'path': ssh_key_path, + 'keyData': ssh_key_value + } for ssh_key_value in ssh_key_values + ] + } + } + + if custom_data: + os_profile['customData'] = b64encode(custom_data) + + if secrets: + os_profile['secrets'] = secrets + + if enable_agent is not None: + if os_type.lower() == 'linux': + if 'linuxConfiguration' not in os_profile: + os_profile['linuxConfiguration'] = {} + os_profile['linuxConfiguration']['provisionVMAgent'] = enable_agent + elif os_type.lower() == 'windows': + if 'windowsConfiguration' not in os_profile: + os_profile['windowsConfiguration'] = {} + os_profile['windowsConfiguration']['provisionVMAgent'] = enable_agent + + if enable_auto_update is not None and os_type.lower() == 'windows': + os_profile['windowsConfiguration']['enableAutomaticUpdates'] = enable_auto_update + + # Windows patch settings + if patch_mode is not None and os_type.lower() == 'windows': + if patch_mode.lower() not in ['automaticbyos', 'automaticbyplatform', 'manual']: + raise InvalidArgumentValueError( + 'Invalid value of --patch-mode for Windows VMSS. Valid values are AutomaticByOS, ' + 'AutomaticByPlatform, Manual.') + os_profile['windowsConfiguration']['patchSettings'] = { + 'patchMode': patch_mode + } + + # Linux patch settings + if patch_mode is not None and os_type.lower() == 'linux': + if patch_mode.lower() not in ['automaticbyplatform', 'imagedefault']: + raise InvalidArgumentValueError( + 'Invalid value of --patch-mode for Linux VMSS. 
Valid values are AutomaticByPlatform, ImageDefault.') + os_profile['linuxConfiguration']['patchSettings'] = { + 'patchMode': patch_mode + } + + # Build VMSS + nic_config = {} + nic_config_properties = {} + + if ip_configuration: + nic_config_properties['ipConfigurations'] = [ip_configuration] + + if cmd.supported_api_version(min_api='2017-03-30', operation_group='virtual_machine_scale_sets'): + if dns_servers: + nic_config_properties['dnsSettings'] = {'dnsServers': dns_servers} + + if accelerated_networking: + nic_config_properties['enableAcceleratedNetworking'] = True + + if nsg: + nic_config_properties['networkSecurityGroup'] = {'id': nsg} + + if nic_config_properties: + nic_config_properties['primary'] = 'true' + nic_config = { + 'name': nic_name, + 'properties': nic_config_properties + } + + vmss_properties = {} + network_profile = {} + virtual_machine_profile = {} + if nic_config: + network_profile['networkInterfaceConfigurations'] = [nic_config] + + if overprovision is not None: + vmss_properties['overprovision'] = overprovision + + if storage_properties: + virtual_machine_profile['storageProfile'] = storage_properties + + hardware_profile = {} + vm_size_properties = {} + if v_cpus_available is not None: + vm_size_properties['vCPUsAvailable'] = v_cpus_available + + if v_cpus_per_core is not None: + vm_size_properties['vCPUsPerCore'] = v_cpus_per_core + + if vm_size_properties: + hardware_profile['vmSizeProperties'] = vm_size_properties + + if hardware_profile: + virtual_machine_profile['hardwareProfile'] = hardware_profile + + if not specialized and os_profile: + virtual_machine_profile['osProfile'] = os_profile + + if upgrade_policy_mode: + vmss_properties['upgradePolicy'] = { + 'mode': upgrade_policy_mode + } + if upgrade_policy_mode and cmd.supported_api_version(min_api='2020-12-01', + operation_group='virtual_machine_scale_sets'): + rolling_upgrade_policy = {} + if max_batch_instance_percent is not None: + rolling_upgrade_policy['maxBatchInstancePercent'] = max_batch_instance_percent + + if max_unhealthy_instance_percent is not None: + rolling_upgrade_policy['maxUnhealthyInstancePercent'] = max_unhealthy_instance_percent + + if max_unhealthy_upgraded_instance_percent is not None: + rolling_upgrade_policy['maxUnhealthyUpgradedInstancePercent'] = max_unhealthy_upgraded_instance_percent + + if pause_time_between_batches is not None: + rolling_upgrade_policy['pauseTimeBetweenBatches'] = pause_time_between_batches + + if enable_cross_zone_upgrade is not None: + rolling_upgrade_policy['enableCrossZoneUpgrade'] = enable_cross_zone_upgrade + + if prioritize_unhealthy_instances is not None: + rolling_upgrade_policy['prioritizeUnhealthyInstances'] = prioritize_unhealthy_instances + + if max_surge is not None: + rolling_upgrade_policy['maxSurge'] = max_surge + + if rolling_upgrade_policy: + vmss_properties['upgradePolicy']['rollingUpgradePolicy'] = rolling_upgrade_policy + + if upgrade_policy_mode and cmd.supported_api_version(min_api='2018-10-01', + operation_group='virtual_machine_scale_sets'): + automatic_os_upgrade_policy = {} + if enable_auto_os_upgrade is not None: + automatic_os_upgrade_policy['enableAutomaticOSUpgrade'] = enable_auto_os_upgrade + + if automatic_os_upgrade_policy: + vmss_properties['upgradePolicy']['automaticOSUpgradePolicy'] = automatic_os_upgrade_policy + + if upgrade_policy_mode and upgrade_policy_mode.lower() == 'rolling' and \ + cmd.supported_api_version(min_api='2020-12-01', operation_group='virtual_machine_scale_sets'): + if os_type.lower() == 'linux': + from 
azure.cli.command_modules.vm._vmss_application_health import application_health_setting_for_linux + application_health_data = application_health_setting_for_linux + health_extension_name = 'ApplicationHealthLinux' + else: + from azure.cli.command_modules.vm._vmss_application_health import application_health_setting_for_windows + application_health_data = application_health_setting_for_windows + health_extension_name = 'ApplicationHealthWindows' + health_extension = [{ + "name": health_extension_name, + "properties": { + "publisher": "Microsoft.ManagedServices", + "type": health_extension_name, + "typeHandlerVersion": "1.0", + "autoUpgradeMinorVersion": True, + "settings": { + "port": 80, + "protocol": "http", + "requestPath": "/" + } + } + }] + virtual_machine_profile['extensionProfile'] = { + 'extensions': health_extension + } + os_profile['customData'] = b64encode(application_health_data) + + if enable_spot_restore and cmd.supported_api_version(min_api='2021-04-01', + operation_group='virtual_machine_scale_sets'): + vmss_properties['spotRestorePolicy'] = {} + if enable_spot_restore: + vmss_properties['spotRestorePolicy']['enabled'] = enable_spot_restore + + if spot_restore_timeout: + vmss_properties['spotRestorePolicy']['restoreTimeout'] = spot_restore_timeout + + if regular_priority_count is not None or regular_priority_percentage is not None: + priority_mix_policy = {} + if regular_priority_count is not None: + priority_mix_policy['baseRegularPriorityCount'] = regular_priority_count + if regular_priority_percentage is not None: + priority_mix_policy['regularPriorityPercentageAboveBase'] = regular_priority_percentage + vmss_properties['priorityMixPolicy'] = priority_mix_policy + + if license_type: + virtual_machine_profile['licenseType'] = license_type + + if health_probe and cmd.supported_api_version(min_api='2017-03-30', operation_group='virtual_machine_scale_sets'): + network_profile['healthProbe'] = {'id': health_probe} + + if network_api_version and \ + cmd.supported_api_version(min_api='2021-03-01', operation_group='virtual_machine_scale_sets'): + network_profile['networkApiVersion'] = network_api_version + + if cmd.supported_api_version(min_api='2016-04-30-preview', operation_group='virtual_machine_scale_sets'): + vmss_properties['singlePlacementGroup'] = single_placement_group + + if priority and cmd.supported_api_version(min_api='2017-12-01', operation_group='virtual_machine_scale_sets'): + virtual_machine_profile['priority'] = priority + + if eviction_policy and cmd.supported_api_version(min_api='2017-12-01', + operation_group='virtual_machine_scale_sets'): + virtual_machine_profile['evictionPolicy'] = eviction_policy + + if max_price is not None and cmd.supported_api_version( + min_api='2019-03-01', operation_group='virtual_machine_scale_sets'): + virtual_machine_profile['billingProfile'] = {'maxPrice': max_price} + + if platform_fault_domain_count is not None and cmd.supported_api_version( + min_api='2017-12-01', operation_group='virtual_machine_scale_sets'): + vmss_properties['platformFaultDomainCount'] = platform_fault_domain_count + + if ultra_ssd_enabled is not None: + if cmd.supported_api_version(min_api='2019-03-01', operation_group='virtual_machine_scale_sets'): + vmss_properties['additionalCapabilities'] = {'ultraSSDEnabled': ultra_ssd_enabled} + else: + virtual_machine_profile['additionalCapabilities'] = {'ultraSSDEnabled': ultra_ssd_enabled} + + if proximity_placement_group: + vmss_properties['proximityPlacementGroup'] = {'id': proximity_placement_group} + + 
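+    # Scheduled-events settings: `terminate_notification_time` is an ISO 8601
+    # duration (for example 'PT5M' for a five-minute notice) that lands in
+    # terminateNotificationProfile.notBeforeTimeout; the OS image notification
+    # toggle is applied to the same shared dict below.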
scheduled_events_profile = {} + if terminate_notification_time is not None: + scheduled_events_profile.update({ + 'terminateNotificationProfile': { + 'notBeforeTimeout': terminate_notification_time, + 'enable': 'true' + } + }) + virtual_machine_profile['scheduledEventsProfile'] = scheduled_events_profile + + if enable_osimage_notification is not None: + scheduled_events_profile.update({ + 'osImageNotificationProfile': { + 'enable': enable_osimage_notification + } + }) + virtual_machine_profile['scheduledEventsProfile'] = scheduled_events_profile + + scheduled_events_policy = {} + if additional_scheduled_events is not None: + scheduled_events_policy.update({ + "scheduledEventsAdditionalPublishingTargets": { + "eventGridAndResourceGraph": { + "enable": additional_scheduled_events + } + } + }) + if enable_user_redeploy_scheduled_events is not None: + scheduled_events_policy.update({ + "userInitiatedRedeploy": { + "automaticallyApprove": enable_user_redeploy_scheduled_events + } + }) + if enable_user_reboot_scheduled_events is not None: + scheduled_events_policy.update({ + "userInitiatedReboot": { + "automaticallyApprove": enable_user_reboot_scheduled_events + } + }) + if scheduled_events_policy: + vmss_properties['scheduledEventsPolicy'] = scheduled_events_policy + + if automatic_repairs_grace_period is not None or automatic_repairs_action is not None: + automatic_repairs_policy = { + 'enabled': 'true', + 'gracePeriod': automatic_repairs_grace_period or 'PT10M', + 'repairAction': automatic_repairs_action or 'Replace' + } + vmss_properties['automaticRepairsPolicy'] = automatic_repairs_policy + + if scale_in_policy: + vmss_properties['scaleInPolicy'] = {'rules': scale_in_policy} + + if enable_resilient_vm_creation is not None or enable_resilient_vm_deletion is not None: + resiliency_policy = {} + if enable_resilient_vm_creation is not None: + resiliency_policy['resilientVMCreationPolicy'] = {'enabled': enable_resilient_vm_creation} + if enable_resilient_vm_deletion is not None: + resiliency_policy['resilientVMDeletionPolicy'] = {'enabled': enable_resilient_vm_deletion} + vmss_properties['resiliencyPolicy'] = resiliency_policy + + security_profile = {} + if encryption_at_host: + security_profile['encryptionAtHost'] = encryption_at_host + + # The `Standard` is used for backward compatibility to allow customers to keep their current behavior + # after changing the default values to Trusted Launch VMs in the future. 
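+    # (COMPATIBLE_SECURITY_TYPE_VALUE is that `Standard` sentinel; when it is
+    # passed, securityType is deliberately omitted from the profile instead of
+    # being sent to the service.)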
+ from ._constants import COMPATIBLE_SECURITY_TYPE_VALUE + if security_type is not None and security_type != COMPATIBLE_SECURITY_TYPE_VALUE: + security_profile['securityType'] = security_type + + if enable_secure_boot is not None or enable_vtpm is not None: + security_profile['uefiSettings'] = { + 'secureBootEnabled': enable_secure_boot, + 'vTpmEnabled': enable_vtpm + } + + proxy_agent_settings = {} + if enable_proxy_agent is not None: + proxy_agent_settings['enabled'] = enable_proxy_agent + + if proxy_agent_mode is not None: + proxy_agent_settings['mode'] = proxy_agent_mode + + if proxy_agent_settings: + security_profile['proxyAgentSettings'] = proxy_agent_settings + + if security_profile: + virtual_machine_profile['securityProfile'] = security_profile + + if user_data: + virtual_machine_profile['userData'] = b64encode(user_data) + + if host_group: + vmss_properties['hostGroup'] = {'id': host_group} + + if network_profile: + virtual_machine_profile['networkProfile'] = network_profile + + if capacity_reservation_group: + virtual_machine_profile['capacityReservation'] = { + 'capacityReservationGroup': { + 'id': capacity_reservation_group + } + } + + if security_posture_reference_id: + virtual_machine_profile['securityPostureReference'] = { + 'id': security_posture_reference_id, + } + + if security_posture_reference_exclude_extensions: + security_posture_reference = virtual_machine_profile.get('securityPostureReference', {}) + security_posture_reference['excludeExtensions'] = security_posture_reference_exclude_extensions + virtual_machine_profile['securityPostureReference'] = security_posture_reference + + if virtual_machine_profile: + vmss_properties['virtualMachineProfile'] = virtual_machine_profile + + if orchestration_mode and cmd.supported_api_version(min_api='2020-06-01', + operation_group='virtual_machine_scale_sets'): + vmss_properties['orchestrationMode'] = orchestration_mode + + if enable_hibernation is not None: + if not vmss_properties.get('additionalCapabilities'): + vmss_properties['additionalCapabilities'] = {} + vmss_properties['additionalCapabilities']['hibernationEnabled'] = enable_hibernation + + if skuprofile_vmsizes: + sku_profile_vmsizes_list = [] + for vm_size in skuprofile_vmsizes: + vmsize_obj = { + 'name': vm_size + } + sku_profile_vmsizes_list.append(vmsize_obj) + sku_profile = { + 'vmSizes': sku_profile_vmsizes_list, + 'allocationStrategy': skuprofile_allostrat + } + vmss_properties['skuProfile'] = sku_profile + + vmss = { + 'type': 'Microsoft.Compute/virtualMachineScaleSets', + 'name': name, + 'location': location, + 'tags': tags, + 'apiVersion': cmd.get_api_version(ResourceType.MGMT_COMPUTE, operation_group='virtual_machine_scale_sets'), + 'dependsOn': [], + 'properties': vmss_properties + } + + if vm_sku: + vmss['sku'] = { + 'name': vm_sku, + 'capacity': instance_count + } + + if vmss_properties: + vmss['properties'] = vmss_properties + + if zones: + vmss['zones'] = zones + + if edge_zone: + vmss['extendedLocation'] = edge_zone + + return vmss + + +def build_av_set_resource(cmd, name, location, tags, platform_update_domain_count, + platform_fault_domain_count, unmanaged, proximity_placement_group=None): + av_set = { + 'type': 'Microsoft.Compute/availabilitySets', + 'name': name, + 'location': location, + 'tags': tags, + 'apiVersion': cmd.get_api_version(ResourceType.MGMT_COMPUTE, operation_group='availability_sets'), + "properties": { + 'platformFaultDomainCount': platform_fault_domain_count, + } + } + + if 
cmd.supported_api_version(min_api='2016-04-30-preview', operation_group='availability_sets'): + av_set['sku'] = { + 'name': 'Classic' if unmanaged else 'Aligned' + } + + # server defaults the UD to 5 unless set otherwise + if platform_update_domain_count is not None: + av_set['properties']['platformUpdateDomainCount'] = platform_update_domain_count + + if proximity_placement_group: + av_set['properties']['proximityPlacementGroup'] = {'id': proximity_placement_group} + + return av_set + + +def build_vm_linux_log_analytics_workspace_agent(_, vm_name, location): + ''' + This is used for log analytics workspace + ''' + mmaExtension_resource = { + 'type': 'Microsoft.Compute/virtualMachines/extensions', + 'apiVersion': '2018-10-01', + 'properties': { + 'publisher': 'Microsoft.EnterpriseCloud.Monitoring', + 'type': 'OmsAgentForLinux', + 'typeHandlerVersion': '1.0', + 'autoUpgradeMinorVersion': 'true', + 'settings': { + 'workspaceId': "[reference(parameters('workspaceId'), '2015-11-01-preview').customerId]", + 'stopOnMultipleConnections': 'true' + }, + 'protectedSettings': { + 'workspaceKey': "[listKeys(parameters('workspaceId'), '2015-11-01-preview').primarySharedKey]" + } + } + } + + mmaExtension_resource['name'] = vm_name + '/OmsAgentForLinux' + mmaExtension_resource['location'] = location + mmaExtension_resource['dependsOn'] = ['Microsoft.Compute/virtualMachines/' + vm_name] + return mmaExtension_resource + + +def build_vm_windows_log_analytics_workspace_agent(_, vm_name, location): + ''' + This function is used for log analytics workspace. + ''' + mmaExtension_resource = { + 'type': 'Microsoft.Compute/virtualMachines/extensions', + 'apiVersion': '2018-10-01', + 'properties': { + 'publisher': 'Microsoft.EnterpriseCloud.Monitoring', + 'type': 'MicrosoftMonitoringAgent', + 'typeHandlerVersion': '1.0', + 'autoUpgradeMinorVersion': 'true', + 'settings': { + 'workspaceId': "[reference(parameters('workspaceId'), '2015-11-01-preview').customerId]", + 'stopOnMultipleConnections': 'true' + }, + 'protectedSettings': { + 'workspaceKey': "[listKeys(parameters('workspaceId'), '2015-11-01-preview').primarySharedKey]" + } + } + } + + mmaExtension_resource['name'] = vm_name + '/MicrosoftMonitoringAgent' + mmaExtension_resource['location'] = location + mmaExtension_resource['dependsOn'] = ['Microsoft.Compute/virtualMachines/' + vm_name] + return mmaExtension_resource diff --git a/src/azure-cli/azure/cli/command_modules/vm/azure_stack/_validators.py b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/_validators.py new file mode 100644 index 00000000000..b4094b033a6 --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/_validators.py @@ -0,0 +1,2696 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# -------------------------------------------------------------------------------------------- + +# pylint:disable=too-many-lines + +import os + +try: + from urllib.parse import urlparse +except ImportError: + from urlparse import urlparse # pylint: disable=import-error + +from knack.log import get_logger +from knack.util import CLIError + +from azure.cli.core.azclierror import (ValidationError, ArgumentUsageError, RequiredArgumentMissingError, + MutuallyExclusiveArgumentError, CLIInternalError) +from azure.cli.core.commands.validators import ( + get_default_location_from_resource_group, validate_file_or_dict, validate_parameter_set, validate_tags) +from azure.cli.core.util import (hash_string, DISALLOWED_USER_NAMES, get_default_admin_username) +from azure.cli.command_modules.vm.azure_stack._vm_utils import (check_existence, get_storage_blob_uri, list_sku_info, + import_aaz_by_profile) +from azure.cli.command_modules.vm.azure_stack._template_builder import StorageProfile +from azure.cli.core import keys +from azure.core.exceptions import ResourceNotFoundError + +from ._client_factory import _compute_client_factory +from ._actions import _get_latest_image_version + +logger = get_logger(__name__) + + +def validate_asg_names_or_ids(cmd, namespace): + from azure.mgmt.core.tools import is_valid_resource_id, resource_id + from azure.cli.core.commands.client_factory import get_subscription_id + + resource_group = namespace.resource_group_name + subscription_id = get_subscription_id(cmd.cli_ctx) + names_or_ids = getattr(namespace, 'application_security_groups') + ids = [] + + if names_or_ids == [""] or not names_or_ids: + return + + for val in names_or_ids: + if not is_valid_resource_id(val): + val = resource_id( + subscription=subscription_id, + resource_group=resource_group, + namespace='Microsoft.Network', type='applicationSecurityGroups', + name=val + ) + ids.append({'id': val}) + setattr(namespace, 'application_security_groups', ids) + + +def validate_nsg_name(cmd, namespace): + from azure.mgmt.core.tools import resource_id + from azure.cli.core.commands.client_factory import get_subscription_id + vm_id = resource_id(name=namespace.vm_name, resource_group=namespace.resource_group_name, + namespace='Microsoft.Compute', type='virtualMachines', + subscription=get_subscription_id(cmd.cli_ctx)) + namespace.network_security_group_name = namespace.network_security_group_name \ + or '{}_NSG_{}'.format(namespace.vm_name, hash_string(vm_id, length=8)) + + +def validate_keyvault(cmd, namespace): + namespace.keyvault = _get_resource_id(cmd.cli_ctx, namespace.keyvault, namespace.resource_group_name, + 'vaults', 'Microsoft.KeyVault') + + +def validate_vm_name_for_monitor_metrics(cmd, namespace): + if hasattr(namespace, 'resource'): + namespace.resource = _get_resource_id(cmd.cli_ctx, namespace.resource, namespace.resource_group_name, + 'virtualMachines', 'Microsoft.Compute') + elif hasattr(namespace, 'resource_uri'): + namespace.resource_uri = _get_resource_id(cmd.cli_ctx, namespace.resource_uri, namespace.resource_group_name, + 'virtualMachines', 'Microsoft.Compute') + del namespace.resource_group_name + + +def _validate_proximity_placement_group(cmd, namespace): + from azure.mgmt.core.tools import parse_resource_id + + if namespace.proximity_placement_group: + namespace.proximity_placement_group = _get_resource_id(cmd.cli_ctx, namespace.proximity_placement_group, + namespace.resource_group_name, + 'proximityPlacementGroups', 'Microsoft.Compute') + + parsed = 
parse_resource_id(namespace.proximity_placement_group) + rg, name = parsed['resource_group'], parsed['name'] + + if not check_existence(cmd.cli_ctx, name, rg, 'Microsoft.Compute', 'proximityPlacementGroups'): + raise CLIError("Proximity Placement Group '{}' does not exist.".format(name)) + + +def process_vm_secret_format(cmd, namespace): + from azure.mgmt.core.tools import is_valid_resource_id + from azure.cli.core._output import (get_output_format, set_output_format) + + keyvault_usage = CLIError('usage error: [--keyvault NAME --resource-group NAME | --keyvault ID]') + kv = namespace.keyvault + rg = namespace.resource_group_name + + if rg: + if not kv or is_valid_resource_id(kv): + raise keyvault_usage + validate_keyvault(cmd, namespace) + else: + if kv and not is_valid_resource_id(kv): + raise keyvault_usage + + warning_msg = "This command does not support the {} output format. Showing JSON format instead." + desired_formats = ["json", "jsonc"] + + output_format = get_output_format(cmd.cli_ctx) + if output_format not in desired_formats: + warning_msg = warning_msg.format(output_format) + logger.warning(warning_msg) + set_output_format(cmd.cli_ctx, desired_formats[0]) + + +def _get_resource_group_from_vault_name(cli_ctx, vault_name): + """ + Fetch resource group from vault name + :param str vault_name: name of the key vault + :return: resource group name or None + :rtype: str + """ + from azure.cli.core.profiles import ResourceType + from azure.cli.core.commands.client_factory import get_mgmt_service_client + from azure.mgmt.core.tools import parse_resource_id + client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_KEYVAULT).vaults + for vault in client.list(): + id_comps = parse_resource_id(vault.id) + if id_comps['name'] == vault_name: + return id_comps['resource_group'] + return None + + +def _get_resource_id(cli_ctx, val, resource_group, resource_type, resource_namespace): + from azure.mgmt.core.tools import resource_id, is_valid_resource_id + from azure.cli.core.commands.client_factory import get_subscription_id + if is_valid_resource_id(val): + return val + + kwargs = { + 'name': val, + 'resource_group': resource_group, + 'namespace': resource_namespace, + 'type': resource_type, + 'subscription': get_subscription_id(cli_ctx) + } + missing_kwargs = {k: v for k, v in kwargs.items() if not v} + + return resource_id(**kwargs) if not missing_kwargs else None + + +def _get_nic_id(cli_ctx, val, resource_group): + return _get_resource_id(cli_ctx, val, resource_group, + 'networkInterfaces', 'Microsoft.Network') + + +def validate_vm_nic(cmd, namespace): + namespace.nic = _get_nic_id(cmd.cli_ctx, namespace.nic, namespace.resource_group_name) + + +def validate_vm_nics(cmd, namespace): + rg = namespace.resource_group_name + nic_ids = [] + + for n in namespace.nics: + nic_ids.append(_get_nic_id(cmd.cli_ctx, n, rg)) + namespace.nics = nic_ids + + if hasattr(namespace, 'primary_nic') and namespace.primary_nic: + namespace.primary_nic = _get_nic_id(cmd.cli_ctx, namespace.primary_nic, rg) + + +def _validate_secrets(secrets, os_type): + """ + Validates a parsed JSON array containing secrets for use in VM Creation + Secrets JSON structure + [{ + "sourceVault": { "id": "value" }, + "vaultCertificates": [{ + "certificateUrl": "value", + "certificateStore": "cert store name (only on windows)" + }] + }] + :param dict secrets: Dict fitting the JSON description above + :param string os_type: the type of OS (linux or windows) + :return: errors if any were found + :rtype: list + """ + is_windows = 
os_type == 'windows' + errors = [] + + try: + loaded_secret = [validate_file_or_dict(secret) for secret in secrets] + except Exception as err: + raise CLIError('Error decoding secrets: {0}'.format(err)) + + for idx_arg, narg_secret in enumerate(loaded_secret): + for idx, secret in enumerate(narg_secret): + if 'sourceVault' not in secret: + errors.append( + 'Secret is missing sourceVault key at index {0} in arg {1}'.format( + idx, idx_arg)) + if 'sourceVault' in secret and 'id' not in secret['sourceVault']: + errors.append( + 'Secret is missing sourceVault.id key at index {0} in arg {1}'.format( + idx, idx_arg)) + if 'vaultCertificates' not in secret or not secret['vaultCertificates']: + err = 'Secret is missing vaultCertificates array or it is empty at index {0} in ' \ + 'arg {1} ' + errors.append(err.format(idx, idx_arg)) + else: + for jdx, cert in enumerate(secret['vaultCertificates']): + message = 'Secret is missing {0} within vaultCertificates array at secret ' \ + 'index {1} and vaultCertificate index {2} in arg {3}' + if 'certificateUrl' not in cert: + errors.append(message.format('certificateUrl', idx, jdx, idx_arg)) + if is_windows and 'certificateStore' not in cert: + errors.append(message.format('certificateStore', idx, jdx, idx_arg)) + + if errors: + raise CLIError('\n'.join(errors)) + + +# region VM Create Validators + + +# pylint: disable=too-many-return-statements +def _parse_image_argument(cmd, namespace): + """ Systematically determines what type is supplied for the --image parameter. Updates the + namespace and returns the type for subsequent processing. """ + from azure.mgmt.core.tools import is_valid_resource_id + from azure.core.exceptions import HttpResponseError + import re + + # 1 - check if a fully-qualified ID (assumes it is an image ID) + if is_valid_resource_id(namespace.image): + return 'image_id' + + from ._vm_utils import is_shared_gallery_image_id, is_community_gallery_image_id + if is_shared_gallery_image_id(namespace.image): + return 'shared_gallery_image_id' + + if is_community_gallery_image_id(namespace.image): + return 'community_gallery_image_id' + + # 2 - attempt to match an URN pattern + urn_match = re.match('([^:]*):([^:]*):([^:]*):([^:]*)', namespace.image) + if urn_match: + namespace.os_publisher = urn_match.group(1) + namespace.os_offer = urn_match.group(2) + namespace.os_sku = urn_match.group(3) + namespace.os_version = urn_match.group(4) + + if not any([namespace.plan_name, namespace.plan_product, namespace.plan_publisher]): + image_plan = _get_image_plan_info_if_exists(cmd, namespace) + if image_plan: + namespace.plan_name = image_plan.name + namespace.plan_product = image_plan.product + namespace.plan_publisher = image_plan.publisher + + return 'urn' + + # 3 - unmanaged vhd based images? 
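+    # Anything with a URL scheme, e.g. https://mystorage.blob.core.windows.net/vhds/os.vhd
+    # (an illustrative blob URI), is treated as an unmanaged VHD image.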
+ if urlparse(namespace.image).scheme and "://" in namespace.image: + return 'uri' + + # 4 - attempt to match an URN alias (most likely) + from azure.cli.command_modules.vm.azure_stack._actions import load_images_from_aliases_doc + import requests + try: + images = None + images = load_images_from_aliases_doc(cmd.cli_ctx) + matched = next((x for x in images if x['urnAlias'].lower() == namespace.image.lower()), None) + if matched: + namespace.os_publisher = matched['publisher'] + namespace.os_offer = matched['offer'] + namespace.os_sku = matched['sku'] + namespace.os_version = matched['version'] + if not any([namespace.plan_name, namespace.plan_product, namespace.plan_publisher]): + image_plan = _get_image_plan_info_if_exists(cmd, namespace) + if image_plan: + namespace.plan_name = image_plan.name + namespace.plan_product = image_plan.product + namespace.plan_publisher = image_plan.publisher + return 'urn' + except requests.exceptions.ConnectionError: + pass + + # 5 - check if an existing managed disk image resource + compute_client = _compute_client_factory(cmd.cli_ctx) + try: + compute_client.images.get(namespace.resource_group_name, namespace.image) + namespace.image = _get_resource_id(cmd.cli_ctx, namespace.image, namespace.resource_group_name, + 'images', 'Microsoft.Compute') + return 'image_id' + except HttpResponseError: + if images is not None: + err = 'Invalid image "{}". Use a valid image URN, custom image name, custom image id, ' \ + 'VHD blob URI, or pick an image from {}.\nSee vm create -h for more information ' \ + 'on specifying an image.'.format(namespace.image, [x['urnAlias'] for x in images]) + else: + err = 'Failed to connect to remote source of image aliases or find a local copy. Invalid image "{}". ' \ + 'Use a valid image URN, custom image name, custom image id, or VHD blob URI.\nSee vm ' \ + 'create -h for more information on specifying an image.'.format(namespace.image) + raise CLIError(err) + + +def _get_image_plan_info_if_exists(cmd, namespace): + try: + compute_client = _compute_client_factory(cmd.cli_ctx) + if namespace.os_version.lower() == 'latest': + image_version = _get_latest_image_version(cmd.cli_ctx, namespace.location, namespace.os_publisher, + namespace.os_offer, namespace.os_sku) + else: + image_version = namespace.os_version + + image = compute_client.virtual_machine_images.get(namespace.location, + namespace.os_publisher, + namespace.os_offer, + namespace.os_sku, + image_version) + + # pylint: disable=no-member + return image.plan + except ResourceNotFoundError as ex: + logger.warning("Querying the image of '%s' failed for an error '%s'. 
Configuring plan settings " + "will be skipped", namespace.image, ex.message) + + +# pylint: disable=inconsistent-return-statements, too-many-return-statements +def _get_storage_profile_description(profile): + if profile == StorageProfile.SACustomImage: + return 'create unmanaged OS disk created from generalized VHD' + if profile == StorageProfile.SAPirImage: + return 'create unmanaged OS disk from Azure Marketplace image' + if profile == StorageProfile.SASpecializedOSDisk: + return 'attach to existing unmanaged OS disk' + if profile == StorageProfile.ManagedCustomImage: + return 'create managed OS disk from custom image' + if profile == StorageProfile.ManagedPirImage: + return 'create managed OS disk from Azure Marketplace image' + if profile == StorageProfile.ManagedSpecializedOSDisk: + return 'attach existing managed OS disk' + if profile == StorageProfile.SharedGalleryImage: + return 'create OS disk from shared gallery image' + if profile == StorageProfile.CommunityGalleryImage: + return 'create OS disk from community gallery image' + + +def _validate_location(cmd, namespace, zone_info, size_info): + if not namespace.location: + get_default_location_from_resource_group(cmd, namespace) + if zone_info and size_info: + sku_infos = list_sku_info(cmd.cli_ctx, namespace.location) + temp = next((x for x in sku_infos if x.name.lower() == size_info.lower()), None) + # For Stack (compute - 2017-03-30), Resource_sku doesn't implement location_info property + if not hasattr(temp, 'location_info'): + return + if not temp or not [x for x in (temp.location_info or []) if x.zones]: + raise CLIError("{}'s location can't be used to create the VM/VMSS because availability zone is not yet " + "supported. Please use '--location' to specify a capable one. 'az vm list-skus' can be " + "used to find such locations".format(namespace.resource_group_name)) + + +# pylint: disable=too-many-branches, too-many-statements, too-many-locals +def _validate_vm_create_storage_profile(cmd, namespace, for_scale_set=False): + from azure.mgmt.core.tools import parse_resource_id + + _validate_vm_vmss_create_ephemeral_placement(namespace) + + # specialized is only for image + if getattr(namespace, 'specialized', None) is not None and namespace.image is None: + raise CLIError('usage error: --specialized is only configurable when --image is specified.') + + # use minimal parameters to resolve the expected storage profile + if getattr(namespace, 'attach_os_disk', None) and not namespace.image: + if namespace.use_unmanaged_disk: + # STORAGE PROFILE #3 + namespace.storage_profile = StorageProfile.SASpecializedOSDisk + else: + # STORAGE PROFILE #6 + namespace.storage_profile = StorageProfile.ManagedSpecializedOSDisk + elif namespace.image and not getattr(namespace, 'attach_os_disk', None): + image_type = _parse_image_argument(cmd, namespace) + if image_type == 'uri': + # STORAGE PROFILE #2 + namespace.storage_profile = StorageProfile.SACustomImage + elif image_type == 'image_id': + # STORAGE PROFILE #5 + namespace.storage_profile = StorageProfile.ManagedCustomImage + elif image_type == 'shared_gallery_image_id': + namespace.storage_profile = StorageProfile.SharedGalleryImage + elif image_type == 'community_gallery_image_id': + namespace.storage_profile = StorageProfile.CommunityGalleryImage + elif image_type == 'urn': + if namespace.use_unmanaged_disk: + # STORAGE PROFILE #1 + namespace.storage_profile = StorageProfile.SAPirImage + else: + # STORAGE PROFILE #4 + namespace.storage_profile = StorageProfile.ManagedPirImage + else: + 
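+            # Defensive: _parse_image_argument only returns the image types
+            # handled by the branches above.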
raise CLIError('Unrecognized image type: {}'.format(image_type)) + elif not namespace.image and not getattr(namespace, 'attach_os_disk', None): + namespace.image = 'MicrosoftWindowsServer:WindowsServer:2022-datacenter-azure-edition:latest' + _parse_image_argument(cmd, namespace) + namespace.storage_profile = StorageProfile.ManagedPirImage + if namespace.enable_secure_boot is None: + namespace.enable_secure_boot = True + if namespace.enable_vtpm is None: + namespace.enable_vtpm = True + if namespace.security_type is None: + namespace.security_type = 'TrustedLaunch' + else: + # did not specify image XOR attach-os-disk + raise CLIError('incorrect usage: --image IMAGE | --attach-os-disk DISK') + + auth_params = ['admin_password', 'admin_username', 'authentication_type', + 'generate_ssh_keys', 'ssh_dest_key_path', 'ssh_key_value'] + + # perform parameter validation for the specific storage profile + # start with the required/forbidden parameters for VM + if namespace.storage_profile == StorageProfile.ManagedPirImage: + required = ['image'] + forbidden = ['os_type', 'attach_os_disk', 'storage_account', + 'storage_container_name', 'use_unmanaged_disk'] + if for_scale_set: + forbidden.append('os_disk_name') + + elif namespace.storage_profile == StorageProfile.ManagedCustomImage: + required = ['image'] + forbidden = ['os_type', 'attach_os_disk', 'storage_account', + 'storage_container_name', 'use_unmanaged_disk'] + if for_scale_set: + forbidden.append('os_disk_name') + + elif namespace.storage_profile == StorageProfile.SharedGalleryImage: + required = ['image'] + forbidden = ['attach_os_disk', 'storage_account', 'storage_container_name', 'use_unmanaged_disk'] + + elif namespace.storage_profile == StorageProfile.CommunityGalleryImage: + required = ['image'] + forbidden = ['attach_os_disk', 'storage_account', 'storage_container_name', 'use_unmanaged_disk'] + + elif namespace.storage_profile == StorageProfile.ManagedSpecializedOSDisk: + required = ['os_type', 'attach_os_disk'] + forbidden = ['os_disk_name', 'os_caching', 'storage_account', 'ephemeral_os_disk', + 'storage_container_name', 'use_unmanaged_disk', 'storage_sku'] + auth_params + + elif namespace.storage_profile == StorageProfile.SAPirImage: + required = ['image', 'use_unmanaged_disk'] + forbidden = ['os_type', 'attach_os_disk', 'data_disk_sizes_gb', 'ephemeral_os_disk'] + + elif namespace.storage_profile == StorageProfile.SACustomImage: + required = ['image', 'os_type', 'use_unmanaged_disk'] + forbidden = ['attach_os_disk', 'data_disk_sizes_gb', 'ephemeral_os_disk'] + + elif namespace.storage_profile == StorageProfile.SASpecializedOSDisk: + required = ['os_type', 'attach_os_disk', 'use_unmanaged_disk'] + forbidden = ['os_disk_name', 'os_caching', 'image', 'storage_account', 'ephemeral_os_disk', + 'storage_container_name', 'data_disk_sizes_gb', 'storage_sku'] + auth_params + + else: + raise CLIError('Unrecognized storage profile: {}'.format(namespace.storage_profile)) + + logger.debug("storage profile '%s'", namespace.storage_profile) + + if for_scale_set: + # VMSS lacks some parameters, so scrub these out + props_to_remove = ['attach_os_disk', 'storage_account'] + for prop in props_to_remove: + if prop in required: + required.remove(prop) + if prop in forbidden: + forbidden.remove(prop) + + # set default storage SKU if not provided and using an image based OS + if not namespace.storage_sku and namespace.storage_profile in [StorageProfile.SAPirImage, + StorageProfile.SACustomImage]: # pylint: disable=line-too-long + namespace.storage_sku = 
['Standard_LRS'] if for_scale_set else ['Premium_LRS']
+
+    if namespace.ultra_ssd_enabled is None and namespace.storage_sku:
+        for sku in namespace.storage_sku:
+            if 'ultrassd_lrs' in sku.lower():
+                namespace.ultra_ssd_enabled = True
+
+    # Now verify the presence of required and absence of forbidden parameters
+    validate_parameter_set(
+        namespace, required, forbidden,
+        description='storage profile: {}:'.format(_get_storage_profile_description(namespace.storage_profile)))
+
+    image_data_disks = []
+    if namespace.storage_profile == StorageProfile.ManagedCustomImage:
+        # extract additional information from a managed custom image
+        res = parse_resource_id(namespace.image)
+        namespace.aux_subscriptions = [res['subscription']]
+        compute_client = _compute_client_factory(cmd.cli_ctx, subscription_id=res['subscription'])
+        if res['type'].lower() == 'images':
+            image_info = compute_client.images.get(res['resource_group'], res['name'])
+            namespace.os_type = image_info.storage_profile.os_disk.os_type
+            image_data_disks = image_info.storage_profile.data_disks or []
+            image_data_disks = [{'lun': disk.lun} for disk in image_data_disks]
+
+        elif res['type'].lower() == 'galleries':
+            image_info = compute_client.gallery_images.get(resource_group_name=res['resource_group'],
+                                                           gallery_name=res['name'],
+                                                           gallery_image_name=res['child_name_1'])
+            namespace.os_type = image_info.os_type
+            gallery_image_version = res.get('child_name_2', '')
+            if gallery_image_version.lower() in ['latest', '']:
+                image_version_infos = compute_client.gallery_image_versions.list_by_gallery_image(
+                    resource_group_name=res['resource_group'], gallery_name=res['name'],
+                    gallery_image_name=res['child_name_1'])
+                image_version_infos = [x for x in image_version_infos if not x.publishing_profile.exclude_from_latest]
+                if not image_version_infos:
+                    raise CLIError('No latest image version exists for "{}"'.format(namespace.image))
+                image_version_info = sorted(image_version_infos, key=lambda x: x.publishing_profile.published_date)[-1]
+            else:
+                image_version_info = compute_client.gallery_image_versions.get(
+                    resource_group_name=res['resource_group'], gallery_name=res['name'],
+                    gallery_image_name=res['child_name_1'], gallery_image_version_name=res['child_name_2'])
+            image_data_disks = image_version_info.storage_profile.data_disk_images or []
+            image_data_disks = [{'lun': disk.lun} for disk in image_data_disks]
+
+        else:
+            raise CLIError('usage error: unrecognized image information "{}"'.format(namespace.image))
+
+    # pylint: disable=no-member
+
+    elif namespace.storage_profile == StorageProfile.ManagedSpecializedOSDisk:
+        # accept disk name or ID
+        namespace.attach_os_disk = _get_resource_id(
+            cmd.cli_ctx, namespace.attach_os_disk, namespace.resource_group_name, 'disks', 'Microsoft.Compute')
+
+    if getattr(namespace, 'attach_data_disks', None):
+        if not namespace.use_unmanaged_disk:
+            namespace.attach_data_disks = [_get_resource_id(cmd.cli_ctx, d, namespace.resource_group_name, 'disks',
+                                                            'Microsoft.Compute') for d in namespace.attach_data_disks]
+
+    if namespace.storage_profile == StorageProfile.SharedGalleryImage:
+
+        if namespace.location is None:
+            raise RequiredArgumentMissingError(
+                'Please input the location of the shared gallery image through the parameter --location.')
+
+        from ._vm_utils import parse_shared_gallery_image_id
+        image_info = parse_shared_gallery_image_id(namespace.image)
+
+        from ._client_factory import cf_shared_gallery_image
+        shared_gallery_image_info = cf_shared_gallery_image(cmd.cli_ctx).get(
location=namespace.location, gallery_unique_name=image_info[0], gallery_image_name=image_info[1]) + + if namespace.os_type and namespace.os_type.lower() != shared_gallery_image_info.os_type.lower(): + raise ArgumentUsageError("The --os-type is not the correct os type of this shared gallery image, " + "the os type of this image should be {}".format(shared_gallery_image_info.os_type)) + namespace.os_type = shared_gallery_image_info.os_type + + if namespace.storage_profile == StorageProfile.CommunityGalleryImage: + + if namespace.location is None: + raise RequiredArgumentMissingError( + 'Please input the location of the community gallery image through the parameter --location.') + + from ._vm_utils import parse_community_gallery_image_id + image_info = parse_community_gallery_image_id(namespace.image) + + from ._client_factory import cf_community_gallery_image + community_gallery_image_info = cf_community_gallery_image(cmd.cli_ctx).get( + location=namespace.location, public_gallery_name=image_info[0], gallery_image_name=image_info[1]) + + if namespace.os_type and namespace.os_type.lower() != community_gallery_image_info.os_type.lower(): + raise ArgumentUsageError( + "The --os-type is not the correct os type of this community gallery image, " + "the os type of this image should be {}".format(community_gallery_image_info.os_type)) + namespace.os_type = community_gallery_image_info.os_type + + if getattr(namespace, 'security_type', None) == 'ConfidentialVM' and \ + not getattr(namespace, 'os_disk_security_encryption_type', None): + raise RequiredArgumentMissingError('usage error: "--os-disk-security-encryption-type" is required ' + 'when "--security-type" is specified as "ConfidentialVM"') + + if getattr(namespace, 'os_disk_secure_vm_disk_encryption_set', None) and \ + getattr(namespace, 'os_disk_security_encryption_type', None) != 'DiskWithVMGuestState': + raise ArgumentUsageError( + 'usage error: The "--os-disk-secure-vm-disk-encryption-set" can only be passed in ' + 'when "--os-disk-security-encryption-type" is "DiskWithVMGuestState"') + + os_disk_security_encryption_type = getattr(namespace, 'os_disk_security_encryption_type', None) + if os_disk_security_encryption_type and os_disk_security_encryption_type.lower() == 'nonpersistedtpm': + if ((getattr(namespace, 'security_type', None) != 'ConfidentialVM') or + not getattr(namespace, 'enable_vtpm', None)): + raise ArgumentUsageError( + 'usage error: The "--os-disk-security-encryption-type NonPersistedTPM" can only be passed in ' + 'when "--security-type" is "ConfidentialVM" and "--enable-vtpm" is True') + + if not namespace.os_type: + namespace.os_type = 'windows' if 'windows' in namespace.os_offer.lower() else 'linux' + + if getattr(namespace, 'source_snapshots_or_disks', None) and \ + getattr(namespace, 'source_snapshots_or_disks_size_gb', None): + if len(namespace.source_snapshots_or_disks) != len(namespace.source_snapshots_or_disks_size_gb): + raise ArgumentUsageError( + 'Length of --source-snapshots-or-disks, --source-snapshots-or-disks-size-gb must be same.') + elif getattr(namespace, 'source_snapshots_or_disks', None) or \ + getattr(namespace, 'source_snapshots_or_disks_size_gb', None): + raise ArgumentUsageError('usage error: --source-snapshots-or-disks and ' + '--source-snapshots-or-disks-size-gb must be used together') + + if getattr(namespace, 'source_disk_restore_point', None) and \ + getattr(namespace, 'source_disk_restore_point_size_gb', None): + if len(namespace.source_disk_restore_point) != 
len(namespace.source_disk_restore_point_size_gb): + raise ArgumentUsageError( + 'Length of --source-disk-restore-point, --source-disk-restore-point-size-gb must be same.') + elif getattr(namespace, 'source_disk_restore_point', None) or \ + getattr(namespace, 'source_disk_restore_point_size_gb', None): + raise ArgumentUsageError('usage error: --source-disk-restore-point and ' + '--source-disk-restore-point-size-gb must be used together') + + from ._vm_utils import normalize_disk_info + # attach_data_disks are not exposed yet for VMSS, so use 'getattr' to avoid crash + vm_size = (getattr(namespace, 'size', None) or getattr(namespace, 'vm_sku', None)) + + # pylint: disable=line-too-long + namespace.disk_info = normalize_disk_info(size=vm_size, + image_data_disks=image_data_disks, + data_disk_sizes_gb=namespace.data_disk_sizes_gb, + attach_data_disks=getattr(namespace, 'attach_data_disks', []), + storage_sku=namespace.storage_sku, + os_disk_caching=namespace.os_caching, + data_disk_cachings=namespace.data_caching, + ephemeral_os_disk=getattr(namespace, 'ephemeral_os_disk', None), + ephemeral_os_disk_placement=getattr(namespace, + 'ephemeral_os_disk_placement', None), + data_disk_delete_option=getattr( + namespace, 'data_disk_delete_option', None), + source_snapshots_or_disks=getattr(namespace, 'source_snapshots_or_disks', + None), + source_snapshots_or_disks_size_gb=getattr(namespace, + 'source_snapshots_or_disks_size_gb', + None), + source_disk_restore_point=getattr(namespace, 'source_disk_restore_point', + None), + source_disk_restore_point_size_gb=getattr(namespace, + 'source_disk_restore_point_size_gb', + None) + ) + + +def _validate_vm_create_storage_account(cmd, namespace): + from azure.mgmt.core.tools import parse_resource_id + if namespace.storage_account: + storage_id = parse_resource_id(namespace.storage_account) + rg = storage_id.get('resource_group', namespace.resource_group_name) + if check_existence(cmd.cli_ctx, storage_id['name'], rg, 'Microsoft.Storage', 'storageAccounts'): + # 1 - existing storage account specified + namespace.storage_account_type = 'existing' + logger.debug("using specified existing storage account '%s'", storage_id['name']) + else: + # 2 - params for new storage account specified + namespace.storage_account_type = 'new' + logger.debug("specified storage account '%s' not found and will be created", storage_id['name']) + else: + from azure.cli.core.profiles import ResourceType + from azure.cli.core.commands.client_factory import get_mgmt_service_client + storage_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_STORAGE).storage_accounts + + # find storage account in target resource group that matches the VM's location + sku_tier = 'Standard' + for sku in namespace.storage_sku: + if 'Premium' in sku: + sku_tier = 'Premium' + break + + account = next( + (a for a in storage_client.list_by_resource_group(namespace.resource_group_name) + if a.sku.tier == sku_tier and a.location == namespace.location), None) + + if account: + # 3 - nothing specified - find viable storage account in target resource group + namespace.storage_account = account.name + namespace.storage_account_type = 'existing' + logger.debug("suitable existing storage account '%s' will be used", account.name) + else: + # 4 - nothing specified - create a new storage account + namespace.storage_account_type = 'new' + logger.debug('no suitable storage account found. 
One will be created.') + + +def _validate_vm_create_availability_set(cmd, namespace): + from azure.mgmt.core.tools import parse_resource_id, resource_id + from azure.cli.core.commands.client_factory import get_subscription_id + if namespace.availability_set: + as_id = parse_resource_id(namespace.availability_set) + name = as_id['name'] + rg = as_id.get('resource_group', namespace.resource_group_name) + + if not check_existence(cmd.cli_ctx, name, rg, 'Microsoft.Compute', 'availabilitySets'): + raise CLIError("Availability set '{}' does not exist.".format(name)) + + namespace.availability_set = resource_id( + subscription=get_subscription_id(cmd.cli_ctx), + resource_group=rg, + namespace='Microsoft.Compute', + type='availabilitySets', + name=name) + logger.debug("adding to specified availability set '%s'", namespace.availability_set) + + +def _validate_vm_create_vmss(cmd, namespace): + from azure.mgmt.core.tools import parse_resource_id, resource_id + from azure.cli.core.commands.client_factory import get_subscription_id + if namespace.vmss: + as_id = parse_resource_id(namespace.vmss) + name = as_id['name'] + rg = as_id.get('resource_group', namespace.resource_group_name) + + if not check_existence(cmd.cli_ctx, name, rg, 'Microsoft.Compute', 'virtualMachineScaleSets'): + raise CLIError("virtual machine scale set '{}' does not exist.".format(name)) + + namespace.vmss = resource_id( + subscription=get_subscription_id(cmd.cli_ctx), + resource_group=rg, + namespace='Microsoft.Compute', + type='virtualMachineScaleSets', + name=name) + logger.debug("adding to specified virtual machine scale set '%s'", namespace.vmss) + + +def _validate_vm_create_dedicated_host(cmd, namespace): + """ + "host": { + "$ref": "#/definitions/SubResource", + "description": "Specifies information about the dedicated host that the virtual machine resides in. + Minimum api-version: 2018-10-01." + }, + "hostGroup": { + "$ref": "#/definitions/SubResource", + "description": "Specifies information about the dedicated host group that the virtual machine resides in. + Minimum api-version: 2020-06-01.
NOTE: User cannot specify both host and hostGroup properties." + } + + :param cmd: + :param namespace: + :return: + """ + from azure.mgmt.core.tools import resource_id, is_valid_resource_id + from azure.cli.core.commands.client_factory import get_subscription_id + + if namespace.dedicated_host and namespace.dedicated_host_group: + raise CLIError('usage error: User cannot specify both --host and --host-group properties.') + + if namespace.dedicated_host and not is_valid_resource_id(namespace.dedicated_host): + raise CLIError('usage error: --host is not a valid resource ID.') + + if namespace.dedicated_host_group: + if not is_valid_resource_id(namespace.dedicated_host_group): + namespace.dedicated_host_group = resource_id( + subscription=get_subscription_id(cmd.cli_ctx), resource_group=namespace.resource_group_name, + namespace='Microsoft.Compute', type='hostGroups', name=namespace.dedicated_host_group + ) + + +def _validate_vm_vmss_create_vnet(cmd, namespace, for_scale_set=False): + from azure.mgmt.core.tools import is_valid_resource_id + vnet = namespace.vnet_name + subnet = namespace.subnet + rg = namespace.resource_group_name + location = namespace.location + nics = getattr(namespace, 'nics', None) + + if vnet and '/' in vnet: + raise CLIError("incorrect usage: --subnet ID | --subnet NAME --vnet-name NAME") + + if not vnet and not subnet and not nics: + logger.debug('no subnet specified. Attempting to find an existing Vnet and subnet...') + + # if nothing specified, try to find an existing vnet and subnet in the target resource group + VnetList = import_aaz_by_profile(cmd.cli_ctx.cloud.profile, "network.vnet").List + + vnet_list = VnetList(cli_ctx=cmd.cli_ctx)(command_args={ + "resource_group": rg + }) + + # find VNET in target resource group that matches the VM's location with a matching subnet + for vnet_match in (v for v in vnet_list if bool(v['location'] == location and v['subnets'])): + # 1 - find a suitable existing vnet/subnet + result = None + if not for_scale_set: + result = next((s for s in vnet_match['subnets'] if s['name'].lower() != 'gatewaysubnet'), None) + else: + def _check_subnet(s): + if s['name'].lower() == 'gatewaysubnet': + return False + subnet_mask = s['addressPrefix'].split('/')[-1] + return _subnet_capacity_check(subnet_mask, namespace.instance_count, + not namespace.disable_overprovision) + + result = next((s for s in vnet_match['subnets'] if _check_subnet(s)), None) + if not result: + continue + namespace.subnet = result['name'] + namespace.vnet_name = vnet_match['name'] + namespace.vnet_type = 'existing' + logger.debug("existing vnet '%s' and subnet '%s' found", namespace.vnet_name, namespace.subnet) + return + + if subnet: + subnet_is_id = is_valid_resource_id(subnet) + if (subnet_is_id and vnet) or (not subnet_is_id and not vnet): + raise CLIError("incorrect usage: --subnet ID | --subnet NAME --vnet-name NAME") + + subnet_exists = \ + check_existence(cmd.cli_ctx, subnet, rg, 'Microsoft.Network', 'subnets', vnet, 'virtualNetworks') + + if subnet_is_id and not subnet_exists: + raise CLIError("Subnet '{}' does not exist.".format(subnet)) + if subnet_exists: + # 2 - user specified existing vnet/subnet + namespace.vnet_type = 'existing' + logger.debug("using specified vnet '%s' and subnet '%s'", namespace.vnet_name, namespace.subnet) + return + # 3 - create a new vnet/subnet + namespace.vnet_type = 'new' + logger.debug('no suitable subnet found. 
One will be created.') + + +def _subnet_capacity_check(subnet_mask, vmss_instance_count, over_provision): + mask = int(subnet_mask) + # '2' are the reserved broadcasting addresses + # '*1.5' so we have enough leeway for over-provision + factor = 1.5 if over_provision else 1 + return ((1 << (32 - mask)) - 2) > int(vmss_instance_count * factor) + + +def _validate_vm_vmss_accelerated_networking(cli_ctx, namespace): + if namespace.accelerated_networking is None: + size = getattr(namespace, 'size', None) or getattr(namespace, 'vm_sku', None) + size = size.lower() + + # Use the following code to refresh the list + # skus = list_sku_info(cli_ctx, namespace.location) + # aval_sizes = [x.name.lower() for x in skus if x.resource_type == 'virtualMachines' and + # any(c.name == 'AcceleratedNetworkingEnabled' and c.value == 'True' for c in x.capabilities)] + + aval_sizes = ['standard_b12ms', 'standard_b16ms', 'standard_b20ms', 'standard_ds2_v2', 'standard_ds3_v2', + 'standard_ds4_v2', 'standard_ds5_v2', 'standard_ds11-1_v2', 'standard_ds11_v2', + 'standard_ds12-1_v2', 'standard_ds12-2_v2', 'standard_ds12_v2', 'standard_ds13-2_v2', + 'standard_ds13-4_v2', 'standard_ds13_v2', 'standard_ds14-4_v2', 'standard_ds14-8_v2', + 'standard_ds14_v2', 'standard_ds15_v2', 'standard_ds2_v2_promo', 'standard_ds3_v2_promo', + 'standard_ds4_v2_promo', 'standard_ds5_v2_promo', 'standard_ds11_v2_promo', + 'standard_ds12_v2_promo', 'standard_ds13_v2_promo', 'standard_ds14_v2_promo', 'standard_f2s', + 'standard_f4s', 'standard_f8s', 'standard_f16s', 'standard_d4s_v3', 'standard_d8s_v3', + 'standard_d16s_v3', 'standard_d32s_v3', 'standard_d2_v2', 'standard_d3_v2', 'standard_d4_v2', + 'standard_d5_v2', 'standard_d11_v2', 'standard_d12_v2', 'standard_d13_v2', 'standard_d14_v2', + 'standard_d15_v2', 'standard_d2_v2_promo', 'standard_d3_v2_promo', 'standard_d4_v2_promo', + 'standard_d5_v2_promo', 'standard_d11_v2_promo', 'standard_d12_v2_promo', 'standard_d13_v2_promo', + 'standard_d14_v2_promo', 'standard_f2', 'standard_f4', 'standard_f8', 'standard_f16', + 'standard_d4_v3', 'standard_d8_v3', 'standard_d16_v3', 'standard_d32_v3', 'standard_d48_v3', + 'standard_d64_v3', 'standard_d48s_v3', 'standard_d64s_v3', 'standard_e4_v3', 'standard_e8_v3', + 'standard_e16_v3', 'standard_e20_v3', 'standard_e32_v3', 'standard_e48_v3', 'standard_e64i_v3', + 'standard_e64_v3', 'standard_e4-2s_v3', 'standard_e4s_v3', 'standard_e8-2s_v3', + 'standard_e8-4s_v3', 'standard_e8s_v3', 'standard_e16-4s_v3', 'standard_e16-8s_v3', + 'standard_e16s_v3', 'standard_e20s_v3', 'standard_e32-8s_v3', 'standard_e32-16s_v3', + 'standard_e32s_v3', 'standard_e48s_v3', 'standard_e64-16s_v3', 'standard_e64-32s_v3', + 'standard_e64is_v3', 'standard_e64s_v3', 'standard_l8s_v2', 'standard_l16s_v2', + 'standard_l32s_v2', 'standard_l48s_v2', 'standard_l64s_v2', 'standard_l80s_v2', 'standard_e4_v4', + 'standard_e8_v4', 'standard_e16_v4', 'standard_e20_v4', 'standard_e32_v4', 'standard_e48_v4', + 'standard_e64_v4', 'standard_e4d_v4', 'standard_e8d_v4', 'standard_e16d_v4', 'standard_e20d_v4', + 'standard_e32d_v4', 'standard_e48d_v4', 'standard_e64d_v4', 'standard_e4-2s_v4', + 'standard_e4s_v4', 'standard_e8-2s_v4', 'standard_e8-4s_v4', 'standard_e8s_v4', + 'standard_e16-4s_v4', 'standard_e16-8s_v4', 'standard_e16s_v4', 'standard_e20s_v4', + 'standard_e32-8s_v4', 'standard_e32-16s_v4', 'standard_e32s_v4', 'standard_e48s_v4', + 'standard_e64-16s_v4', 'standard_e64-32s_v4', 'standard_e64s_v4', 'standard_e4-2ds_v4', + 'standard_e4ds_v4', 'standard_e8-2ds_v4', 
'standard_e8-4ds_v4', 'standard_e8ds_v4', + 'standard_e16-4ds_v4', 'standard_e16-8ds_v4', 'standard_e16ds_v4', 'standard_e20ds_v4', + 'standard_e32-8ds_v4', 'standard_e32-16ds_v4', 'standard_e32ds_v4', 'standard_e48ds_v4', + 'standard_e64-16ds_v4', 'standard_e64-32ds_v4', 'standard_e64ds_v4', 'standard_d4d_v4', + 'standard_d8d_v4', 'standard_d16d_v4', 'standard_d32d_v4', 'standard_d48d_v4', 'standard_d64d_v4', + 'standard_d4_v4', 'standard_d8_v4', 'standard_d16_v4', 'standard_d32_v4', 'standard_d48_v4', + 'standard_d64_v4', 'standard_d4ds_v4', 'standard_d8ds_v4', 'standard_d16ds_v4', + 'standard_d32ds_v4', 'standard_d48ds_v4', 'standard_d64ds_v4', 'standard_d4s_v4', + 'standard_d8s_v4', 'standard_d16s_v4', 'standard_d32s_v4', 'standard_d48s_v4', 'standard_d64s_v4', + 'standard_f4s_v2', 'standard_f8s_v2', 'standard_f16s_v2', 'standard_f32s_v2', 'standard_f48s_v2', + 'standard_f64s_v2', 'standard_f72s_v2', 'standard_m208ms_v2', 'standard_m208s_v2', + 'standard_m416-208s_v2', 'standard_m416s_v2', 'standard_m416-208ms_v2', 'standard_m416ms_v2', + 'standard_m64', 'standard_m64m', 'standard_m128', 'standard_m128m', 'standard_m8-2ms', + 'standard_m8-4ms', 'standard_m8ms', 'standard_m16-4ms', 'standard_m16-8ms', 'standard_m16ms', + 'standard_m32-8ms', 'standard_m32-16ms', 'standard_m32ls', 'standard_m32ms', 'standard_m32ts', + 'standard_m64-16ms', 'standard_m64-32ms', 'standard_m64ls', 'standard_m64ms', 'standard_m64s', + 'standard_m128-32ms', 'standard_m128-64ms', 'standard_m128ms', 'standard_m128s', + 'standard_d4a_v4', 'standard_d8a_v4', 'standard_d16a_v4', 'standard_d32a_v4', 'standard_d48a_v4', + 'standard_d64a_v4', 'standard_d96a_v4', 'standard_d4as_v4', 'standard_d8as_v4', + 'standard_d16as_v4', 'standard_d32as_v4', 'standard_d48as_v4', 'standard_d64as_v4', + 'standard_d96as_v4', 'standard_e4a_v4', 'standard_e8a_v4', 'standard_e16a_v4', 'standard_e20a_v4', + 'standard_e32a_v4', 'standard_e48a_v4', 'standard_e64a_v4', 'standard_e96a_v4', + 'standard_e4as_v4', 'standard_e8as_v4', 'standard_e16as_v4', 'standard_e20as_v4', + 'standard_e32as_v4', 'standard_e48as_v4', 'standard_e64as_v4', 'standard_e96as_v4'] + if size not in aval_sizes: + return + + new_4core_sizes = ['Standard_D3_v2', 'Standard_D3_v2_Promo', 'Standard_D3_v2_ABC', 'Standard_DS3_v2', + 'Standard_DS3_v2_Promo', 'Standard_D12_v2', 'Standard_D12_v2_Promo', 'Standard_D12_v2_ABC', + 'Standard_DS12_v2', 'Standard_DS12_v2_Promo', 'Standard_F8s_v2', 'Standard_F4', + 'Standard_F4_ABC', 'Standard_F4s', 'Standard_E8_v3', 'Standard_E8s_v3', 'Standard_D8_v3', + 'Standard_D8s_v3'] + new_4core_sizes = [x.lower() for x in new_4core_sizes] + if size not in new_4core_sizes: + compute_client = _compute_client_factory(cli_ctx) + sizes = compute_client.virtual_machine_sizes.list(namespace.location) + size_info = next((s for s in sizes if s.name.lower() == size), None) + if size_info is None or size_info.number_of_cores < 8: + return + + # VMs need to be a supported image in the marketplace + # Ubuntu 16.04 | 18.04, SLES 12 SP3, RHEL 7.4, CentOS 7.4, Flatcar, Debian "Stretch" with backports kernel + # Oracle Linux 7.4, Windows Server 2016, Windows Server 2012R2 + publisher, offer, sku = namespace.os_publisher, namespace.os_offer, namespace.os_sku + if not publisher: + return + publisher, offer, sku = publisher.lower(), offer.lower(), sku.lower() + + if publisher == 'coreos' or offer == 'coreos': + from azure.cli.core.parser import InvalidArgumentValueError + raise InvalidArgumentValueError("As CoreOS is deprecated and there is no image in the 
marketplace any more," + " please use Flatcar Container Linux instead.") + + distros = [('canonical', 'UbuntuServer', '^16.04|^18.04'), + ('suse', 'sles', '^12-sp3'), ('redhat', 'rhel', '^7.4'), + ('openlogic', 'centos', '^7.4'), ('kinvolk', 'flatcar-container-linux-free', None), + ('kinvolk', 'flatcar-container-linux', None), ('credativ', 'debian', '-backports'), + ('oracle', 'oracle-linux', '^7.4'), ('MicrosoftWindowsServer', 'WindowsServer', '^2016'), + ('MicrosoftWindowsServer', 'WindowsServer', '^2012-R2')] + import re + for p, o, s in distros: + if p.lower() == publisher and (o is None or o.lower() == offer) and (s is None or re.match(s, sku, re.I)): + namespace.accelerated_networking = True + + +def _validate_vmss_create_subnet(namespace): + if namespace.vnet_type == 'new': + if namespace.subnet_address_prefix is None: + cidr = namespace.vnet_address_prefix.split('/', 1)[0] + i = 0 + for i in range(24, 16, -1): + if _subnet_capacity_check(i, namespace.instance_count, not namespace.disable_overprovision): + break + if i < 16: + err = "instance count '{}' is out of range of 2^16 subnet size'" + raise CLIError(err.format(namespace.instance_count)) + namespace.subnet_address_prefix = '{}/{}'.format(cidr, i) + + if namespace.app_gateway_type and namespace.app_gateway_subnet_address_prefix is None: + namespace.app_gateway_subnet_address_prefix = _get_next_subnet_addr_suffix( + namespace.vnet_address_prefix, namespace.subnet_address_prefix, 24) + + +def _get_next_subnet_addr_suffix(vnet_cidr, subnet_cidr, new_mask): + def _convert_to_int(address, bit_mask_len): + a, b, c, d = [int(x) for x in address.split('.')] + result = '{0:08b}{1:08b}{2:08b}{3:08b}'.format(a, b, c, d) + return int(result[:-bit_mask_len], 2) + + error_msg = "usage error: --subnet-address-prefix value should be a subrange of --vnet-address-prefix's" + # extract vnet information needed to verify the defaults we are coming out + vnet_ip_address, mask = vnet_cidr.split('/') + vnet_bit_mask_len = 32 - int(mask) + vnet_int = _convert_to_int(vnet_ip_address, vnet_bit_mask_len) + + subnet_ip_address, mask = subnet_cidr.split('/') + subnet_bit_mask_len = 32 - int(mask) + + if vnet_bit_mask_len <= subnet_bit_mask_len: + raise CLIError(error_msg) + + candidate_int = _convert_to_int(subnet_ip_address, subnet_bit_mask_len) + 1 + if (candidate_int >> (vnet_bit_mask_len - subnet_bit_mask_len)) > vnet_int: # overflows? + candidate_int = candidate_int - 2 # try the other way around + if (candidate_int >> (vnet_bit_mask_len - subnet_bit_mask_len)) > vnet_int: + raise CLIError(error_msg) + + # format back to the cidr + candaidate_str = '{0:32b}'.format(candidate_int << subnet_bit_mask_len) + return '{0}.{1}.{2}.{3}/{4}'.format(int(candaidate_str[0:8], 2), int(candaidate_str[8:16], 2), + int(candaidate_str[16:24], 2), int(candaidate_str[24:32], 2), + new_mask) + + +def _validate_vm_create_nsg(cmd, namespace): + if namespace.nsg: + if check_existence(cmd.cli_ctx, namespace.nsg, namespace.resource_group_name, + 'Microsoft.Network', 'networkSecurityGroups'): + namespace.nsg_type = 'existing' + logger.debug("using specified NSG '%s'", namespace.nsg) + else: + namespace.nsg_type = 'new' + logger.debug("specified NSG '%s' not found. 
It will be created.", namespace.nsg) + elif namespace.nsg == '': + namespace.nsg_type = None + logger.debug('no NSG will be used') + elif namespace.nsg is None: + namespace.nsg_type = 'new' + logger.debug('new NSG will be created') + + +def _validate_vmss_create_nsg(cmd, namespace): + if namespace.nsg: + namespace.nsg = _get_resource_id(cmd.cli_ctx, namespace.nsg, namespace.resource_group_name, + 'networkSecurityGroups', 'Microsoft.Network') + + +def _validate_vm_vmss_create_public_ip(cmd, namespace): + if namespace.public_ip_address: + if check_existence(cmd.cli_ctx, namespace.public_ip_address, namespace.resource_group_name, + 'Microsoft.Network', 'publicIPAddresses'): + namespace.public_ip_address_type = 'existing' + logger.debug("using existing specified public IP '%s'", namespace.public_ip_address) + else: + namespace.public_ip_address_type = 'new' + logger.debug("specified public IP '%s' not found. It will be created.", namespace.public_ip_address) + elif namespace.public_ip_address == '': + namespace.public_ip_address_type = None + logger.debug('no public IP address will be used') + elif namespace.public_ip_address is None: + namespace.public_ip_address_type = 'new' + logger.debug('new public IP address will be created') + + # Use standard public IP address automatically when using zones. + if hasattr(namespace, 'zone') and namespace.zone is not None: + namespace.public_ip_sku = 'Standard' + + # Public-IP SKU is only exposed for VM. VMSS has no such needs so far + if getattr(namespace, 'public_ip_sku', None): + if namespace.public_ip_sku == 'Standard': + if not namespace.public_ip_address_allocation: + namespace.public_ip_address_allocation = 'Static' + + +def _validate_vmss_create_public_ip(cmd, namespace): + if namespace.load_balancer_type is None and namespace.app_gateway_type is None: + if namespace.public_ip_address: + raise CLIError('--public-ip-address can only be used when creating a new load ' + 'balancer or application gateway frontend.') + namespace.public_ip_address = '' + _validate_vm_vmss_create_public_ip(cmd, namespace) + + +def validate_delete_options(resources, delete_option): + """ Extracts multiple space-separated delete_option in key[=value] format """ + if resources and isinstance(delete_option, list): + if len(delete_option) == 1 and len(delete_option[0].split('=', 1)) == 1: + return delete_option[0] + delete_option_dict = {} + for item in delete_option: + delete_option_dict.update(validate_delete_option(item)) + return delete_option_dict + return None + + +def validate_delete_option(string): + """ Extracts a single delete_option in key[=value] format """ + from azure.cli.core.azclierror import InvalidArgumentValueError + result = {} + if string: + comps = string.split('=', 1) + if len(comps) == 2: + result = {comps[0]: comps[1]} + else: + raise InvalidArgumentValueError( + "Invalid value for delete option. 
Use a singular value to apply on all resources, or use " + "= to configure the delete behavior for individual resources.") + return result + + +def _validate_vm_create_nics(cmd, namespace): + from azure.mgmt.core.tools import resource_id + from azure.cli.core.commands.client_factory import get_subscription_id + nic_ids = namespace.nics + delete_option = validate_delete_options(nic_ids, getattr(namespace, 'nic_delete_option', None)) + nics = [] + + if not nic_ids: + namespace.nic_type = 'new' + logger.debug('new NIC will be created') + return + + if not isinstance(nic_ids, list): + nic_ids = [nic_ids] + + for n in nic_ids: + nic = {'id': n if '/' in n else resource_id(name=n, + resource_group=namespace.resource_group_name, + namespace='Microsoft.Network', + type='networkInterfaces', + subscription=get_subscription_id(cmd.cli_ctx)), + 'properties': {'primary': nic_ids[0] == n} + } + if delete_option: + nic['properties']['deleteOption'] = delete_option if isinstance(delete_option, str) else \ + delete_option.get(n, None) + nics.append(nic) + + namespace.nics = nics + namespace.nic_type = 'existing' + namespace.public_ip_address_type = None + logger.debug('existing NIC(s) will be used') + + +def _validate_vm_nic_delete_option(namespace): + if not namespace.nics and namespace.nic_delete_option: + if len(namespace.nic_delete_option) == 1 and len( + namespace.nic_delete_option[0].split('=')) == 1: # pylint: disable=line-too-long + namespace.nic_delete_option = namespace.nic_delete_option[0] + elif len(namespace.nic_delete_option) > 1 or any((len(delete_option.split('=')) > 1 for delete_option in + namespace.nic_delete_option)): # pylint: disable=line-too-long + from azure.cli.core.parser import InvalidArgumentValueError + raise InvalidArgumentValueError("incorrect usage: Cannot specify individual delete option when no nic is " + "specified. Either specify a list of nics and their delete option like: " + "--nics nic1 nic2 --nic-delete-option nic1=Delete nic2=Detach or specify " + "delete option for all: --nics nic1 nic2 --nic-delete-option Delete or " + "specify delete option for the new nic created: --nic-delete-option Delete") + + +def _validate_vm_vmss_create_auth(namespace, cmd=None): + if namespace.storage_profile in [StorageProfile.ManagedSpecializedOSDisk, + StorageProfile.SASpecializedOSDisk]: + return + + if namespace.admin_username is None: + namespace.admin_username = get_default_admin_username() + if namespace.admin_username and namespace.os_type: + namespace.admin_username = _validate_admin_username(namespace.admin_username, namespace.os_type) + + # if not namespace.os_type: + # raise CLIError("Unable to resolve OS type. Specify '--os-type' argument.") + + if not namespace.authentication_type: + # if both ssh key and password, infer that authentication_type is all. 
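+ # A hedged sketch of the inference below (illustrative inputs, not part of the module):
+ #   ssh_key_value set and admin_password set    -> authentication_type = 'all'
+ #   os_type is 'windows' or admin_password set  -> authentication_type = 'password'
+ #   otherwise (typically Linux, no password)    -> authentication_type = 'ssh'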
+ if namespace.ssh_key_value and namespace.admin_password: + namespace.authentication_type = 'all' + else: + # apply default auth type (password for Windows, ssh for Linux) by examining the OS type + namespace.authentication_type = 'password' \ + if ((namespace.os_type and namespace.os_type.lower() == 'windows') or + namespace.admin_password) else 'ssh' + + if namespace.os_type and namespace.os_type.lower() == 'windows' and namespace.authentication_type == 'ssh': + raise CLIError('SSH not supported for Windows VMs.') + + # validate proper arguments supplied based on the authentication type + if namespace.authentication_type == 'password': + if namespace.ssh_key_value or namespace.ssh_dest_key_path: + raise CLIError('SSH key cannot be used with password authentication type.') + + # if password not given, attempt to prompt user for password. + if not namespace.admin_password: + _prompt_for_password(namespace) + + # validate password + _validate_admin_password(namespace.admin_password, namespace.os_type) + + elif namespace.authentication_type == 'ssh': + + if namespace.admin_password: + raise CLIError('Admin password cannot be used with SSH authentication type.') + + validate_ssh_key(namespace, cmd) + + if not namespace.ssh_dest_key_path: + namespace.ssh_dest_key_path = '/home/{}/.ssh/authorized_keys'.format(namespace.admin_username) + + elif namespace.authentication_type == 'all': + if namespace.os_type and namespace.os_type.lower() == 'windows': + raise CLIError('SSH not supported for Windows VMs. Use password authentication.') + + if not namespace.admin_password: + _prompt_for_password(namespace) + _validate_admin_password(namespace.admin_password, namespace.os_type) + + validate_ssh_key(namespace, cmd) + if not namespace.ssh_dest_key_path: + namespace.ssh_dest_key_path = '/home/{}/.ssh/authorized_keys'.format(namespace.admin_username) + + +def _prompt_for_password(namespace): + from knack.prompting import prompt_pass, NoTTYException + try: + namespace.admin_password = prompt_pass('Admin Password: ', confirm=True) + except NoTTYException: + raise CLIError('Please specify password in non-interactive mode.') + + +def _validate_admin_username(username, os_type): + import re + if not username: + raise CLIError("admin user name can not be empty") + is_linux = (os_type.lower() == 'linux') + # pylint: disable=line-too-long + pattern = (r'[\\\/"\[\]:|<>+=;,?*@#()!A-Z]+' if is_linux else r'[\\\/"\[\]:|<>+=;,?*@]+') + linux_err = r'admin user name cannot contain upper case character A-Z, special characters \/"[]:|<>+=;,?*@#()! or start with $ or -' + win_err = r'admin user name cannot contain special characters \/"[]:|<>+=;,?*@# or ends with .' + if re.findall(pattern, username): + raise CLIError(linux_err if is_linux else win_err) + if is_linux and re.findall(r'^[$-]+', username): + raise CLIError(linux_err) + if not is_linux and username.endswith('.'): + raise CLIError(win_err) + if username.lower() in DISALLOWED_USER_NAMES: + raise CLIError( + "This user name '{}' meets the general requirements, but is specifically disallowed for this image. 
Please try a different value.".format( + username)) + return username + + +def _validate_admin_password(password, os_type): + import re + is_linux = (os_type.lower() == 'linux') + max_length = 72 if is_linux else 123 + min_length = 12 + + contains_lower = re.findall('[a-z]+', password) + contains_upper = re.findall('[A-Z]+', password) + contains_digit = re.findall('[0-9]+', password) + contains_special_char = re.findall(r'[ `~!@#$%^&*()=+_\[\]{}\|;:.\/\'\",<>?]+', password) + count = len([x for x in [contains_lower, contains_upper, + contains_digit, contains_special_char] if x]) + + # pylint: disable=line-too-long + error_msg = ("The password length must be between {} and {}. Password must have the 3 of the following: " + "1 lower case character, 1 upper case character, 1 number and 1 special character.").format(min_length, + max_length) + if len(password) not in range(min_length, max_length + 1) or count < 3: + raise CLIError(error_msg) + + +def validate_ssh_key(namespace, cmd=None): + from azure.core.exceptions import HttpResponseError + ssh_key_type = namespace.ssh_key_type if hasattr(namespace, 'ssh_key_type') else 'RSA' + if hasattr(namespace, 'ssh_key_name') and namespace.ssh_key_name: + client = _compute_client_factory(cmd.cli_ctx) + # --ssh-key-name + if not namespace.ssh_key_value and not namespace.generate_ssh_keys: + # Use existing key, key must exist + try: + ssh_key_resource = client.ssh_public_keys.get(namespace.resource_group_name, namespace.ssh_key_name) + except HttpResponseError: + raise ValidationError('SSH key {} does not exist!'.format(namespace.ssh_key_name)) + namespace.ssh_key_value = [ssh_key_resource.public_key] + logger.info('Get a key from --ssh-key-name successfully') + elif namespace.ssh_key_value: + raise ValidationError('--ssh-key-name and --ssh-key-values cannot be used together') + elif namespace.generate_ssh_keys: + parameters = {} + parameters['location'] = namespace.location + public_key = _validate_ssh_key_helper("", namespace.generate_ssh_keys, ssh_key_type) + parameters['public_key'] = public_key + client.ssh_public_keys.create(resource_group_name=namespace.resource_group_name, + ssh_public_key_name=namespace.ssh_key_name, + parameters=parameters) + namespace.ssh_key_value = [public_key] + elif namespace.ssh_key_value: + if namespace.generate_ssh_keys and len(namespace.ssh_key_value) > 1: + logger.warning("Ignoring --generate-ssh-keys as multiple ssh key values have been specified.") + namespace.generate_ssh_keys = False + + processed_ssh_key_values = [] + for ssh_key_value in namespace.ssh_key_value: + processed_ssh_key_values.append(_validate_ssh_key_helper(ssh_key_value, + namespace.generate_ssh_keys, + ssh_key_type)) + namespace.ssh_key_value = processed_ssh_key_values + # if no ssh keys processed, try to generate new key / use existing at root. 
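+ # Hedged illustration of the fallback below, assuming the default key paths: with no
+ # --ssh-key-value given, _validate_ssh_key_helper("", generate_ssh_keys, 'RSA') reads
+ # ~/.ssh/id_rsa.pub when it exists; otherwise, if --generate-ssh-keys was passed, it
+ # creates ~/.ssh/id_rsa and ~/.ssh/id_rsa.pub and returns the new public key.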
+ else: + namespace.ssh_key_value = [_validate_ssh_key_helper("", + namespace.generate_ssh_keys, + ssh_key_type)] + + +def _validate_ssh_key_helper(ssh_key_value, should_generate_ssh_keys, ssh_key_type=None): + file_name = 'id_rsa.pub' if ssh_key_type is None or ssh_key_type == 'RSA' else 'id_ed25519.pub' + string_or_file = (ssh_key_value or + os.path.join(os.path.expanduser('~'), '.ssh', file_name)) + + content = string_or_file + if os.path.exists(string_or_file): + logger.info('Use existing SSH public key file: %s', string_or_file) + with open(string_or_file, 'r') as f: + content = f.read() + elif not keys.is_valid_ssh_rsa_public_key(content): + if should_generate_ssh_keys: + # figure out appropriate file names: + # 'base_name'(with private keys), and 'base_name.pub'(with public keys) + public_key_filepath = string_or_file + if public_key_filepath[-4:].lower() == '.pub': + private_key_filepath = public_key_filepath[:-4] + else: + private_key_filepath = public_key_filepath + '.private' + + if ssh_key_type == "Ed25519": + from azure.cli.command_modules.vm.azure_stack._vm_utils import generate_ssh_keys_ed25519 + content = generate_ssh_keys_ed25519(private_key_filepath, public_key_filepath) + else: + content = keys.generate_ssh_keys(private_key_filepath, public_key_filepath) + logger.warning("SSH key files '%s' and '%s' have been generated under ~/.ssh to " + "allow SSH access to the VM. If using machines without " + "permanent storage, back up your keys to a safe location.", + private_key_filepath, public_key_filepath) + else: + raise CLIError('An RSA key file or key value must be supplied to SSH Key Value. ' + 'You can use --generate-ssh-keys to let CLI generate one for you') + return content + + +def _validate_vm_vmss_msi(cmd, namespace, is_identity_assign=False): + # For the creation of VM and VMSS, "--role" and "--scope" should be passed in at the same time + # when assigning a role to the managed identity + if not is_identity_assign and namespace.assign_identity is not None: + if (namespace.identity_scope and not namespace.identity_role) or \ + (not namespace.identity_scope and namespace.identity_role): + raise ArgumentUsageError( + "usage error: please specify both --role and --scope when assigning a role to the managed identity") + + # For "az vm/vmss identity assign", "--role" and "--scope" should be passed in at the same time + # when assigning a role to the managed identity + if is_identity_assign: + if (namespace.identity_scope and not namespace.identity_role) or \ + (not namespace.identity_scope and namespace.identity_role): + raise ArgumentUsageError( + "usage error: please specify both --role and --scope when assigning a role to the managed identity") + + # Assign managed identity + if is_identity_assign or namespace.assign_identity is not None: + identities = namespace.assign_identity or [] + from ._vm_utils import MSI_LOCAL_ID + for i, _ in enumerate(identities): + if identities[i] != MSI_LOCAL_ID: + identities[i] = _get_resource_id(cmd.cli_ctx, identities[i], namespace.resource_group_name, + 'userAssignedIdentities', 'Microsoft.ManagedIdentity') + + user_assigned_identities = [x for x in identities if x != MSI_LOCAL_ID] + if user_assigned_identities and not cmd.supported_api_version(min_api='2017-12-01'): + raise ArgumentUsageError('usage error: user assigned identity is only available under profile ' + 'with minimum Compute API version of 2017-12-01') + if namespace.identity_scope: + if identities and MSI_LOCAL_ID not in identities: + raise ArgumentUsageError("usage error: 
'--scope'/'--role' is only applicable when " + "assign system identity") + # keep 'identity_role' for output as logical name is more readable + setattr(namespace, 'identity_role_id', _resolve_role_id(cmd.cli_ctx, namespace.identity_role, + namespace.identity_scope)) + elif namespace.identity_scope or namespace.identity_role: + raise ArgumentUsageError('usage error: --assign-identity [--scope SCOPE] [--role ROLE]') + + if not is_identity_assign: + _enable_msi_for_trusted_launch(namespace) + + +def _enable_msi_for_trusted_launch(namespace): + # Enable system assigned msi by default when Trusted Launch configuration is met + is_trusted_launch = namespace.security_type and namespace.security_type.lower() == 'trustedlaunch' \ + and namespace.enable_vtpm and namespace.enable_secure_boot + if is_trusted_launch and namespace.enable_integrity_monitoring: + from ._vm_utils import MSI_LOCAL_ID + logger.info('The MSI is enabled by default when Trusted Launch configuration is met') + if namespace.assign_identity is None: + namespace.assign_identity = [MSI_LOCAL_ID] + elif '[system]' not in namespace.assign_identity: + namespace.assign_identity.append(MSI_LOCAL_ID) + + +def _validate_trusted_launch(namespace): + if not namespace.security_type or namespace.security_type.lower() != 'trustedlaunch': + return + + if namespace.enable_vtpm is None: + namespace.enable_vtpm = True + + if namespace.enable_secure_boot is None: + namespace.enable_secure_boot = True + + +def trusted_launch_set_default(namespace, generation_version, features): + if not generation_version: + return + + trusted_launch = ["TrustedLaunchSupported", "TrustedLaunchAndConfidentialVmSupported"] + + features_security_type = None + for item in features: + if hasattr(item, 'name') and hasattr(item, 'value') and item.name == 'SecurityType': + features_security_type = item.value + break + + from ._constants import UPGRADE_SECURITY_HINT, COMPATIBLE_SECURITY_TYPE_VALUE + if generation_version == 'V1': + logger.warning(UPGRADE_SECURITY_HINT) + + elif generation_version == 'V2': + if features_security_type in trusted_launch: + if namespace.security_type is None: + namespace.security_type = 'TrustedLaunch' + + if namespace.security_type != COMPATIBLE_SECURITY_TYPE_VALUE: + if namespace.enable_vtpm is None: + namespace.enable_vtpm = True + + if namespace.enable_secure_boot is None: + namespace.enable_secure_boot = True + else: + if namespace.security_type is None: + namespace.security_type = COMPATIBLE_SECURITY_TYPE_VALUE + logger.warning(UPGRADE_SECURITY_HINT) + + +def _validate_generation_version_and_trusted_launch(cmd, namespace): + from azure.cli.core.profiles import ResourceType + if not cmd.supported_api_version(resource_type=ResourceType.MGMT_COMPUTE, min_api='2020-12-01'): + return + from ._vm_utils import validate_image_trusted_launch, validate_vm_disk_trusted_launch + if namespace.image is not None: + from ._vm_utils import is_valid_image_version_id + if is_valid_image_version_id(namespace.image): + if namespace.security_type is None: + namespace.security_type = 'Standard' + + image_type = _parse_image_argument(cmd, namespace) + + if image_type == 'image_id': + # managed image does not support trusted launch + validate_image_trusted_launch(namespace) + return + + if image_type == 'uri': + # vhd does not support trusted launch + return + + if image_type == 'shared_gallery_image_id': + validate_image_trusted_launch(namespace) + return + + if image_type == 'community_gallery_image_id': + validate_image_trusted_launch(namespace) + return + + if 
image_type == 'urn': + client = _compute_client_factory(cmd.cli_ctx).virtual_machine_images + os_version = namespace.os_version + if os_version.lower() == 'latest': + os_version = _get_latest_image_version(cmd.cli_ctx, namespace.location, namespace.os_publisher, + namespace.os_offer, namespace.os_sku) + vm_image_info = client.get(namespace.location, namespace.os_publisher, namespace.os_offer, + namespace.os_sku, os_version) + generation_version = vm_image_info.hyper_v_generation if hasattr(vm_image_info, + 'hyper_v_generation') else None + features = vm_image_info.features if hasattr(vm_image_info, 'features') and vm_image_info.features else [] + + trusted_launch_set_default(namespace, generation_version, features) + return + + # create vm with os disk + if hasattr(namespace, 'attach_os_disk') and namespace.attach_os_disk is not None: + from azure.mgmt.core.tools import parse_resource_id + if urlparse(namespace.attach_os_disk).scheme and "://" in namespace.attach_os_disk: + # vhd does not support trusted launch + return + client = _compute_client_factory(cmd.cli_ctx).disks + attach_os_disk_name = parse_resource_id(namespace.attach_os_disk)['name'] + attach_os_disk_info = client.get(namespace.resource_group_name, attach_os_disk_name) + disk_security_profile = attach_os_disk_info.security_profile if hasattr(attach_os_disk_info, + 'security_profile') else None + validate_vm_disk_trusted_launch(namespace, disk_security_profile) + + +def _validate_vm_vmss_set_applications(cmd, namespace): # pylint: disable=unused-argument + if namespace.application_configuration_overrides and \ + len(namespace.application_version_ids) != len(namespace.application_configuration_overrides): + raise ArgumentUsageError('usage error: --app-config-overrides should have the same number of items as' + ' --application-version-ids') + if namespace.treat_deployment_as_failure: + if len(namespace.application_version_ids) != len(namespace.treat_deployment_as_failure): + raise ArgumentUsageError('usage error: --treat-deployment-as-failure should have the same number of items' + ' as --application-version-ids') + for boolean_value_in_string in namespace.treat_deployment_as_failure: + if boolean_value_in_string.lower() != 'true' and boolean_value_in_string.lower() != 'false': + raise ArgumentUsageError('usage error: --treat-deployment-as-failure only accepts a list of "true" or' + ' "false" values') + + +def _resolve_role_id(cli_ctx, role, scope): + import re + import uuid + from azure.cli.core.commands.client_factory import get_mgmt_service_client + from azure.cli.core.profiles import ResourceType + client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_AUTHORIZATION).role_definitions + + role_id = None + if re.match(r'/subscriptions/.+/providers/Microsoft.Authorization/roleDefinitions/', + role, re.I): + role_id = role + else: + try: + uuid.UUID(role) + role_id = '/subscriptions/{}/providers/Microsoft.Authorization/roleDefinitions/{}'.format( + client.config.subscription_id, role) + except ValueError: + pass + if not role_id: # retrieve role id + role_defs = list(client.list(scope, "roleName eq '{}'".format(role))) + if not role_defs: + raise CLIError("Role '{}' doesn't exist.".format(role)) + if len(role_defs) > 1: + ids = [r.id for r in role_defs] + err = "More than one role matches the given name '{}'. 
Please pick an id from '{}'" + raise CLIError(err.format(role, ids)) + role_id = role_defs[0].id + return role_id + + +def process_vm_create_namespace(cmd, namespace): + validate_tags(namespace) + _validate_location(cmd, namespace, namespace.zone, namespace.size) + + # Currently, only `az vm create` supports this feature, so it is temporarily placed in process_vm_create_namespace() + from ._vm_utils import display_region_recommendation + display_region_recommendation(cmd, namespace) + + validate_edge_zone(cmd, namespace) + if namespace.count is not None: + _validate_count(namespace) + validate_asg_names_or_ids(cmd, namespace) + _validate_vm_create_storage_profile(cmd, namespace) + if namespace.storage_profile in [StorageProfile.SACustomImage, + StorageProfile.SAPirImage]: + _validate_vm_create_storage_account(cmd, namespace) + + _validate_vm_create_availability_set(cmd, namespace) + _validate_vm_create_vmss(cmd, namespace) + _validate_vm_vmss_create_vnet(cmd, namespace) + _validate_vm_create_nsg(cmd, namespace) + _validate_vm_vmss_create_public_ip(cmd, namespace) + _validate_vm_create_nics(cmd, namespace) + _validate_vm_vmss_accelerated_networking(cmd.cli_ctx, namespace) + _validate_vm_vmss_create_auth(namespace, cmd) + + _validate_proximity_placement_group(cmd, namespace) + _validate_vm_create_dedicated_host(cmd, namespace) + + if namespace.secrets: + _validate_secrets(namespace.secrets, namespace.os_type) + _validate_trusted_launch(namespace) + _validate_vm_vmss_msi(cmd, namespace) + _validate_generation_version_and_trusted_launch(cmd, namespace) + if namespace.boot_diagnostics_storage: + namespace.boot_diagnostics_storage = get_storage_blob_uri(cmd.cli_ctx, namespace.boot_diagnostics_storage) + + _validate_capacity_reservation_group(cmd, namespace) + _validate_vm_nic_delete_option(namespace) + _validate_community_gallery_legal_agreement_acceptance(cmd, namespace) + + +# endregion + + +def process_vm_update_namespace(cmd, namespace): + _validate_vm_create_dedicated_host(cmd, namespace) + _validate_capacity_reservation_group(cmd, namespace) + _validate_vm_vmss_update_ephemeral_placement(cmd, namespace) + + +# region VMSS Create Validators +def _get_default_address_pool(cli_ctx, resource_group, balancer_name, balancer_type): + option_name = '--backend-pool-name' + + if balancer_type == 'application_gateways': + client = import_aaz_by_profile(cli_ctx.cloud.profile, "network.application_gateway") + elif balancer_type == 'load_balancers': + client = import_aaz_by_profile(cli_ctx.cloud.profile, "network.lb") + else: + raise CLIError('unrecognized balancer type: {}'.format(balancer_type)) + + balancer = client.Show(cli_ctx=cli_ctx)(command_args={ + 'name': balancer_name, + 'resource_group': resource_group + }) + values = [x['name'] for x in balancer['backendAddressPools']] + if len(values) > 1: + raise CLIError("Multiple possible values found for '{0}': {1}\nSpecify '{0}' " + "explicitly.".format(option_name, ', '.join(values))) + if not values: + raise CLIError("No existing values found for '{0}'. 
Create one first and try " + "again.".format(option_name)) + return values[0] + + +# Client end hack per: https://github.com/Azure/azure-cli/issues/9943 +def _validate_vmss_single_placement_group(namespace): + if namespace.zones or namespace.instance_count > 100: + if namespace.single_placement_group is None: + namespace.single_placement_group = False + + +def _validate_vmss_create_load_balancer_or_app_gateway(cmd, namespace): + from azure.mgmt.core.tools import parse_resource_id + from azure.cli.core.profiles import ResourceType + from azure.core.exceptions import HttpResponseError + std_lb_is_available = cmd.supported_api_version(min_api='2017-08-01', resource_type=ResourceType.MGMT_NETWORK) + + if namespace.load_balancer and namespace.application_gateway: + raise CLIError('incorrect usage: --load-balancer NAME_OR_ID | ' + '--application-gateway NAME_OR_ID') + + # Resolve the type of balancer (if any) being used + balancer_type = 'None' + if namespace.load_balancer is None and namespace.application_gateway is None: + if std_lb_is_available: + balancer_type = 'loadBalancer' + else: # needed for Stack profile 2017_03_09 + balancer_type = 'loadBalancer' if namespace.single_placement_group is not False else 'applicationGateway' + logger.debug("W/o STD LB, defaulting to '%s' under because single placement group is disabled", + balancer_type) + + elif namespace.load_balancer: + balancer_type = 'loadBalancer' + elif namespace.application_gateway: + balancer_type = 'applicationGateway' + + if balancer_type == 'applicationGateway': + + if namespace.application_gateway: + client = import_aaz_by_profile(cmd.cli_ctx.cloud.profile, "network.application_gateway") + try: + rg = parse_resource_id(namespace.application_gateway).get( + 'resource_group', namespace.resource_group_name) + ag_name = parse_resource_id(namespace.application_gateway)['name'] + client.Show(cli_ctx=cmd.cli_ctx)(command_args={ + 'name': ag_name, + 'resource_group': rg + }) + namespace.app_gateway_type = 'existing' + namespace.backend_pool_name = namespace.backend_pool_name or \ + _get_default_address_pool(cmd.cli_ctx, rg, ag_name, 'application_gateways') + logger.debug("using specified existing application gateway '%s'", namespace.application_gateway) + except HttpResponseError: + namespace.app_gateway_type = 'new' + logger.debug("application gateway '%s' not found. 
It will be created.", namespace.application_gateway) + elif namespace.application_gateway == '': + namespace.app_gateway_type = None + logger.debug('no application gateway will be used') + elif namespace.application_gateway is None: + namespace.app_gateway_type = 'new' + logger.debug('new application gateway will be created') + + # AppGateway frontend + required = [] + if namespace.app_gateway_type == 'new': + required.append('app_gateway_sku') + required.append('app_gateway_capacity') + if namespace.vnet_type != 'new': + required.append('app_gateway_subnet_address_prefix') + elif namespace.app_gateway_type == 'existing': + required.append('backend_pool_name') + forbidden = ['nat_pool_name', 'load_balancer', 'health_probe'] + validate_parameter_set(namespace, required, forbidden, description='network balancer: application gateway') + + elif balancer_type == 'loadBalancer': + # LoadBalancer frontend + required = [] + forbidden = ['app_gateway_subnet_address_prefix', 'application_gateway', 'app_gateway_sku', + 'app_gateway_capacity'] + validate_parameter_set(namespace, required, forbidden, description='network balancer: load balancer') + + if namespace.load_balancer: + rg = parse_resource_id(namespace.load_balancer).get('resource_group', namespace.resource_group_name) + lb_name = parse_resource_id(namespace.load_balancer)['name'] + lb = get_network_lb(cmd.cli_ctx, namespace.resource_group_name, lb_name) + if lb: + namespace.load_balancer_type = 'existing' + namespace.backend_pool_name = namespace.backend_pool_name or \ + _get_default_address_pool(cmd.cli_ctx, rg, lb_name, 'load_balancers') + if not namespace.nat_pool_name: + if len(lb['inboundNatPools']) > 1: + raise CLIError( + "Multiple possible values found for '{0}': {1}\nSpecify '{0}' explicitly.".format( + # pylint: disable=line-too-long + '--nat-pool-name', ', '.join([n['name'] for n in lb['inboundNatPools']]))) + if lb['inboundNatPools']: + namespace.nat_pool_name = lb['inboundNatPools'][0]['name'] + logger.debug("using specified existing load balancer '%s'", namespace.load_balancer) + else: + namespace.load_balancer_type = 'new' + logger.debug("load balancer '%s' not found. 
It will be created.", namespace.load_balancer) + elif namespace.load_balancer == '': + namespace.load_balancer_type = None + logger.debug('no load balancer will be used') + elif namespace.load_balancer is None: + namespace.load_balancer_type = 'new' + logger.debug('new load balancer will be created') + + if namespace.load_balancer_type == 'new' and namespace.single_placement_group is False and std_lb_is_available: + if namespace.load_balancer_sku is None: + namespace.load_balancer_sku = 'Standard' + logger.debug("use Standard sku as single placement group is turned off") + elif namespace.load_balancer_sku == 'Basic': + if namespace.zones: + err = "'Standard' load balancer is required for zonal scale-sets" + elif namespace.instance_count > 100: + err = "'Standard' load balancer is required for scale-sets with 100+ instances" + else: + err = "'Standard' load balancer is required because 'single placement group' is turned off" + + raise CLIError('usage error:{}'.format(err)) + + +def get_network_lb(cli_ctx, resource_group_name, lb_name): + from azure.core.exceptions import HttpResponseError + LBShow = import_aaz_by_profile(cli_ctx.cloud.profile, "network.lb").Show + try: + return LBShow(cli_ctx=cli_ctx)({ + "name": lb_name, + "resource_group": resource_group_name + }) + except HttpResponseError: + return None + + +def process_vmss_create_namespace(cmd, namespace): + from azure.cli.core.azclierror import InvalidArgumentValueError + flexible_str = 'Flexible' + + if namespace.os_disk_delete_option is not None or namespace.data_disk_delete_option is not None: + if namespace.orchestration_mode.lower() != flexible_str.lower(): + raise InvalidArgumentValueError('usage error: --os-disk-delete-option/--data-disk-delete-option is only' + ' available for VMSS with flexible orchestration mode') + + if namespace.regular_priority_count is not None or namespace.regular_priority_percentage is not None: + if namespace.orchestration_mode.lower() != flexible_str.lower(): + raise InvalidArgumentValueError('usage error: --regular-priority-count/--regular-priority-percentage is' + ' only available for VMSS with flexible orchestration mode') + + if namespace.orchestration_mode.lower() == flexible_str.lower(): + + # The commentted parameters are also forbidden, but they have default values. + # I don't know whether they are provided by user. 
+ + namespace.load_balancer_sku = 'Standard' # lb sku MUST be standard + # namespace.public_ip_per_vm = True # default to true for VMSS Flex + + namespace.use_unmanaged_disk = None + + banned_params = { + '--disable-overprovision': namespace.disable_overprovision, + '--health-probe': namespace.health_probe, + '--host-group': namespace.host_group, + '--nat-pool-name': namespace.nat_pool_name, + '--scale-in-policy': namespace.scale_in_policy, + '--user-data': namespace.user_data + } + + for param, value in banned_params.items(): + if value is not None: + raise ArgumentUsageError(f'usage error: {param} is not supported for Flex mode') + + if namespace.vm_sku and not namespace.image: + raise ArgumentUsageError('usage error: please specify the --image when you want to specify the VM SKU') + + _validate_trusted_launch(namespace) + if namespace.image: + + if namespace.vm_sku is None: + from azure.cli.core.cloud import AZURE_US_GOV_CLOUD + if cmd.cli_ctx.cloud.name != AZURE_US_GOV_CLOUD.name: + namespace.vm_sku = 'Standard_DS1_v2' + else: + namespace.vm_sku = 'Standard_D1_v2' + + if namespace.network_api_version is None: + namespace.network_api_version = '2020-11-01' + + if namespace.platform_fault_domain_count is None: + namespace.platform_fault_domain_count = 1 + + if namespace.computer_name_prefix is None: + namespace.computer_name_prefix = namespace.vmss_name[:8] + + # if namespace.platform_fault_domain_count is None: + # raise CLIError("usage error: --platform-fault-domain-count is required in Flexible mode") + + if namespace.tags is not None: + validate_tags(namespace) + _validate_location(cmd, namespace, namespace.zones, namespace.vm_sku) + validate_edge_zone(cmd, namespace) + if namespace.application_security_groups is not None: + validate_asg_names_or_ids(cmd, namespace) + + if getattr(namespace, 'attach_os_disk', None) or namespace.image is not None: + _validate_vm_create_storage_profile(cmd, namespace, for_scale_set=True) + + if namespace.vnet_name or namespace.subnet or namespace.image: + _validate_vm_vmss_create_vnet(cmd, namespace, for_scale_set=True) + _validate_vmss_create_subnet(namespace) + + if namespace.load_balancer or namespace.application_gateway or namespace.image: + _validate_vmss_create_load_balancer_or_app_gateway(cmd, namespace) + + if namespace.public_ip_address or namespace.image: + _validate_vmss_create_public_ip(cmd, namespace) + + if namespace.nsg is not None: + _validate_vmss_create_nsg(cmd, namespace) + if namespace.accelerated_networking is not None: + _validate_vm_vmss_accelerated_networking(cmd.cli_ctx, namespace) + if any([namespace.admin_password, namespace.ssh_dest_key_path, namespace.generate_ssh_keys, + namespace.authentication_type, namespace.os_type]): + _validate_vm_vmss_create_auth(namespace, cmd) + if namespace.assign_identity == '[system]': + raise InvalidArgumentValueError('usage error: only user assigned identity is supported for Flex mode.') + if namespace.assign_identity is not None: + _validate_vm_vmss_msi(cmd, namespace) # -- UserAssignedOnly + _validate_proximity_placement_group(cmd, namespace) + _validate_vmss_terminate_notification(cmd, namespace) + if namespace.automatic_repairs_grace_period is not None: + _validate_vmss_create_automatic_repairs(cmd, namespace) + _validate_vmss_create_host_group(cmd, namespace) + + if namespace.secrets is not None: + _validate_secrets(namespace.secrets, namespace.os_type) + + if namespace.eviction_policy and not namespace.priority: + raise ArgumentUsageError('usage error: --priority PRIORITY 
[--eviction-policy POLICY]') + + return + + # Uniform mode + if namespace.disable_overprovision is None: + namespace.disable_overprovision = False + validate_tags(namespace) + if namespace.vm_sku is None: + from azure.cli.core.cloud import AZURE_US_GOV_CLOUD + if cmd.cli_ctx.cloud.name != AZURE_US_GOV_CLOUD.name: + namespace.vm_sku = 'Standard_DS1_v2' + else: + namespace.vm_sku = 'Standard_D1_v2' + _validate_location(cmd, namespace, namespace.zones, namespace.vm_sku) + validate_edge_zone(cmd, namespace) + validate_asg_names_or_ids(cmd, namespace) + _validate_vm_create_storage_profile(cmd, namespace, for_scale_set=True) + _validate_vm_vmss_create_vnet(cmd, namespace, for_scale_set=True) + + _validate_vmss_single_placement_group(namespace) + _validate_vmss_create_load_balancer_or_app_gateway(cmd, namespace) + _validate_vmss_create_subnet(namespace) + _validate_vmss_create_public_ip(cmd, namespace) + _validate_vmss_create_nsg(cmd, namespace) + _validate_vm_vmss_accelerated_networking(cmd.cli_ctx, namespace) + _validate_vm_vmss_create_auth(namespace, cmd) + _validate_trusted_launch(namespace) + _validate_vm_vmss_msi(cmd, namespace) + _validate_generation_version_and_trusted_launch(cmd, namespace) + _validate_proximity_placement_group(cmd, namespace) + _validate_vmss_terminate_notification(cmd, namespace) + _validate_vmss_create_automatic_repairs(cmd, namespace) + _validate_vmss_create_host_group(cmd, namespace) + + if namespace.secrets: + _validate_secrets(namespace.secrets, namespace.os_type) + + if not namespace.public_ip_per_vm and namespace.vm_domain_name: + raise ArgumentUsageError('usage error: --vm-domain-name can only be used when --public-ip-per-vm is enabled') + + if namespace.eviction_policy and not namespace.priority: + raise ArgumentUsageError('usage error: --priority PRIORITY [--eviction-policy POLICY]') + + _validate_capacity_reservation_group(cmd, namespace) + _validate_community_gallery_legal_agreement_acceptance(cmd, namespace) + + +def validate_vmss_update_namespace(cmd, namespace): # pylint: disable=unused-argument + if not namespace.instance_id: + if namespace.protect_from_scale_in is not None or namespace.protect_from_scale_set_actions is not None: + raise CLIError("usage error: protection policies can only be applied to VM instances within a VMSS." 
+ " Please use --instance-id to specify a VM instance") + _validate_vmss_update_terminate_notification_related(cmd, namespace) + _validate_vmss_update_automatic_repairs(cmd, namespace) + _validate_capacity_reservation_group(cmd, namespace) + _validate_vm_vmss_update_ephemeral_placement(cmd, namespace) + + +# endregion + + +# region disk, snapshot, image validators +def process_vm_disk_attach_namespace(cmd, namespace): + if not namespace.disks and not namespace.disk and not namespace.disk_ids: + raise RequiredArgumentMissingError("Please use at least one of --name, --disks and --disk-ids") + + if namespace.disk and namespace.disks: + raise MutuallyExclusiveArgumentError("You can only specify one of --name and --disks") + + if namespace.disk and namespace.disk_ids: + raise MutuallyExclusiveArgumentError("You can only specify one of --name and --disk-ids") + + if namespace.disks and namespace.disk_ids: + raise MutuallyExclusiveArgumentError("You can only specify one of --disks and --disk-ids") + + disks = [] + if namespace.disk: + disks = [_get_resource_id(cmd.cli_ctx, namespace.disk, namespace.resource_group_name, + 'disks', 'Microsoft.Compute')] + if namespace.disks: + for disk in namespace.disks: + disks.append(_get_resource_id(cmd.cli_ctx, disk, namespace.resource_group_name, + 'disks', 'Microsoft.Compute')) + namespace.disks = disks + + if len(disks) > 1 and namespace.lun: + raise MutuallyExclusiveArgumentError("You cannot specify the --lun for multiple disks") + + if namespace.disk_ids and len(namespace.disk_ids) > 1 and namespace.lun: + raise MutuallyExclusiveArgumentError("You cannot specify the --lun for multiple disk IDs") + + +def process_vm_disk_detach_namespace(namespace): + if not namespace.disk_name and not namespace.disk_ids: + raise RequiredArgumentMissingError("Please use at least one '--name', '--disk-ids'") + + +def validate_vmss_disk(cmd, namespace): + if namespace.disk: + namespace.disk = _get_resource_id(cmd.cli_ctx, namespace.disk, + namespace.resource_group_name, 'disks', 'Microsoft.Compute') + if bool(namespace.disk) == bool(namespace.size_gb): + raise CLIError('usage error: --disk EXIST_DISK --instance-id ID | --size-gb GB') + if bool(namespace.disk) != bool(namespace.instance_id): + raise CLIError('usage error: --disk EXIST_DISK --instance-id ID') + + +def _validate_gallery_image_reference(cmd, namespace): + from azure.cli.core.profiles import ResourceType + is_validate = 'gallery_image_reference' in namespace and namespace.gallery_image_reference is not None \ + and cmd.supported_api_version(resource_type=ResourceType.MGMT_COMPUTE, + operation_group='disks', min_api='2022-03-02') + if not is_validate: + return + + from azure.cli.command_modules.vm.azure_stack._image_builder import GalleryImageReferenceType + from ._vm_utils import is_compute_gallery_image_id, is_community_gallery_image_id, \ + is_shared_gallery_image_id + + gallery_image_reference = namespace.gallery_image_reference + if is_compute_gallery_image_id(gallery_image_reference): + namespace.gallery_image_reference_type = GalleryImageReferenceType.COMPUTE.backend_key + return + if is_community_gallery_image_id(gallery_image_reference): + namespace.gallery_image_reference_type = GalleryImageReferenceType.COMMUNITY.backend_key + return + if is_shared_gallery_image_id(gallery_image_reference): + namespace.gallery_image_reference_type = GalleryImageReferenceType.SHARED.backend_key + return + + from azure.cli.core.parser import InvalidArgumentValueError + raise InvalidArgumentValueError('usage error: {} is 
an invalid gallery image reference, please provide a valid ' + 'compute, shared or community gallery image version. For details about the valid ' + 'format, please refer to the help sample'.format(gallery_image_reference)) + + +def process_disk_create_namespace(cmd, namespace): + from azure.core.exceptions import HttpResponseError + validate_tags(namespace) + validate_edge_zone(cmd, namespace) + _validate_gallery_image_reference(cmd, namespace) + _validate_security_data_uri(namespace) + _validate_upload_type(cmd, namespace) + _validate_secure_vm_disk_encryption_set(namespace) + _validate_hyper_v_generation(namespace) + if namespace.source: + usage_error = 'usage error: --source {SNAPSHOT | DISK | RESTOREPOINT} | ' \ + '--source VHD_BLOB_URI [--source-storage-account-id ID]' + try: + namespace.source_blob_uri, namespace.source_disk, namespace.source_snapshot, \ + namespace.source_restore_point, _ = _figure_out_storage_source( + cmd.cli_ctx, namespace.resource_group_name, namespace.source) + if not namespace.source_blob_uri and namespace.source_storage_account_id: + raise ArgumentUsageError(usage_error) + except HttpResponseError: + raise ArgumentUsageError(usage_error) + + +def _validate_security_data_uri(namespace): + if 'security_data_uri' not in namespace or not namespace.security_data_uri: + return + + if not namespace.security_type: + raise RequiredArgumentMissingError( + 'Please specify --security-type when using the --security-data-uri parameter') + + if not namespace.hyper_v_generation or namespace.hyper_v_generation != 'V2': + raise ArgumentUsageError( + "Please specify --hyper-v-generation as 'V2' when using the --security-data-uri parameter") + + if not namespace.source: + raise RequiredArgumentMissingError( + 'Please specify --source when using the --security-data-uri parameter') + + +def _validate_upload_type(cmd, namespace): + if 'upload_type' not in namespace: + return + + if not namespace.upload_type and namespace.for_upload: + namespace.upload_type = 'Upload' + + if namespace.upload_type == 'UploadWithSecurityData': + + if not cmd.supported_api_version(min_api='2021-08-01', operation_group='disks'): + raise ArgumentUsageError( + "'UploadWithSecurityData' is not supported in the current profile. 
" + "Please upgrade your profile with 'az cloud set --profile newerProfile' and try again") + + if not namespace.security_type: + raise RequiredArgumentMissingError( + "Please specify --security-type when the value of --upload-type is 'UploadWithSecurityData'") + + if not namespace.hyper_v_generation or namespace.hyper_v_generation != 'V2': + raise ArgumentUsageError( + "Please specify --hyper-v-generation as 'V2' the value of --upload-type is 'UploadWithSecurityData'") + + +def _validate_secure_vm_disk_encryption_set(namespace): + if 'secure_vm_disk_encryption_set' not in namespace: + return + + if namespace.secure_vm_disk_encryption_set: + if not namespace.security_type or \ + namespace.security_type.lower() != 'confidentialvm_diskencryptedwithcustomerkey': + raise ArgumentUsageError('usage error: --secure-vm-disk-encryption-set can only be specified only ' + 'when --security-type is set to ConfidentialVM_DiskEncryptedWithCustomerKey') + + elif namespace.security_type and namespace.security_type.lower() == 'confidentialvm_diskencryptedwithcustomerkey': + raise ArgumentUsageError('usage error: --secure-vm-disk-encryption-set is mandatory when ' + '--security-type is set to ConfidentialVM_DiskEncryptedWithCustomerKey') + + +def _validate_hyper_v_generation(namespace): + if namespace.security_type and (not namespace.hyper_v_generation or namespace.hyper_v_generation == 'V1'): + logger.warning( + 'Enabling security features by using parameter "--security-type" requires UEFI support with Generation 2 ' + 'VMs, please set the parameter "--hyper-v-generation" to "V2" for enabling Generation 2 VM support.') + + +def process_snapshot_create_namespace(cmd, namespace): + from azure.core.exceptions import HttpResponseError + validate_tags(namespace) + validate_edge_zone(cmd, namespace) + _validate_gallery_image_reference(cmd, namespace) + if namespace.source: + usage_error = 'usage error: --source {SNAPSHOT | DISK} | --source VHD_BLOB_URI [--source-storage-account-id ID]' + try: + namespace.source_blob_uri, namespace.source_disk, namespace.source_snapshot, _, source_info = \ + _figure_out_storage_source(cmd.cli_ctx, namespace.resource_group_name, namespace.source) + if not namespace.source_blob_uri and namespace.source_storage_account_id: + raise ArgumentUsageError(usage_error) + # autodetect copy_start for `az snapshot create` + if 'snapshot create' in cmd.name and hasattr(namespace, 'copy_start') and namespace.copy_start is None: + if not source_info: + from azure.cli.core.util import parse_proxy_resource_id + result = parse_proxy_resource_id(namespace.source_disk or namespace.source_snapshot) + try: + source_info, _ = _get_disk_or_snapshot_info(cmd.cli_ctx, + result['resource_group'], + result['name']) + except Exception: # pylint: disable=broad-except + # There's a chance that the source doesn't exist, eg, vmss os disk. 
+ # You can get the id of vmss os disk by + # `az vmss show -g {} -n {} --instance-id {} --query storageProfile.osDisk.managedDisk.id` + # But `az disk show --ids {}` will return ResourceNotFound error + # We don't autodetect copy_start in this situation + return + if not namespace.location: + get_default_location_from_resource_group(cmd, namespace) + # if the source location differs from target location, then it's copy_start scenario + if namespace.incremental: + namespace.copy_start = source_info.location != namespace.location + except HttpResponseError: + raise ArgumentUsageError(usage_error) + + +def process_image_create_namespace(cmd, namespace): + from azure.mgmt.core.tools import parse_resource_id + validate_tags(namespace) + validate_edge_zone(cmd, namespace) + source_from_vm = False + try: + # try capturing from VM, the most common scenario + res_id = _get_resource_id(cmd.cli_ctx, namespace.source, namespace.resource_group_name, + 'virtualMachines', 'Microsoft.Compute') + res = parse_resource_id(res_id) + if res['type'] == 'virtualMachines': + compute_client = _compute_client_factory(cmd.cli_ctx, subscription_id=res['subscription']) + vm_info = compute_client.virtual_machines.get(res['resource_group'], res['name']) + source_from_vm = True + except ResourceNotFoundError: + pass + + if source_from_vm: + # pylint: disable=no-member + namespace.os_type = vm_info.storage_profile.os_disk.os_type + namespace.source_virtual_machine = res_id + if namespace.data_disk_sources: + raise CLIError("'--data-disk-sources' is not allowed when capturing " + "images from virtual machines") + else: + # pylint: disable=line-too-long + namespace.os_blob_uri, namespace.os_disk, namespace.os_snapshot, _, _ = _figure_out_storage_source(cmd.cli_ctx, + namespace.resource_group_name, + namespace.source) + namespace.data_blob_uris = [] + namespace.data_disks = [] + namespace.data_snapshots = [] + if namespace.data_disk_sources: + for data_disk_source in namespace.data_disk_sources: + source_blob_uri, source_disk, source_snapshot, _, _ = _figure_out_storage_source( + cmd.cli_ctx, namespace.resource_group_name, data_disk_source) + if source_blob_uri: + namespace.data_blob_uris.append(source_blob_uri) + if source_disk: + namespace.data_disks.append(source_disk) + if source_snapshot: + namespace.data_snapshots.append(source_snapshot) + if not namespace.os_type: + raise CLIError("usage error: os type is required to create the image, " + "please specify '--os-type OS_TYPE'") + + +def _figure_out_storage_source(cli_ctx, resource_group_name, source): + source_blob_uri = None + source_disk = None + source_snapshot = None + source_info = None + source_restore_point = None + if urlparse(source).scheme: # a uri? 
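+ # A sketch of how 'source' is classified below (illustrative values, not from this change): + #   'https://myacct.blob.core.windows.net/vhds/os.vhd'  -> source_blob_uri (has a URL scheme) + #   '/subscriptions/.../disks/disk1'                    -> source_disk + #   '/subscriptions/.../snapshots/snap1'                -> source_snapshot + #   '/subscriptions/.../restorePoints/rp1'              -> source_restore_point + #   a bare name such as 'disk1' is resolved by querying snapshots first, then disks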
+ source_blob_uri = source + elif '/disks/' in source.lower(): + source_disk = source + elif '/snapshots/' in source.lower(): + source_snapshot = source + elif '/restorepoints/' in source.lower(): + source_restore_point = source + else: + source_info, is_snapshot = _get_disk_or_snapshot_info(cli_ctx, resource_group_name, source) + if is_snapshot: + source_snapshot = source_info.id + else: + source_disk = source_info.id + + return (source_blob_uri, source_disk, source_snapshot, source_restore_point, source_info) + + +def _get_disk_or_snapshot_info(cli_ctx, resource_group_name, source): + compute_client = _compute_client_factory(cli_ctx) + is_snapshot = True + + try: + info = compute_client.snapshots.get(resource_group_name, source) + except ResourceNotFoundError: + is_snapshot = False + info = compute_client.disks.get(resource_group_name, source) + + return info, is_snapshot + + +def process_disk_encryption_namespace(cmd, namespace): + namespace.disk_encryption_keyvault = _get_resource_id(cmd.cli_ctx, namespace.disk_encryption_keyvault, + namespace.resource_group_name, + 'vaults', 'Microsoft.KeyVault') + + if namespace.key_encryption_keyvault: + if not namespace.key_encryption_key: + raise CLIError("Incorrect usage '--key-encryption-keyvault': " + "'--key-encryption-key' is required") + namespace.key_encryption_keyvault = _get_resource_id(cmd.cli_ctx, namespace.key_encryption_keyvault, + namespace.resource_group_name, + 'vaults', 'Microsoft.KeyVault') + + +def process_assign_identity_namespace(cmd, namespace): + _validate_vm_vmss_msi(cmd, namespace, is_identity_assign=True) + + +def process_remove_identity_namespace(cmd, namespace): + if namespace.identities: + from ._vm_utils import MSI_LOCAL_ID + for i in range(len(namespace.identities)): + if namespace.identities[i] != MSI_LOCAL_ID: + namespace.identities[i] = _get_resource_id(cmd.cli_ctx, namespace.identities[i], + namespace.resource_group_name, + 'userAssignedIdentities', + 'Microsoft.ManagedIdentity') + + +def process_set_applications_namespace(cmd, namespace): # pylint: disable=unused-argument + _validate_vm_vmss_set_applications(cmd, namespace) + + +def process_gallery_image_version_namespace(cmd, namespace): + from azure.cli.core.azclierror import InvalidArgumentValueError + TargetRegion, EncryptionImages, OSDiskImageEncryption, DataDiskImageEncryption, \ + ConfidentialVMEncryptionType, GalleryTargetExtendedLocation, GalleryExtendedLocation = cmd.get_models( + 'TargetRegion', 'EncryptionImages', 'OSDiskImageEncryption', 'DataDiskImageEncryption', + 'ConfidentialVMEncryptionType', 'GalleryTargetExtendedLocation', 'GalleryExtendedLocation') + + if namespace.target_regions: + if hasattr(namespace, 'target_region_encryption') and namespace.target_region_encryption: + if len(namespace.target_regions) != len(namespace.target_region_encryption): + raise InvalidArgumentValueError( + 'usage error: Length of --target-region-encryption should be the same as the length of target regions') + + if hasattr(namespace, 'target_region_cvm_encryption') and namespace.target_region_cvm_encryption: + OSDiskImageSecurityProfile = cmd.get_models('OSDiskImageSecurityProfile') + if len(namespace.target_regions) != len(namespace.target_region_cvm_encryption): + raise InvalidArgumentValueError( + 'usage error: Length of --target-region-cvm-encryption should be the same as ' + 'the length of target regions') + + storage_account_types_list = [item.lower() for item in ['Standard_LRS', 'Standard_ZRS', 'Premium_LRS']] + storage_account_types_str = ", 
".join(storage_account_types_list) + + regions_info = [] + for i, t in enumerate(namespace.target_regions): + parts = t.split('=', 2) + replica_count = None + storage_account_type = None + + # Region specified, but also replica count or storage account type + if len(parts) == 2: + try: + replica_count = int(parts[1]) + except ValueError: + storage_account_type = parts[1] + if parts[1].lower() not in storage_account_types_list: + raise ArgumentUsageError( + "usage error: {} is an invalid target region argument. " + "The second part is neither an integer replica count or a valid storage account type. " + "Storage account types must be one of {}.".format(t, storage_account_types_str)) + + # Region specified, but also replica count and storage account type + elif len(parts) == 3: + try: + replica_count = int(parts[1]) # raises ValueError if this is not a replica count, try other order. + storage_account_type = parts[2] + if storage_account_type not in storage_account_types_list: + raise ArgumentUsageError( + "usage error: {} is an invalid target region argument. " + "The third part is not a valid storage account type. " + "Storage account types must be one of {}.".format(t, storage_account_types_str)) + except ValueError: + raise ArgumentUsageError( + "usage error: {} is an invalid target region argument. " + "The second part must be a valid integer replica count.".format(t)) + + # Parse target region encryption, example: ['des1,0,des2,1,des3', 'null', 'des4'] + encryption = None + os_disk_image = None + data_disk_images = None + if hasattr(namespace, 'target_region_encryption') and namespace.target_region_encryption: + terms = namespace.target_region_encryption[i].split(',') + # OS disk + os_disk_image = terms[0] + if os_disk_image == 'null': + os_disk_image = None + else: + des_id = _disk_encryption_set_format(cmd, namespace, os_disk_image) + os_disk_image = OSDiskImageEncryption(disk_encryption_set_id=des_id) + # Data disk + if len(terms) > 1: + data_disk_images = terms[1:] + data_disk_images_len = len(data_disk_images) + if data_disk_images_len % 2 != 0: + raise ArgumentUsageError( + 'usage error: LUN and disk encryption set for data disk should appear in pair in ' + '--target-region-encryption. Example: osdes,0,datades0,1,datades1') + data_disk_image_encryption_list = [] + for j in range(int(data_disk_images_len / 2)): + lun = data_disk_images[j * 2] + des_id = data_disk_images[j * 2 + 1] + des_id = _disk_encryption_set_format(cmd, namespace, des_id) + data_disk_image_encryption_list.append(DataDiskImageEncryption( + lun=lun, disk_encryption_set_id=des_id)) + data_disk_images = data_disk_image_encryption_list + + if hasattr(namespace, 'target_region_cvm_encryption') and namespace.target_region_cvm_encryption: + cvm_terms = namespace.target_region_cvm_encryption[i].split(',') + if not cvm_terms or len(cvm_terms) != 2: + raise ArgumentUsageError( + "usage error: {} is an invalid target region cvm encryption. " + "Both os_cvm_encryption_type and os_cvm_des parameters are required.".format(cvm_terms)) + + storage_profile_types = [profile_type.value for profile_type in ConfidentialVMEncryptionType] + storage_profile_types_str = ", ".join(storage_profile_types) + if cvm_terms[0] not in storage_profile_types: + raise ArgumentUsageError( + "usage error: {} is an invalid os_cvm_encryption_type. 
" + "The valid values for os_cvm_encryption_type are {}".format( + cvm_terms, storage_profile_types_str)) + cvm_des_id = None + if cvm_terms[1]: + cvm_des_id = _disk_encryption_set_format(cmd, namespace, cvm_terms[1]) + security_profile = OSDiskImageSecurityProfile(confidential_vm_encryption_type=cvm_terms[0], + secure_vm_disk_encryption_set_id=cvm_des_id) + if os_disk_image: + os_disk_image.security_profile = security_profile + else: + os_disk_image = OSDiskImageEncryption(security_profile=security_profile) + + if os_disk_image or data_disk_images: + encryption = EncryptionImages(os_disk_image=os_disk_image, data_disk_images=data_disk_images) + + # At least the region is specified + if len(parts) >= 1: + regions_info.append(TargetRegion(name=parts[0], regional_replica_count=replica_count, + storage_account_type=storage_account_type, + encryption=encryption)) + + namespace.target_regions = regions_info + + if hasattr(namespace, 'target_edge_zones') and namespace.target_edge_zones: + if len(namespace.target_edge_zones) == 1 and namespace.target_edge_zones[0].lower() == 'none': + namespace.target_edge_zones = [] + return + if hasattr(namespace, 'target_zone_encryption') and namespace.target_zone_encryption: + if len(namespace.target_edge_zones) != len(namespace.target_zone_encryption): + raise InvalidArgumentValueError( + 'usage error: Length of --target-edge-zone-encryption ' + 'should be as same as length of --target-edge-zones') + + storage_account_types_list = [item.lower() for item in + ['Standard_LRS', 'Standard_ZRS', 'Premium_LRS', 'StandardSSD_LRS']] + storage_account_types_str = ", ".join(storage_account_types_list) + + edge_zone_info = [] + for i, t in enumerate(namespace.target_edge_zones): + parts = t.split('=', 3) + # At least the region and edge zone are specified + if len(parts) < 2: + continue + + region = parts[0] + edge_zone = parts[1] + replica_count = None + storage_account_type = None + + # Both "region" and "edge zone" are specified, + # but only one of "replica count" and "storage account type" is specified + if len(parts) == 3: + try: + replica_count = int(parts[2]) + except ValueError: + storage_account_type = parts[2] + if parts[2].lower() not in storage_account_types_list: + raise ArgumentUsageError( + "usage error: {} is an invalid target edge zone argument. " + "The third part is neither an integer replica count or a valid storage account type. " + "Storage account types must be one of {}.".format(t, storage_account_types_str)) + + # Not only "region" and "edge zone" are specified, + # but also "replica count" and "storage account type" are specified + elif len(parts) == 4: + try: + replica_count = int(parts[2]) # raises ValueError if this is not a replica count, try other order. + storage_account_type = parts[3] + if storage_account_type not in storage_account_types_list: + raise ArgumentUsageError( + "usage error: {} is an invalid target edge zone argument. " + "The forth part is not a valid storage account type. " + "Storage account types must be one of {}.".format(t, storage_account_types_str)) + except ValueError: + raise ArgumentUsageError( + "usage error: {} is an invalid target edge zone argument. 
" + "The third part must be a valid integer replica count.".format(t)) + + # Parse target edge zone encryption, + # example: ['microsoftlosangeles1', 'des1, 0, des2, 1, des3', 'null', 'des4'] + encryption = None + os_disk_image = None + data_disk_images = None + if hasattr(namespace, 'target_zone_encryption') and namespace.target_zone_encryption: + terms = namespace.target_zone_encryption[i].split(',') + if len(terms) < 2: + break + # OS disk + os_disk_image = terms[1] + if os_disk_image == 'null': + os_disk_image = None + else: + des_id = _disk_encryption_set_format(cmd, namespace, os_disk_image) + os_disk_image = OSDiskImageEncryption(disk_encryption_set_id=des_id) + # Data disk + if len(terms) > 2: + data_disk_images = terms[2:] + data_disk_images_len = len(data_disk_images) + if data_disk_images_len % 2 != 0: + raise ArgumentUsageError( + 'usage error: LUN and disk encryption set for data disk should appear in pair in ' + '--target-edge-zone-encryption. Example: 1,osdes,0,datades0,1,datades1') + data_disk_image_encryption_list = [] + for j in range(int(data_disk_images_len / 2)): + lun = data_disk_images[j * 2] + des_id = data_disk_images[j * 2 + 1] + des_id = _disk_encryption_set_format(cmd, namespace, des_id) + data_disk_image_encryption_list.append(DataDiskImageEncryption( + lun=lun, disk_encryption_set_id=des_id)) + data_disk_images = data_disk_image_encryption_list + + if os_disk_image or data_disk_images: + encryption = EncryptionImages(os_disk_image=os_disk_image, data_disk_images=data_disk_images) + + extended_location = GalleryExtendedLocation(name=edge_zone, type='EdgeZone') + + edge_zone_info.append( + GalleryTargetExtendedLocation(name=region, extended_location_replica_count=replica_count, + extended_location=extended_location, + storage_account_type=storage_account_type, + encryption=encryption) + ) + + namespace.target_edge_zones = edge_zone_info + + +def _disk_encryption_set_format(cmd, namespace, name): + """ + Transform name to ID. If it's already a valid ID, do nothing. + :param name: string + :return: ID + """ + from azure.mgmt.core.tools import resource_id, is_valid_resource_id + from azure.cli.core.commands.client_factory import get_subscription_id + if name is not None and not is_valid_resource_id(name): + name = resource_id( + subscription=get_subscription_id(cmd.cli_ctx), resource_group=namespace.resource_group_name, + namespace='Microsoft.Compute', type='diskEncryptionSets', name=name) + return name + + +# endregion + + +def process_ppg_create_namespace(namespace): + validate_tags(namespace) + # The availability zone can be provided only when an intent is provided + if namespace.zone and not namespace.intent_vm_sizes: + raise RequiredArgumentMissingError('The --zone can be provided only when an intent is provided. 
' + 'Please use parameter --intent-vm-sizes to specify possible sizes of ' + 'virtual machines that can be created in the proximity placement group.') + + +# endregion + + +def process_image_version_create_namespace(cmd, namespace): + validate_tags(namespace) + process_gallery_image_version_namespace(cmd, namespace) + process_image_resource_id_namespace(namespace) + + +# endregion + + +def process_image_version_update_namespace(cmd, namespace): + process_gallery_image_version_namespace(cmd, namespace) + + +# endregion + + +def process_image_version_undelete_namespace(cmd, namespace): # pylint: disable=unused-argument + validate_tags(namespace) + + +def process_image_resource_id_namespace(namespace): + """ + Validate the resource id from different sources. + Only one of these arguments may be provided. + Check whether the format of the resource id meets the requirements. + """ + input_num = (1 if namespace.managed_image else 0) + (1 if namespace.virtual_machine else 0) + \ + (1 if namespace.image_version else 0) + if input_num > 1: + raise MutuallyExclusiveArgumentError( + r'usage error: please specify only one of the --managed-image\--virtual-machine\--image-version arguments') + + if namespace.managed_image or input_num == 0: + return + + from ._vm_utils import is_valid_vm_resource_id, is_valid_image_version_id + is_vm = namespace.virtual_machine is not None + is_valid_function = is_valid_vm_resource_id if is_vm else is_valid_image_version_id + resource_id = namespace.virtual_machine if is_vm else namespace.image_version + + if not is_valid_function(resource_id): + from azure.cli.core.parser import InvalidArgumentValueError + raise InvalidArgumentValueError('usage error: {} is an invalid {} id' + .format(resource_id, 'VM resource' if is_vm else 'gallery image version')) + namespace.managed_image = resource_id + + +# endregion + + +def process_vm_vmss_stop(cmd, namespace): # pylint: disable=unused-argument + if "vmss" in cmd.name: + logger.warning("About to power off the VMSS instances...\nThey will continue to be billed. " + "To deallocate VMSS instances, run: az vmss deallocate.") + else: + logger.warning("About to power off the specified VM...\nIt will continue to be billed. " + "To deallocate a VM, run: az vm deallocate.") + + +def _validate_vmss_update_terminate_notification_related(cmd, namespace): # pylint: disable=unused-argument + """ + Validate vmss update enable_terminate_notification and terminate_notification_time. 
+ If terminate_notification_time is specified, enable_terminate_notification should not be false. + If enable_terminate_notification is true, terminate_notification_time must be specified. + """ + if namespace.enable_terminate_notification is False and namespace.terminate_notification_time is not None: + raise CLIError("usage error: please enable --enable-terminate-notification") + if namespace.enable_terminate_notification is True and namespace.terminate_notification_time is None: + raise CLIError("usage error: please set --terminate-notification-time") + _validate_vmss_terminate_notification(cmd, namespace) + + +def _validate_vmss_terminate_notification(cmd, namespace): # pylint: disable=unused-argument + """ + Transform minutes to ISO 8601 format, e.g. '30' -> 'PT30M' + """ + if namespace.terminate_notification_time is not None: + namespace.terminate_notification_time = 'PT' + namespace.terminate_notification_time + 'M' + + +def _validate_vmss_create_automatic_repairs(cmd, namespace): # pylint: disable=unused-argument + if namespace.automatic_repairs_grace_period is not None or namespace.automatic_repairs_action is not None: + if namespace.load_balancer is None or namespace.health_probe is None: + raise ArgumentUsageError("usage error: --load-balancer and --health-probe are required " + "when creating vmss with automatic repairs") + _validate_vmss_automatic_repairs(cmd, namespace) + + +def _validate_vmss_update_automatic_repairs(cmd, namespace): # pylint: disable=unused-argument + if namespace.enable_automatic_repairs is False and \ + (namespace.automatic_repairs_grace_period is not None or namespace.automatic_repairs_action is not None): + raise ArgumentUsageError("usage error: please enable --enable-automatic-repairs") + if namespace.enable_automatic_repairs is True and namespace.automatic_repairs_grace_period is None \ + and namespace.automatic_repairs_action is None: + raise ArgumentUsageError("usage error: please set --automatic-repairs-grace-period or" + " --automatic-repairs-action") + _validate_vmss_automatic_repairs(cmd, namespace) + + +def _validate_vmss_automatic_repairs(cmd, namespace): # pylint: disable=unused-argument + """ + Transform minutes to ISO 8601 format, e.g. '30' -> 'PT30M' + """ + if namespace.automatic_repairs_grace_period is not None: + namespace.automatic_repairs_grace_period = 'PT' + namespace.automatic_repairs_grace_period + 'M' + + +def _validate_vmss_create_host_group(cmd, namespace): + from azure.mgmt.core.tools import resource_id, is_valid_resource_id + from azure.cli.core.commands.client_factory import get_subscription_id + if namespace.host_group: + if not is_valid_resource_id(namespace.host_group): + namespace.host_group = resource_id( + subscription=get_subscription_id(cmd.cli_ctx), resource_group=namespace.resource_group_name, + namespace='Microsoft.Compute', type='hostGroups', name=namespace.host_group + ) + + +def _validate_count(namespace): + if namespace.count < 2 or namespace.count > 250: + raise ValidationError( + '--count should be in [2, 250]. 
Please make sure your subscription has enough resource quota') + banned_params = [ + namespace.attach_data_disks, + namespace.attach_os_disk, + namespace.boot_diagnostics_storage, + namespace.computer_name, + namespace.dedicated_host, + namespace.dedicated_host_group, + namespace.nics, + namespace.os_disk_name, + namespace.private_ip_address, + namespace.public_ip_address, + namespace.public_ip_address_dns_name, + namespace.storage_account, + namespace.storage_container_name, + namespace.use_unmanaged_disk, + ] + params_str = [ + '--attach-data-disks', + '--attach-os-disk', + '--boot-diagnostics-storage', + '--computer-name', + '--host', + '--host-group', + '--nics', + '--os-disk-name', + '--private-ip-address', + '--public-ip-address', + '--public-ip-address-dns-name', + '--storage-account', + '--storage-container-name', + '--subnet', + '--use-unmanaged-disk', + '--vnet-name' + ] + if any(param for param in banned_params): + raise ValidationError('When --count is specified, {} are not allowed'.format(', '.join(params_str))) + + +def validate_edge_zone(cmd, namespace): # pylint: disable=unused-argument + if namespace.edge_zone: + namespace.edge_zone = { + 'name': namespace.edge_zone, + 'type': 'EdgeZone' + } + + +def _validate_capacity_reservation_group(cmd, namespace): + if namespace.capacity_reservation_group and namespace.capacity_reservation_group != 'None': + + from azure.mgmt.core.tools import is_valid_resource_id, resource_id + from azure.cli.core.commands.client_factory import get_subscription_id + if not is_valid_resource_id(namespace.capacity_reservation_group): + namespace.capacity_reservation_group = resource_id( + subscription=get_subscription_id(cmd.cli_ctx), + resource_group=namespace.resource_group_name, + namespace='Microsoft.Compute', + type='CapacityReservationGroups', + name=namespace.capacity_reservation_group + ) + + +def _validate_vm_vmss_create_ephemeral_placement(namespace): + ephemeral_os_disk = getattr(namespace, 'ephemeral_os_disk', None) + ephemeral_os_disk_placement = getattr(namespace, 'ephemeral_os_disk_placement', None) + if ephemeral_os_disk_placement and not ephemeral_os_disk: + raise ArgumentUsageError('usage error: --ephemeral-os-disk-placement is only configurable when ' + '--ephemeral-os-disk is specified.') + + +def _validate_vm_vmss_update_ephemeral_placement(cmd, namespace): # pylint: disable=unused-argument + size = getattr(namespace, 'size', None) + ephemeral_os_disk_placement = getattr(namespace, 'ephemeral_os_disk_placement', None) + source = getattr(namespace, 'command').split()[0] + if ephemeral_os_disk_placement: + if source == 'vm' and not size: + raise ArgumentUsageError('usage error: --ephemeral-os-disk-placement is only configurable when ' + '--size is specified.') + + +def _validate_community_gallery_legal_agreement_acceptance(cmd, namespace): + from ._vm_utils import is_community_gallery_image_id, parse_community_gallery_image_id + if not is_community_gallery_image_id(namespace.image) or namespace.accept_term: + return + + community_gallery_name, _ = parse_community_gallery_image_id(namespace.image) + from ._client_factory import cf_community_gallery + try: + community_gallery_info = cf_community_gallery(cmd.cli_ctx).get(namespace.location, community_gallery_name) + eula = community_gallery_info.additional_properties['communityMetadata']['eula'] + except Exception as err: + raise CLIInternalError('Failed to get the EULA from the community gallery: {0}'.format(err)) + + from knack.prompting import prompt_y_n + msg = "To create the VM/VMSS 
from a community gallery image, you must accept the license agreement and " \ + "privacy statement: {}. (If you want to accept the legal terms by default, " \ + "please use the option '--accept-term' when creating VM/VMSS)".format(eula) + + if not prompt_y_n(msg, default="y"): + import sys + sys.exit(0) + + +def validate_secure_vm_guest_state_sas(cmd, namespace): + compute_client = _compute_client_factory(cmd.cli_ctx) + disk_info = compute_client.disks.get(namespace.resource_group_name, namespace.disk_name) + DiskCreateOption = cmd.get_models('DiskCreateOption') + + if disk_info.creation_data and disk_info.creation_data.create_option == DiskCreateOption.upload_prepared_secure: + namespace.secure_vm_guest_state_sas = True diff --git a/src/azure-cli/azure/cli/command_modules/vm/azure_stack/_vm_diagnostics_templates.py b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/_vm_diagnostics_templates.py new file mode 100644 index 00000000000..1ea05038d40 --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/_vm_diagnostics_templates.py @@ -0,0 +1,983 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- + +# flake8: noqa +# noqa +# pylint: skip-file + + +def get_default_diag_config(is_windows): + if is_windows: + return { + "WadCfg": { + "DiagnosticMonitorConfiguration": { + "overallQuotaInMB": 4096, + "DiagnosticInfrastructureLogs": { + "scheduledTransferLogLevelFilter": "Error", + "scheduledTransferPeriod": "PT1M" + }, + "WindowsEventLog": { + "scheduledTransferPeriod": "PT1M", + "DataSource": [ + {"name": "Application!*[System[(Level=1 or Level=2)]]"}, + {"name": "System!*[System[(Level=1 or Level=2)]]"} + ] + }, + "Directories": { + "scheduledTransferPeriod": "PT1M" + }, + "PerformanceCounters": { + "scheduledTransferPeriod": "PT1M", + "PerformanceCounterConfiguration": [ + { + "counterSpecifier": "\\Processor(_Total)\\% Processor Time", + "sampleRate": "PT15S", + "unit": "Percent", + "annotation": [ + { + "displayName": "CPU utilization", + "locale": "en-us" + } + ] + }, + { + "counterSpecifier": "\\Processor(_Total)\\% Privileged Time", + "sampleRate": "PT15S", + "unit": "Percent", + "annotation": [ + { + "displayName": "CPU privileged time", + "locale": "en-us" + } + ] + }, + { + "counterSpecifier": "\\Processor(_Total)\\% User Time", + "sampleRate": "PT15S", + "unit": "Percent", + "annotation": [ + { + "displayName": "CPU user time", + "locale": "en-us" + } + ] + }, + { + "counterSpecifier": "\\Processor Information(_Total)\\Processor Frequency", + "sampleRate": "PT15S", + "unit": "Count", + "annotation": [ + { + "displayName": "CPU frequency", + "locale": "en-us" + } + ] + }, + { + "counterSpecifier": "\\System\\Processes", + "sampleRate": "PT15S", + "unit": "Count", + "annotation": [ + { + "displayName": "Processes", + "locale": "en-us" + } + ] + }, + { + "counterSpecifier": "\\Process(_Total)\\Thread Count", + "sampleRate": "PT15S", + "unit": "Count", + "annotation": [ + { + "displayName": "Threads", + "locale": "en-us" + } + ] + }, + { + "counterSpecifier": "\\Process(_Total)\\Handle Count", + "sampleRate": "PT15S", + "unit": "Count", + "annotation": [ + { + "displayName": "Handles", + "locale": "en-us" + } + ] + }, + { + "counterSpecifier": "\\Memory\\% Committed Bytes 
In Use", + "sampleRate": "PT15S", + "unit": "Percent", + "annotation": [ + { + "displayName": "Memory usage", + "locale": "en-us" + } + ] + }, + { + "counterSpecifier": "\\Memory\\Available Bytes", + "sampleRate": "PT15S", + "unit": "Bytes", + "annotation": [ + { + "displayName": "Memory available", + "locale": "en-us" + } + ] + }, + { + "counterSpecifier": "\\Memory\\Committed Bytes", + "sampleRate": "PT15S", + "unit": "Bytes", + "annotation": [ + { + "displayName": "Memory committed", + "locale": "en-us" + } + ] + }, + { + "counterSpecifier": "\\Memory\\Commit Limit", + "sampleRate": "PT15S", + "unit": "Bytes", + "annotation": [ + { + "displayName": "Memory commit limit", + "locale": "en-us" + } + ] + }, + { + "counterSpecifier": "\\PhysicalDisk(_Total)\\% Disk Time", + "sampleRate": "PT15S", + "unit": "Percent", + "annotation": [ + { + "displayName": "Disk active time", + "locale": "en-us" + } + ] + }, + { + "counterSpecifier": "\\PhysicalDisk(_Total)\\% Disk Read Time", + "sampleRate": "PT15S", + "unit": "Percent", + "annotation": [ + { + "displayName": "Disk active read time", + "locale": "en-us" + } + ] + }, + { + "counterSpecifier": "\\PhysicalDisk(_Total)\\% Disk Write Time", + "sampleRate": "PT15S", + "unit": "Percent", + "annotation": [ + { + "displayName": "Disk active write time", + "locale": "en-us" + } + ] + }, + { + "counterSpecifier": "\\PhysicalDisk(_Total)\\Disk Transfers/sec", + "sampleRate": "PT15S", + "unit": "CountPerSecond", + "annotation": [ + { + "displayName": "Disk operations", + "locale": "en-us" + } + ] + }, + { + "counterSpecifier": "\\PhysicalDisk(_Total)\\Disk Reads/sec", + "sampleRate": "PT15S", + "unit": "CountPerSecond", + "annotation": [ + { + "displayName": "Disk read operations", + "locale": "en-us" + } + ] + }, + { + "counterSpecifier": "\\PhysicalDisk(_Total)\\Disk Writes/sec", + "sampleRate": "PT15S", + "unit": "CountPerSecond", + "annotation": [ + { + "displayName": "Disk write operations", + "locale": "en-us" + } + ] + }, + { + "counterSpecifier": "\\PhysicalDisk(_Total)\\Disk Bytes/sec", + "sampleRate": "PT15S", + "unit": "BytesPerSecond", + "annotation": [ + { + "displayName": "Disk speed", + "locale": "en-us" + } + ] + }, + { + "counterSpecifier": "\\PhysicalDisk(_Total)\\Disk Read Bytes/sec", + "sampleRate": "PT15S", + "unit": "BytesPerSecond", + "annotation": [ + { + "displayName": "Disk read speed", + "locale": "en-us" + } + ] + }, + { + "counterSpecifier": "\\PhysicalDisk(_Total)\\Disk Write Bytes/sec", + "sampleRate": "PT15S", + "unit": "BytesPerSecond", + "annotation": [ + { + "displayName": "Disk write speed", + "locale": "en-us" + } + ] + }, + { + "counterSpecifier": "\\LogicalDisk(_Total)\\% Free Space", + "sampleRate": "PT15S", + "unit": "Percent", + "annotation": [ + { + "displayName": "Disk free space (percentage)", + "locale": "en-us" + } + ] + } + ] + }, + "Metrics": { + "resourceId": "__VM_OR_VMSS_RESOURCE_ID__", + "MetricAggregation": [ + { + "scheduledTransferPeriod": "PT1H" + }, + { + "scheduledTransferPeriod": "PT1M" + } + ] + } + } + }, + "StorageAccount": "__DIAGNOSTIC_STORAGE_ACCOUNT__" + } + else: + return { + "StorageAccount": "__DIAGNOSTIC_STORAGE_ACCOUNT__", + "ladCfg": { + "diagnosticMonitorConfiguration": { + "eventVolume": "Medium", + "metrics": { + "metricAggregation": [ + { + "scheduledTransferPeriod": "PT1H" + }, + { + "scheduledTransferPeriod": "PT1M" + } + ], + "resourceId": "__VM_OR_VMSS_RESOURCE_ID__" + }, + "performanceCounters": { + "performanceCounterConfiguration": [ + { + "annotation": [ + { + 
"displayName": "Disk read guest OS", + "locale": "en-us" + } + ], + "class": "disk", + "condition": "IsAggregate=TRUE", + "counter": "readbytespersecond", + "counterSpecifier": "/builtin/disk/readbytespersecond", + "type": "builtin", + "unit": "BytesPerSecond" + }, + { + "annotation": [ + { + "displayName": "Disk writes", + "locale": "en-us" + } + ], + "class": "disk", + "condition": "IsAggregate=TRUE", + "counter": "writespersecond", + "counterSpecifier": "/builtin/disk/writespersecond", + "type": "builtin", + "unit": "CountPerSecond" + }, + { + "annotation": [ + { + "displayName": "Disk transfer time", + "locale": "en-us" + } + ], + "class": "disk", + "condition": "IsAggregate=TRUE", + "counter": "averagetransfertime", + "counterSpecifier": "/builtin/disk/averagetransfertime", + "type": "builtin", + "unit": "Seconds" + }, + { + "annotation": [ + { + "displayName": "Disk transfers", + "locale": "en-us" + } + ], + "class": "disk", + "condition": "IsAggregate=TRUE", + "counter": "transferspersecond", + "counterSpecifier": "/builtin/disk/transferspersecond", + "type": "builtin", + "unit": "CountPerSecond" + }, + { + "annotation": [ + { + "displayName": "Disk write guest OS", + "locale": "en-us" + } + ], + "class": "disk", + "condition": "IsAggregate=TRUE", + "counter": "writebytespersecond", + "counterSpecifier": "/builtin/disk/writebytespersecond", + "type": "builtin", + "unit": "BytesPerSecond" + }, + { + "annotation": [ + { + "displayName": "Disk read time", + "locale": "en-us" + } + ], + "class": "disk", + "condition": "IsAggregate=TRUE", + "counter": "averagereadtime", + "counterSpecifier": "/builtin/disk/averagereadtime", + "type": "builtin", + "unit": "Seconds" + }, + { + "annotation": [ + { + "displayName": "Disk write time", + "locale": "en-us" + } + ], + "class": "disk", + "condition": "IsAggregate=TRUE", + "counter": "averagewritetime", + "counterSpecifier": "/builtin/disk/averagewritetime", + "type": "builtin", + "unit": "Seconds" + }, + { + "annotation": [ + { + "displayName": "Disk total bytes", + "locale": "en-us" + } + ], + "class": "disk", + "condition": "IsAggregate=TRUE", + "counter": "bytespersecond", + "counterSpecifier": "/builtin/disk/bytespersecond", + "type": "builtin", + "unit": "BytesPerSecond" + }, + { + "annotation": [ + { + "displayName": "Disk reads", + "locale": "en-us" + } + ], + "class": "disk", + "condition": "IsAggregate=TRUE", + "counter": "readspersecond", + "counterSpecifier": "/builtin/disk/readspersecond", + "type": "builtin", + "unit": "CountPerSecond" + }, + { + "annotation": [ + { + "displayName": "Disk queue length", + "locale": "en-us" + } + ], + "class": "disk", + "condition": "IsAggregate=TRUE", + "counter": "averagediskqueuelength", + "counterSpecifier": "/builtin/disk/averagediskqueuelength", + "type": "builtin", + "unit": "Count" + }, + { + "annotation": [ + { + "displayName": "Network in guest OS", + "locale": "en-us" + } + ], + "class": "network", + "counter": "bytesreceived", + "counterSpecifier": "/builtin/network/bytesreceived", + "type": "builtin", + "unit": "Bytes" + }, + { + "annotation": [ + { + "displayName": "Network total bytes", + "locale": "en-us" + } + ], + "class": "network", + "counter": "bytestotal", + "counterSpecifier": "/builtin/network/bytestotal", + "type": "builtin", + "unit": "Bytes" + }, + { + "annotation": [ + { + "displayName": "Network out guest OS", + "locale": "en-us" + } + ], + "class": "network", + "counter": "bytestransmitted", + "counterSpecifier": "/builtin/network/bytestransmitted", + "type": "builtin", + 
"unit": "Bytes" + }, + { + "annotation": [ + { + "displayName": "Network collisions", + "locale": "en-us" + } + ], + "class": "network", + "counter": "totalcollisions", + "counterSpecifier": "/builtin/network/totalcollisions", + "type": "builtin", + "unit": "Count" + }, + { + "annotation": [ + { + "displayName": "Packets received errors", + "locale": "en-us" + } + ], + "class": "network", + "counter": "totalrxerrors", + "counterSpecifier": "/builtin/network/totalrxerrors", + "type": "builtin", + "unit": "Count" + }, + { + "annotation": [ + { + "displayName": "Packets sent", + "locale": "en-us" + } + ], + "class": "network", + "counter": "packetstransmitted", + "counterSpecifier": "/builtin/network/packetstransmitted", + "type": "builtin", + "unit": "Count" + }, + { + "annotation": [ + { + "displayName": "Packets received", + "locale": "en-us" + } + ], + "class": "network", + "counter": "packetsreceived", + "counterSpecifier": "/builtin/network/packetsreceived", + "type": "builtin", + "unit": "Count" + }, + { + "annotation": [ + { + "displayName": "Packets sent errors", + "locale": "en-us" + } + ], + "class": "network", + "counter": "totaltxerrors", + "counterSpecifier": "/builtin/network/totaltxerrors", + "type": "builtin", + "unit": "Count" + }, + { + "annotation": [ + { + "displayName": "Filesystem transfers/sec", + "locale": "en-us" + } + ], + "class": "filesystem", + "condition": "IsAggregate=TRUE", + "counter": "transferspersecond", + "counterSpecifier": "/builtin/filesystem/transferspersecond", + "type": "builtin", + "unit": "CountPerSecond" + }, + { + "annotation": [ + { + "displayName": "Filesystem % free space", + "locale": "en-us" + } + ], + "class": "filesystem", + "condition": "IsAggregate=TRUE", + "counter": "percentfreespace", + "counterSpecifier": "/builtin/filesystem/percentfreespace", + "type": "builtin", + "unit": "Percent" + }, + { + "annotation": [ + { + "displayName": "Filesystem % used space", + "locale": "en-us" + } + ], + "class": "filesystem", + "condition": "IsAggregate=TRUE", + "counter": "percentusedspace", + "counterSpecifier": "/builtin/filesystem/percentusedspace", + "type": "builtin", + "unit": "Percent" + }, + { + "annotation": [ + { + "displayName": "Filesystem used space", + "locale": "en-us" + } + ], + "class": "filesystem", + "condition": "IsAggregate=TRUE", + "counter": "usedspace", + "counterSpecifier": "/builtin/filesystem/usedspace", + "type": "builtin", + "unit": "Bytes" + }, + { + "annotation": [ + { + "displayName": "Filesystem read bytes/sec", + "locale": "en-us" + } + ], + "class": "filesystem", + "condition": "IsAggregate=TRUE", + "counter": "bytesreadpersecond", + "counterSpecifier": "/builtin/filesystem/bytesreadpersecond", + "type": "builtin", + "unit": "CountPerSecond" + }, + { + "annotation": [ + { + "displayName": "Filesystem free space", + "locale": "en-us" + } + ], + "class": "filesystem", + "condition": "IsAggregate=TRUE", + "counter": "freespace", + "counterSpecifier": "/builtin/filesystem/freespace", + "type": "builtin", + "unit": "Bytes" + }, + { + "annotation": [ + { + "displayName": "Filesystem % free inodes", + "locale": "en-us" + } + ], + "class": "filesystem", + "condition": "IsAggregate=TRUE", + "counter": "percentfreeinodes", + "counterSpecifier": "/builtin/filesystem/percentfreeinodes", + "type": "builtin", + "unit": "Percent" + }, + { + "annotation": [ + { + "displayName": "Filesystem bytes/sec", + "locale": "en-us" + } + ], + "class": "filesystem", + "condition": "IsAggregate=TRUE", + "counter": "bytespersecond", + 
"counterSpecifier": "/builtin/filesystem/bytespersecond", + "type": "builtin", + "unit": "BytesPerSecond" + }, + { + "annotation": [ + { + "displayName": "Filesystem reads/sec", + "locale": "en-us" + } + ], + "class": "filesystem", + "condition": "IsAggregate=TRUE", + "counter": "readspersecond", + "counterSpecifier": "/builtin/filesystem/readspersecond", + "type": "builtin", + "unit": "CountPerSecond" + }, + { + "annotation": [ + { + "displayName": "Filesystem write bytes/sec", + "locale": "en-us" + } + ], + "class": "filesystem", + "condition": "IsAggregate=TRUE", + "counter": "byteswrittenpersecond", + "counterSpecifier": "/builtin/filesystem/byteswrittenpersecond", + "type": "builtin", + "unit": "CountPerSecond" + }, + { + "annotation": [ + { + "displayName": "Filesystem writes/sec", + "locale": "en-us" + } + ], + "class": "filesystem", + "condition": "IsAggregate=TRUE", + "counter": "writespersecond", + "counterSpecifier": "/builtin/filesystem/writespersecond", + "type": "builtin", + "unit": "CountPerSecond" + }, + { + "annotation": [ + { + "displayName": "Filesystem % used inodes", + "locale": "en-us" + } + ], + "class": "filesystem", + "condition": "IsAggregate=TRUE", + "counter": "percentusedinodes", + "counterSpecifier": "/builtin/filesystem/percentusedinodes", + "type": "builtin", + "unit": "Percent" + }, + { + "annotation": [ + { + "displayName": "CPU IO wait time", + "locale": "en-us" + } + ], + "class": "processor", + "condition": "IsAggregate=TRUE", + "counter": "percentiowaittime", + "counterSpecifier": "/builtin/processor/percentiowaittime", + "type": "builtin", + "unit": "Percent" + }, + { + "annotation": [ + { + "displayName": "CPU user time", + "locale": "en-us" + } + ], + "class": "processor", + "condition": "IsAggregate=TRUE", + "counter": "percentusertime", + "counterSpecifier": "/builtin/processor/percentusertime", + "type": "builtin", + "unit": "Percent" + }, + { + "annotation": [ + { + "displayName": "CPU nice time", + "locale": "en-us" + } + ], + "class": "processor", + "condition": "IsAggregate=TRUE", + "counter": "percentnicetime", + "counterSpecifier": "/builtin/processor/percentnicetime", + "type": "builtin", + "unit": "Percent" + }, + { + "annotation": [ + { + "displayName": "CPU percentage guest OS", + "locale": "en-us" + } + ], + "class": "processor", + "condition": "IsAggregate=TRUE", + "counter": "percentprocessortime", + "counterSpecifier": "/builtin/processor/percentprocessortime", + "type": "builtin", + "unit": "Percent" + }, + { + "annotation": [ + { + "displayName": "CPU interrupt time", + "locale": "en-us" + } + ], + "class": "processor", + "condition": "IsAggregate=TRUE", + "counter": "percentinterrupttime", + "counterSpecifier": "/builtin/processor/percentinterrupttime", + "type": "builtin", + "unit": "Percent" + }, + { + "annotation": [ + { + "displayName": "CPU idle time", + "locale": "en-us" + } + ], + "class": "processor", + "condition": "IsAggregate=TRUE", + "counter": "percentidletime", + "counterSpecifier": "/builtin/processor/percentidletime", + "type": "builtin", + "unit": "Percent" + }, + { + "annotation": [ + { + "displayName": "CPU privileged time", + "locale": "en-us" + } + ], + "class": "processor", + "condition": "IsAggregate=TRUE", + "counter": "percentprivilegedtime", + "counterSpecifier": "/builtin/processor/percentprivilegedtime", + "type": "builtin", + "unit": "Percent" + }, + { + "annotation": [ + { + "displayName": "Memory available", + "locale": "en-us" + } + ], + "class": "memory", + "counter": "availablememory", + 
"counterSpecifier": "/builtin/memory/availablememory", + "type": "builtin", + "unit": "Bytes" + }, + { + "annotation": [ + { + "displayName": "Swap percent used", + "locale": "en-us" + } + ], + "class": "memory", + "counter": "percentusedswap", + "counterSpecifier": "/builtin/memory/percentusedswap", + "type": "builtin", + "unit": "Percent" + }, + { + "annotation": [ + { + "displayName": "Memory used", + "locale": "en-us" + } + ], + "class": "memory", + "counter": "usedmemory", + "counterSpecifier": "/builtin/memory/usedmemory", + "type": "builtin", + "unit": "Bytes" + }, + { + "annotation": [ + { + "displayName": "Page reads", + "locale": "en-us" + } + ], + "class": "memory", + "counter": "pagesreadpersec", + "counterSpecifier": "/builtin/memory/pagesreadpersec", + "type": "builtin", + "unit": "CountPerSecond" + }, + { + "annotation": [ + { + "displayName": "Swap available", + "locale": "en-us" + } + ], + "class": "memory", + "counter": "availableswap", + "counterSpecifier": "/builtin/memory/availableswap", + "type": "builtin", + "unit": "Bytes" + }, + { + "annotation": [ + { + "displayName": "Swap percent available", + "locale": "en-us" + } + ], + "class": "memory", + "counter": "percentavailableswap", + "counterSpecifier": "/builtin/memory/percentavailableswap", + "type": "builtin", + "unit": "Percent" + }, + { + "annotation": [ + { + "displayName": "Mem. percent available", + "locale": "en-us" + } + ], + "class": "memory", + "counter": "percentavailablememory", + "counterSpecifier": "/builtin/memory/percentavailablememory", + "type": "builtin", + "unit": "Percent" + }, + { + "annotation": [ + { + "displayName": "Pages", + "locale": "en-us" + } + ], + "class": "memory", + "counter": "pagespersec", + "counterSpecifier": "/builtin/memory/pagespersec", + "type": "builtin", + "unit": "CountPerSecond" + }, + { + "annotation": [ + { + "displayName": "Swap used", + "locale": "en-us" + } + ], + "class": "memory", + "counter": "usedswap", + "counterSpecifier": "/builtin/memory/usedswap", + "type": "builtin", + "unit": "Bytes" + }, + { + "annotation": [ + { + "displayName": "Memory percentage", + "locale": "en-us" + } + ], + "class": "memory", + "counter": "percentusedmemory", + "counterSpecifier": "/builtin/memory/percentusedmemory", + "type": "builtin", + "unit": "Percent" + }, + { + "annotation": [ + { + "displayName": "Page writes", + "locale": "en-us" + } + ], + "class": "memory", + "counter": "pageswrittenpersec", + "counterSpecifier": "/builtin/memory/pageswrittenpersec", + "type": "builtin", + "unit": "CountPerSecond" + } + ] + }, + "syslogEvents": { + "syslogEventConfiguration": { + "LOG_AUTH": "LOG_DEBUG", + "LOG_AUTHPRIV": "LOG_DEBUG", + "LOG_CRON": "LOG_DEBUG", + "LOG_DAEMON": "LOG_DEBUG", + "LOG_FTP": "LOG_DEBUG", + "LOG_KERN": "LOG_DEBUG", + "LOG_LOCAL0": "LOG_DEBUG", + "LOG_LOCAL1": "LOG_DEBUG", + "LOG_LOCAL2": "LOG_DEBUG", + "LOG_LOCAL3": "LOG_DEBUG", + "LOG_LOCAL4": "LOG_DEBUG", + "LOG_LOCAL5": "LOG_DEBUG", + "LOG_LOCAL6": "LOG_DEBUG", + "LOG_LOCAL7": "LOG_DEBUG", + "LOG_LPR": "LOG_DEBUG", + "LOG_MAIL": "LOG_DEBUG", + "LOG_NEWS": "LOG_DEBUG", + "LOG_SYSLOG": "LOG_DEBUG", + "LOG_USER": "LOG_DEBUG", + "LOG_UUCP": "LOG_DEBUG" + } + } + }, + "sampleRateInSeconds": 15 + } + } diff --git a/src/azure-cli/azure/cli/command_modules/vm/azure_stack/_vm_utils.py b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/_vm_utils.py new file mode 100644 index 00000000000..5263472e8ad --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/_vm_utils.py @@ -0,0 +1,758 @@ +# 
--------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
+
+import importlib
+import json
+import os
+import re
+
+from azure.cli.core.commands.arm import ArmTemplateBuilder
+
+try:
+    from urllib.parse import urlparse
+except ImportError:
+    from urlparse import urlparse  # pylint: disable=import-error
+
+from knack.log import get_logger
+from knack.util import CLIError
+
+logger = get_logger(__name__)
+
+MSI_LOCAL_ID = '[system]'
+
+
+def get_target_network_api(cli_ctx):
+    """
+    Return the fixed network api-version used by ARM template deployment.
+    This is kept consistent with the version settings of other RPs so that the core commands
+    "az vm create" and "az vmss create" remain stable.
+    It also avoids re-recording a large number of vm tests whenever the network api-version is bumped.
+    Because the deployment does not go through the Python SDK, pinning this version adds no
+    dependency on the Python SDK.
+    """
+    if cli_ctx.cloud.profile == 'latest':
+        version = '2022-01-01'
+    else:
+        from azure.cli.core.profiles import get_api_version, ResourceType
+        version = get_api_version(cli_ctx, ResourceType.MGMT_NETWORK)
+    return version
+
+
+def read_content_if_is_file(string_or_file):
+    content = string_or_file
+    if os.path.exists(string_or_file):
+        with open(string_or_file, 'r') as f:
+            content = f.read()
+    return content
+
+
+def _resolve_api_version(cli_ctx, provider_namespace, resource_type, parent_path):
+    from azure.cli.core.commands.client_factory import get_mgmt_service_client
+    from azure.cli.core.profiles import ResourceType
+    client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
+    provider = client.providers.get(provider_namespace)
+
+    # If available, we will use the parent resource's api-version
+    resource_type_str = (parent_path.split('/')[0] if parent_path else resource_type)
+
+    rt = [t for t in provider.resource_types  # pylint: disable=no-member
+          if t.resource_type.lower() == resource_type_str.lower()]
+    if not rt:
+        raise CLIError('Resource type {} not found.'.format(resource_type_str))
+    if len(rt) == 1 and rt[0].api_versions:
+        npv = [v for v in rt[0].api_versions if 'preview' not in v.lower()]
+        return npv[0] if npv else rt[0].api_versions[0]
+    raise CLIError(
+        'API version is required and could not be resolved for resource {}'
+        .format(resource_type))
+
+
+def log_pprint_template(template):
+    logger.info('==== BEGIN TEMPLATE ====')
+    logger.info(json.dumps(template, indent=2))
+    logger.info('==== END TEMPLATE ====')
+
+
+def check_existence(cli_ctx, value, resource_group, provider_namespace, resource_type,
+                    parent_name=None, parent_type=None):
+    # check for name or ID and set the type flags
+    from azure.cli.core.commands.client_factory import get_mgmt_service_client
+    from azure.core.exceptions import HttpResponseError
+    from azure.mgmt.core.tools import parse_resource_id
+    from azure.cli.core.profiles import ResourceType
+    id_parts = parse_resource_id(value)
+    resource_client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
+                                              subscription_id=id_parts.get('subscription', None)).resources
+    rg = id_parts.get('resource_group', resource_group)
+    ns = id_parts.get('namespace', provider_namespace)
+
+    if parent_name and parent_type:
+        parent_path = 
'{}/{}'.format(parent_type, parent_name) + resource_name = id_parts.get('child_name_1', value) + resource_type = id_parts.get('child_type_1', resource_type) + else: + parent_path = '' + resource_name = id_parts['name'] + resource_type = id_parts.get('type', resource_type) + api_version = _resolve_api_version(cli_ctx, provider_namespace, resource_type, parent_path) + + try: + resource_client.get(rg, ns, parent_path, resource_type, resource_name, api_version) + return True + except HttpResponseError: + return False + + +def create_data_plane_keyvault_certificate_client(cli_ctx, vault_base_url): + from azure.cli.command_modules.keyvault._client_factory import data_plane_azure_keyvault_certificate_client + return data_plane_azure_keyvault_certificate_client(cli_ctx, {"vault_base_url": vault_base_url}) + + +def create_data_plane_keyvault_key_client(cli_ctx, vault_base_url): + from azure.cli.command_modules.keyvault._client_factory import data_plane_azure_keyvault_key_client + return data_plane_azure_keyvault_key_client(cli_ctx, {"vault_base_url": vault_base_url}) + + +def get_key_vault_base_url(cli_ctx, vault_name): + suffix = cli_ctx.cloud.suffixes.keyvault_dns + return 'https://{}{}'.format(vault_name, suffix) + + +def list_sku_info(cli_ctx, location=None): + from ._client_factory import _compute_client_factory + + def _match_location(loc, locations): + return next((x for x in locations if x.lower() == loc.lower()), None) + + client = _compute_client_factory(cli_ctx) + result = client.resource_skus.list() + if location: + result = [r for r in result if _match_location(location, r.locations)] + return result + + +def is_sku_available(cmd, sku_info, zone): + """ + The SKU is unavailable in the following cases: + 1. regional restriction and the region is restricted + 2. parameter --zone is input which indicates only showing skus with availability zones. 
+ Meanwhile, zonal restriction and all zones are restricted + """ + is_available = True + is_restrict_zone = False + is_restrict_location = False + if not sku_info.restrictions: + return is_available + for restriction in sku_info.restrictions: + if restriction.reason_code == 'NotAvailableForSubscription': + # The attribute location_info is not supported in versions 2017-03-30 and earlier + if cmd.supported_api_version(max_api='2017-03-30'): + is_available = False + break + if restriction.type == 'Zone' and not ( + set(sku_info.location_info[0].zones or []) - set(restriction.restriction_info.zones or [])): + is_restrict_zone = True + if restriction.type == 'Location' and ( + sku_info.location_info[0].location in (restriction.restriction_info.locations or [])): + is_restrict_location = True + + if is_restrict_location or (is_restrict_zone and zone): + is_available = False + break + return is_available + + +# pylint: disable=too-many-statements, too-many-branches, too-many-locals +def normalize_disk_info(image_data_disks=None, + data_disk_sizes_gb=None, attach_data_disks=None, storage_sku=None, + os_disk_caching=None, data_disk_cachings=None, size='', + ephemeral_os_disk=False, ephemeral_os_disk_placement=None, + data_disk_delete_option=None, source_snapshots_or_disks=None, + source_snapshots_or_disks_size_gb=None, source_disk_restore_point=None, + source_disk_restore_point_size_gb=None): + from azure.mgmt.core.tools import is_valid_resource_id + from ._validators import validate_delete_options + is_lv_size = re.search('_L[0-9]+s', size, re.I) + # we should return a dictionary with info like below + # { + # 'os': { caching: 'Read', write_accelerator: None}, + # 0: { caching: 'None', write_accelerator: True}, + # 1: { caching: 'None', write_accelerator: True}, + # } + info = {} + used_luns = set() + + attach_data_disks = attach_data_disks or [] + data_disk_sizes_gb = data_disk_sizes_gb or [] + image_data_disks = image_data_disks or [] + source_snapshots_or_disks = source_snapshots_or_disks or [] + source_snapshots_or_disks_size_gb = source_snapshots_or_disks_size_gb or [] + source_disk_restore_point = source_disk_restore_point or [] + source_disk_restore_point_size_gb = source_disk_restore_point_size_gb or [] + + if data_disk_delete_option: + if attach_data_disks: + data_disk_delete_option = validate_delete_options(attach_data_disks, data_disk_delete_option) + else: + if isinstance(data_disk_delete_option, list) and len(data_disk_delete_option) == 1 and len( + data_disk_delete_option[0].split('=')) == 1: # pylint: disable=line-too-long + data_disk_delete_option = data_disk_delete_option[0] + info['os'] = {} + # update os diff disk settings + if ephemeral_os_disk: + info['os']['diffDiskSettings'] = {'option': 'Local'} + # local os disks require readonly caching, default to ReadOnly if os_disk_caching not specified. 
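+        # For example (illustrative values), ephemeral_os_disk=True with
+        # ephemeral_os_disk_placement='CacheDisk' ends up as
+        # info['os'] == {'diffDiskSettings': {'option': 'Local', 'placement': 'CacheDisk'},
+        #                'caching': 'ReadOnly'} once the caching default below is applied.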
+ if not os_disk_caching: + os_disk_caching = 'ReadOnly' + if ephemeral_os_disk_placement: + info['os']['diffDiskSettings']['placement'] = ephemeral_os_disk_placement + + # add managed image data disks + for data_disk in image_data_disks: + i = data_disk['lun'] + info[i] = { + 'lun': i, + 'managedDisk': {'storageAccountType': None}, + 'createOption': 'fromImage' + } + used_luns.add(i) + + # add empty data disks, do not use existing luns + i = 0 + sizes_copy = list(data_disk_sizes_gb) + while sizes_copy: + # get free lun + while i in used_luns: + i += 1 + + used_luns.add(i) + + info[i] = { + 'lun': i, + 'managedDisk': {'storageAccountType': None}, + 'createOption': 'empty', + 'diskSizeGB': sizes_copy.pop(0), + } + if isinstance(data_disk_delete_option, str): + info[i]['deleteOption'] = data_disk_delete_option + + # add copy data disks + i = 0 + source_resource_copy = list(source_snapshots_or_disks) + source_resource_copy_size = list(source_snapshots_or_disks_size_gb) + while source_resource_copy: + while i in used_luns: + i += 1 + + used_luns.add(i) + + info[i] = { + 'lun': i, + 'createOption': 'copy', + 'managedDisk': {'storageAccountType': None}, + 'diskSizeGB': source_resource_copy_size.pop(0), + 'sourceResource': { + 'id': source_resource_copy.pop(0) + } + } + + # add restore data disks + i = 0 + source_resource_restore = list(source_disk_restore_point) + source_resource_restore_size = list(source_disk_restore_point_size_gb) + while source_resource_restore: + while i in used_luns: + i += 1 + + used_luns.add(i) + + info[i] = { + 'lun': i, + 'createOption': 'restore', + 'managedDisk': {'storageAccountType': None}, + 'diskSizeGB': source_resource_restore_size.pop(0), + 'sourceResource': { + 'id': source_resource_restore.pop(0) + } + } + + # update storage skus for managed data disks + if storage_sku is not None: + update_disk_sku_info(info, storage_sku) + + # check that os storage account type is not UltraSSD_LRS + if info['os'].get('storageAccountType', "").lower() == 'ultrassd_lrs': + logger.warning("Managed os disk storage account sku cannot be UltraSSD_LRS. 
Using service default.") + info['os']['storageAccountType'] = None + + # add attached data disks + i = 0 + attach_data_disks_copy = list(attach_data_disks) + while attach_data_disks_copy: + # get free lun + while i in used_luns: + i += 1 + + used_luns.add(i) + + # use free lun + info[i] = { + 'lun': i, + 'createOption': 'attach' + } + + d = attach_data_disks_copy.pop(0) + info[i]['name'] = d.split('/')[-1].split('.')[0] + if is_valid_resource_id(d): + info[i]['managedDisk'] = {'id': d} + if data_disk_delete_option: + info[i]['deleteOption'] = data_disk_delete_option if isinstance(data_disk_delete_option, str) \ + else data_disk_delete_option.get(info[i]['name'], None) + else: + info[i]['vhd'] = {'uri': d} + if data_disk_delete_option: + info[i]['deleteOption'] = data_disk_delete_option if isinstance(data_disk_delete_option, str) \ + else data_disk_delete_option.get(info[i]['name'], None) + + # fill in data disk caching + if data_disk_cachings: + update_disk_caching(info, data_disk_cachings) + + # default os disk caching to 'ReadWrite' unless set otherwise + if os_disk_caching: + info['os']['caching'] = os_disk_caching + else: + info['os']['caching'] = 'None' if is_lv_size else 'ReadWrite' + + # error out on invalid vm sizes + if is_lv_size: + for v in info.values(): + if v.get('caching', 'None').lower() != 'none': + raise CLIError('usage error: for Lv series of machines, "None" is the only supported caching mode') + + result_info = {'os': info['os']} + + # in python 3 insertion order matters during iteration. This ensures that luns are retrieved in numerical order + for key in sorted([key for key in info if key != 'os']): + result_info[key] = info[key] + + return result_info + + +def update_disk_caching(model, caching_settings): + def _update(model, lun, value): + if isinstance(model, dict): + luns = model.keys() if lun is None else [lun] + for lun_item in luns: + if lun_item not in model: + raise CLIError("Data disk with lun of '{}' doesn't exist. Existing luns: {}." 
+                                   .format(lun_item, list(model.keys())))
+                model[lun_item]['caching'] = value
+        else:
+            if lun is None:
+                disks = [model.os_disk] + (model.data_disks or [])
+            elif lun == 'os':
+                disks = [model.os_disk]
+            else:
+                disk = next((d for d in model.data_disks if d.lun == lun), None)
+                if not disk:
+                    raise CLIError("data disk with lun of '{}' doesn't exist".format(lun))
+                disks = [disk]
+            for disk in disks:
+                disk.caching = value
+
+    if len(caching_settings) == 1 and '=' not in caching_settings[0]:
+        _update(model, None, caching_settings[0])
+    else:
+        for x in caching_settings:
+            if '=' not in x:
+                raise CLIError("usage error: please use 'LUN=VALUE' to configure caching on individual disk")
+            lun, value = x.split('=', 1)
+            lun = lun.lower()
+            lun = int(lun) if lun != 'os' else lun
+            _update(model, lun, value)
+
+
+def update_write_accelerator_settings(model, write_accelerator_settings):
+    def _update(model, lun, value):
+        if isinstance(model, dict):
+            luns = model.keys() if lun is None else [lun]
+            for lun_item in luns:
+                if lun_item not in model:
+                    raise CLIError("data disk with lun of '{}' doesn't exist".format(lun_item))
+                model[lun_item]['writeAcceleratorEnabled'] = value
+        else:
+            if lun is None:
+                disks = [model.os_disk] + (model.data_disks or [])
+            elif lun == 'os':
+                disks = [model.os_disk]
+            else:
+                disk = next((d for d in model.data_disks if d.lun == lun), None)
+                if not disk:
+                    raise CLIError("data disk with lun of '{}' doesn't exist".format(lun))
+                disks = [disk]
+            for disk in disks:
+                disk.write_accelerator_enabled = value
+
+    if len(write_accelerator_settings) == 1 and '=' not in write_accelerator_settings[0]:
+        _update(model, None, write_accelerator_settings[0].lower() == 'true')
+    else:
+        for x in write_accelerator_settings:
+            if '=' not in x:
+                raise CLIError("usage error: please use 'LUN=VALUE' to configure write accelerator"
+                               " on individual disk")
+            lun, value = x.split('=', 1)
+            lun = lun.lower()
+            lun = int(lun) if lun != 'os' else lun
+            _update(model, lun, value.lower() == 'true')
+
+
+def get_storage_blob_uri(cli_ctx, storage):
+    from azure.cli.core.profiles._shared import ResourceType
+    from azure.cli.core.commands.client_factory import get_mgmt_service_client
+    if urlparse(storage).scheme:
+        storage_uri = storage
+    else:
+        storage_mgmt_client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_STORAGE)
+        storage_accounts = storage_mgmt_client.storage_accounts.list()
+        storage_account = next((a for a in list(storage_accounts)
+                                if a.name.lower() == storage.lower()), None)
+        if storage_account is None:
+            raise CLIError('{} doesn\'t exist.'.format(storage))
+        storage_uri = storage_account.primary_endpoints.blob
+    return storage_uri
+
+
+def update_disk_sku_info(info_dict, skus):
+    usage_msg = 'Usage:\n\t[--storage-sku SKU | --storage-sku ID=SKU ID=SKU ID=SKU...]\n' \
+                'where each ID is "os" or a 0-indexed lun.'
+
+    def _update(info, lun, value):
+        luns = info.keys()
+        if lun not in luns:
+            raise CLIError("Data disk with lun of '{}' doesn't exist. 
Existing luns: {}.".format(lun, luns)) + if lun == 'os': + info[lun]['storageAccountType'] = value + else: + info[lun]['managedDisk']['storageAccountType'] = value + + if len(skus) == 1 and '=' not in skus[0]: + for lun in info_dict.keys(): + _update(info_dict, lun, skus[0]) + else: + for sku in skus: + if '=' not in sku: + raise CLIError("A sku's format is incorrect.\n{}".format(usage_msg)) + + lun, value = sku.split('=', 1) + lun = lun.lower() + try: + lun = int(lun) if lun != "os" else lun + except ValueError: + raise CLIError("A sku ID is incorrect.\n{}".format(usage_msg)) + _update(info_dict, lun, value) + + +def is_shared_gallery_image_id(image_reference): + if not image_reference: + return False + + shared_gallery_id_pattern = re.compile(r'^/SharedGalleries/[^/]*/Images/[^/]*/Versions/.*$', re.IGNORECASE) + if shared_gallery_id_pattern.match(image_reference): + return True + + return False + + +def is_valid_vm_resource_id(vm_resource_id): + if not vm_resource_id: + return False + + vm_id_pattern = re.compile(r'^/subscriptions/[^/]*/resourceGroups/[^/]*/providers/Microsoft.Compute/' + r'virtualMachines/.*$', re.IGNORECASE) + if vm_id_pattern.match(vm_resource_id): + return True + + return False + + +def is_valid_vmss_resource_id(vmss_resource_id): + if not vmss_resource_id: + return False + + vmss_id_pattern = re.compile(r'^/subscriptions/[^/]*/resourceGroups/[^/]*/providers/Microsoft.Compute/' + r'virtualMachineScaleSets/.*$', re.IGNORECASE) + if vmss_id_pattern.match(vmss_resource_id): + return True + + return False + + +def is_valid_image_version_id(image_version_id): + if not image_version_id: + return False + + image_version_id_pattern = re.compile(r'^/subscriptions/[^/]*/resourceGroups/[^/]*/providers/Microsoft.Compute/' + r'galleries/[^/]*/images/[^/]*/versions/.*$', re.IGNORECASE) + if image_version_id_pattern.match(image_version_id): + return True + + return False + + +def is_valid_vm_image_id(image_image_id): + if not image_image_id: + return False + + image_version_id_pattern = re.compile(r'^/subscriptions/[^/]*/resourceGroups/[^/]*/providers/Microsoft.Compute/' + r'images/.*$', re.IGNORECASE) + if image_version_id_pattern.match(image_image_id): + return True + + return False + + +def parse_gallery_image_id(image_reference): + from azure.cli.core.azclierror import InvalidArgumentValueError + + if not image_reference: + raise InvalidArgumentValueError( + 'Please pass in the gallery image id through the parameter --image') + + image_info = re.search(r'^/subscriptions/([^/]*)/resourceGroups/([^/]*)/providers/Microsoft.Compute/' + r'galleries/([^/]*)/images/([^/]*)/versions/.*$', image_reference, re.IGNORECASE) + if not image_info or len(image_info.groups()) < 2: + raise InvalidArgumentValueError( + 'The gallery image id is invalid. The valid format should be "/subscriptions/{sub_id}' + '/resourceGroups/{rg}/providers/Microsoft.Compute/galleries/{gallery_name}' + '/Images/{gallery_image_name}/Versions/{image_version}"') + + # Return the gallery subscription id, resource group name, gallery name and gallery image name. 
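+    # e.g. '/subscriptions/0000/resourceGroups/rg/providers/Microsoft.Compute/galleries/myGallery'
+    #      '/images/myImage/versions/1.0.0' parses to ('0000', 'rg', 'myGallery', 'myImage').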
+ return image_info.group(1), image_info.group(2), image_info.group(3), image_info.group(4) + + +def parse_shared_gallery_image_id(image_reference): + from azure.cli.core.azclierror import InvalidArgumentValueError + + if not image_reference: + raise InvalidArgumentValueError( + 'Please pass in the shared gallery image id through the parameter --image') + + image_info = re.search(r'^/SharedGalleries/([^/]*)/Images/([^/]*)/Versions/.*$', image_reference, re.IGNORECASE) + if not image_info or len(image_info.groups()) < 2: + raise InvalidArgumentValueError( + 'The shared gallery image id is invalid. The valid format should be ' + '"/SharedGalleries/{gallery_unique_name}/Images/{gallery_image_name}/Versions/{image_version}"') + + # Return the gallery unique name and gallery image name parsed from shared gallery image id + return image_info.group(1), image_info.group(2) + + +def parse_vm_image_id(image_id): + from azure.cli.core.azclierror import InvalidArgumentValueError + + image_info = re.search(r'^/subscriptions/([^/]*)/resourceGroups/([^/]*)/providers/Microsoft.Compute/' + r'images/(.*$)', image_id, re.IGNORECASE) + if not image_info or len(image_info.groups()) < 2: + raise InvalidArgumentValueError( + 'The gallery image id is invalid. The valid format should be "/subscriptions/{sub_id}' + '/resourceGroups/{rg}/providers/Microsoft.Compute/images/{image_name}"') + + # Return the gallery subscription id, resource group name and image name. + return image_info.group(1), image_info.group(2), image_info.group(3) + + +def is_compute_gallery_image_id(image_reference): + if not image_reference: + return False + + compute_gallery_id_pattern = re.compile(r'^/subscriptions/[^/]*/resourceGroups/[^/]*/providers/Microsoft.Compute/' + r'galleries/[^/]*/images/.*$', re.IGNORECASE) + if compute_gallery_id_pattern.match(image_reference): + return True + + return False + + +def is_community_gallery_image_id(image_reference): + if not image_reference: + return False + + community_gallery_id_pattern = re.compile(r'^/CommunityGalleries/[^/]*/Images/[^/]*/Versions/.*$', re.IGNORECASE) + if community_gallery_id_pattern.match(image_reference): + return True + + return False + + +def parse_community_gallery_image_id(image_reference): + from azure.cli.core.azclierror import InvalidArgumentValueError + + if not image_reference: + raise InvalidArgumentValueError( + 'Please pass in the community gallery image id through the parameter --image') + + image_info = re.search(r'^/CommunityGalleries/([^/]*)/Images/([^/]*)/Versions/.*$', image_reference, re.IGNORECASE) + if not image_info or len(image_info.groups()) < 2: + raise InvalidArgumentValueError( + 'The community gallery image id is invalid. 
The valid format should be '
+            '"/CommunityGalleries/{gallery_unique_name}/Images/{gallery_image_name}/Versions/{image_version}"')
+
+    # Return the gallery unique name and gallery image name parsed from community gallery image id
+    return image_info.group(1), image_info.group(2)
+
+
+class ArmTemplateBuilder20190401(ArmTemplateBuilder):
+
+    def __init__(self):
+        super().__init__()
+        self.template['$schema'] = 'https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#'
+
+
+def raise_unsupported_error_for_flex_vmss(vmss, error_message):
+    if hasattr(vmss, 'orchestration_mode') and vmss.orchestration_mode \
+            and vmss.orchestration_mode.lower() == 'flexible':
+        from azure.cli.core.azclierror import ArgumentUsageError
+        raise ArgumentUsageError(error_message)
+
+
+def is_trusted_launch_supported(supported_features):
+    if not supported_features:
+        return False
+
+    trusted_launch = {'TrustedLaunchSupported', 'TrustedLaunch', 'TrustedLaunchAndConfidentialVmSupported'}
+
+    return bool(trusted_launch.intersection({feature.value for feature in supported_features}))
+
+
+def trusted_launch_warning_log(namespace, generation_version, features):
+    if not generation_version:
+        return
+
+    from ._constants import TLAD_DEFAULT_CHANGE_MSG
+    log_message = TLAD_DEFAULT_CHANGE_MSG.format('az vm/vmss create')
+
+    from ._constants import COMPATIBLE_SECURITY_TYPE_VALUE, UPGRADE_SECURITY_HINT
+    if generation_version == 'V1':
+        if namespace.security_type and namespace.security_type == COMPATIBLE_SECURITY_TYPE_VALUE:
+            logger.warning(UPGRADE_SECURITY_HINT)
+        else:
+            logger.warning(log_message)
+
+    if generation_version == 'V2' and is_trusted_launch_supported(features):
+        if not namespace.security_type:
+            logger.warning(log_message)
+        elif namespace.security_type == COMPATIBLE_SECURITY_TYPE_VALUE:
+            logger.warning(UPGRADE_SECURITY_HINT)
+
+
+def validate_vm_disk_trusted_launch(namespace, disk_security_profile):
+    from ._constants import UPGRADE_SECURITY_HINT
+
+    if disk_security_profile is None:
+        logger.warning(UPGRADE_SECURITY_HINT)
+        return
+
+    security_type = disk_security_profile.security_type if hasattr(disk_security_profile, 'security_type') else None
+    # security_type may be None when the profile carries no security_type attribute; guard before .lower()
+    if security_type and security_type.lower() == 'trustedlaunch':
+        if namespace.enable_secure_boot is None:
+            namespace.enable_secure_boot = True
+        if namespace.enable_vtpm is None:
+            namespace.enable_vtpm = True
+        namespace.security_type = 'TrustedLaunch'
+    elif security_type and security_type.lower() == 'standard':
+        logger.warning(UPGRADE_SECURITY_HINT)
+
+
+def validate_image_trusted_launch(namespace):
+    from ._constants import UPGRADE_SECURITY_HINT
+
+    # set securityType to Standard by default if no inputs by end user
+    if namespace.security_type is None:
+        namespace.security_type = 'Standard'
+    if namespace.security_type.lower() != 'trustedlaunch':
+        logger.warning(UPGRADE_SECURITY_HINT)
+
+
+def display_region_recommendation(cmd, namespace):
+    identified_region_maps = {
+        'westeurope': 'uksouth',
+        'francecentral': 'northeurope',
+        'germanywestcentral': 'northeurope'
+    }
+
+    identified_region = identified_region_maps.get(namespace.location)
+    from azure.cli.core import telemetry
+    telemetry.set_region_identified(namespace.location, identified_region)
+
+    if identified_region and cmd.cli_ctx.config.getboolean('core', 'display_region_identified', True):
+        from azure.cli.core.style import Style, print_styled_text
+        import sys
+        # pylint: disable=line-too-long
+        recommend_region = 'Selecting "' + identified_region + '" may reduce your costs. 
' \ + 'The region you\'ve selected may cost more for the same services. ' \ + 'You can disable this message in the future with the command ' + disable_config = '"az config set core.display_region_identified=false". ' + learn_more_msg = 'Learn more at https://go.microsoft.com/fwlink/?linkid=222571 ' + # Since the output of the "az vm create" command is a JSON object + # which can be used for automated script parsing + # So we output the notification message to sys.stderr + print_styled_text([(Style.WARNING, recommend_region), (Style.ACTION, disable_config), + (Style.WARNING, learn_more_msg)], file=sys.stderr) + print_styled_text(file=sys.stderr) + + +def import_aaz_by_profile(profile, module_name): + from azure.cli.core.aaz.utils import get_aaz_profile_module_name + profile_module_name = get_aaz_profile_module_name(profile_name=profile) + return importlib.import_module(f"azure.cli.command_modules.vm.aaz.{profile_module_name}.{module_name}") + + +def generate_ssh_keys_ed25519(private_key_filepath, public_key_filepath): + def _open(filename, mode): + return os.open(filename, flags=os.O_WRONLY | os.O_TRUNC | os.O_CREAT, mode=mode) + + from cryptography.hazmat.primitives import serialization + from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey + + ssh_dir = os.path.dirname(private_key_filepath) + if not os.path.exists(ssh_dir): + os.makedirs(ssh_dir, mode=0o700) + + if os.path.isfile(private_key_filepath): + # Try to use existing private key if it exists. + with open(private_key_filepath, "rb") as f: + private_bytes = f.read() + private_key = serialization.load_ssh_private_key(private_bytes, password=None) + logger.warning("Private SSH key file '%s' was found in the directory: '%s'. " + "A paired public key file '%s' will be generated.", + private_key_filepath, ssh_dir, public_key_filepath) + + else: + # Otherwise generate new private key. + private_key = Ed25519PrivateKey.generate() + + # The private key will look like: + # -----BEGIN OPENSSH PRIVATE KEY----- + # ... + # -----END OPENSSH PRIVATE KEY----- + private_bytes = private_key.private_bytes( + encoding=serialization.Encoding.PEM, + format=serialization.PrivateFormat.OpenSSH, + encryption_algorithm=serialization.NoEncryption() + ) + + with os.fdopen(_open(private_key_filepath, 0o600), "wb") as f: + f.write(private_bytes) + + public_key = private_key.public_key() + public_bytes = public_key.public_bytes( + encoding=serialization.Encoding.OpenSSH, + format=serialization.PublicFormat.OpenSSH) + + with os.fdopen(_open(public_key_filepath, 0o644), 'wb') as f: + f.write(public_bytes) + + return public_bytes.decode() diff --git a/src/azure-cli/azure/cli/command_modules/vm/azure_stack/commands.py b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/commands.py new file mode 100644 index 00000000000..a386d659b38 --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/commands.py @@ -0,0 +1,603 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# -------------------------------------------------------------------------------------------- + +from azure.cli.command_modules.vm.azure_stack._client_factory import (cf_vm, cf_avail_set, + cf_vm_ext, cf_vm_ext_image, + cf_vm_image, cf_vm_image_term, cf_usage, + cf_vmss, cf_disks, cf_snapshots, + cf_disk_accesses, cf_images, cf_run_commands, + cf_galleries, cf_gallery_images, + cf_gallery_image_versions, + cf_proximity_placement_groups, + cf_dedicated_hosts, cf_dedicated_host_groups, + cf_log_analytics_data_plane, + cf_disk_encryption_set, cf_shared_galleries, + cf_gallery_sharing_profile, + cf_shared_gallery_image, + cf_shared_gallery_image_version, + cf_capacity_reservation_groups, + cf_capacity_reservations, + cf_vmss_run_commands, cf_gallery_application, + cf_gallery_application_version, cf_restore_point, + cf_restore_point_collection, cf_community_gallery, + cf_community_gallery_image, + cf_community_gallery_image_version) +from azure.cli.command_modules.vm.azure_stack._format import ( + transform_ip_addresses, transform_vm, transform_vm_create_output, transform_vm_usage_list, transform_vm_list, + transform_disk_create_table_output, transform_sku_for_table_output, + transform_extension_show_table_output, get_vmss_table_output_transformer, + transform_vm_encryption_show_table_output, transform_log_analytics_query_output) +from azure.cli.command_modules.vm.azure_stack._validators import ( + process_vm_create_namespace, process_vmss_create_namespace, process_image_create_namespace, + process_disk_create_namespace, process_snapshot_create_namespace, + process_disk_encryption_namespace, process_assign_identity_namespace, + process_remove_identity_namespace, process_vm_secret_format, process_vm_vmss_stop, validate_vmss_update_namespace, + process_vm_update_namespace, process_set_applications_namespace, process_vm_disk_attach_namespace, + process_image_version_create_namespace, process_image_version_update_namespace, + process_image_version_undelete_namespace, process_ppg_create_namespace, process_vm_disk_detach_namespace) + +from azure.cli.command_modules.vm.azure_stack._image_builder import ( + process_image_template_create_namespace, process_img_tmpl_output_add_namespace, + process_img_tmpl_customizer_add_namespace, image_builder_client_factory, cf_img_bldr_image_templates) + +from azure.cli.core.commands import DeploymentOutputLongRunningOperation, CliCommandType +from azure.cli.core.commands.arm import deployment_validate_table_format, handle_template_based_exception + +from azure.cli.command_modules.monitor._exception_handler import exception_handler as monitor_exception_handler +from azure.cli.command_modules.monitor._client_factory import cf_metric_def +from azure.cli.core.profiles import ResourceType + + +# pylint: disable=line-too-long, too-many-statements, too-many-locals +def load_command_table(self, _): + custom_tmpl = 'azure.cli.command_modules.vm.azure_stack.custom#{}' + + compute_custom = CliCommandType(operations_tmpl=custom_tmpl) + + compute_disk_encryption_custom = CliCommandType( + operations_tmpl='azure.cli.command_modules.vm.azure_stack.disk_encryption#{}', + operation_group='virtual_machines' + ) + + image_builder_custom = CliCommandType( + operations_tmpl='azure.cli.command_modules.vm.azure_stack._image_builder#{}', + client_factory=image_builder_client_factory + ) + + compute_availset_sdk = CliCommandType( + operations_tmpl='azure.mgmt.compute.operations#AvailabilitySetsOperations.{}', + client_factory=cf_avail_set, + operation_group='availability_sets' + ) + + 
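+    # Each CliCommandType pins an operations template; when a command is registered the
+    # loader fills the '{}' placeholder with the operation name, so
+    # 'azure.mgmt.compute.operations#AvailabilitySetsOperations.{}' combined with 'list'
+    # resolves to AvailabilitySetsOperations.list on the SDK client.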
compute_disk_sdk = CliCommandType( + operations_tmpl='azure.mgmt.compute.operations#DisksOperations.{}', + client_factory=cf_disks, + operation_group='disks' + ) + + compute_disk_access_sdk = CliCommandType( + operations_tmpl='azure.mgmt.compute.operations#DiskAccessesOperations.{}', + client_factory=cf_disk_accesses, + operation_group='disk_accesses' + ) + + compute_image_sdk = CliCommandType( + operations_tmpl='azure.mgmt.compute.operations#ImagesOperations.{}', + client_factory=cf_images + ) + + compute_snapshot_sdk = CliCommandType( + operations_tmpl='azure.mgmt.compute.operations#SnapshotsOperations.{}', + client_factory=cf_snapshots + ) + + compute_vm_sdk = CliCommandType( + operations_tmpl='azure.mgmt.compute.operations#VirtualMachinesOperations.{}', + client_factory=cf_vm + ) + + compute_vm_extension_sdk = CliCommandType( + operations_tmpl='azure.mgmt.compute.operations#VirtualMachineExtensionsOperations.{}', + client_factory=cf_vm_ext + ) + + compute_vm_extension_image_sdk = CliCommandType( + operations_tmpl='azure.mgmt.compute.operations#VirtualMachineExtensionImagesOperations.{}', + client_factory=cf_vm_ext_image + ) + + compute_vm_image_sdk = CliCommandType( + operations_tmpl='azure.mgmt.compute.operations#VirtualMachineImagesOperations.{}', + client_factory=cf_vm_image + ) + + compute_vm_image_term_sdk = CliCommandType( + operations_tmpl='azure.mgmt.marketplaceordering.operations#MarketplaceAgreementsOperations.{}', + client_factory=cf_vm_image_term + ) + + compute_vm_usage_sdk = CliCommandType( + operations_tmpl='azure.mgmt.compute.operations#UsageOperations.{}', + client_factory=cf_usage + ) + + compute_vm_run_sdk = CliCommandType( + operations_tmpl='azure.mgmt.compute.operations#VirtualMachineRunCommandsOperations.{}', + client_factory=cf_run_commands + ) + + compute_vmss_run_sdk = CliCommandType( + operations_tmpl='azure.mgmt.compute.operations#VirtualMachineScaleSetVmRunCommandsOperations.{}', + client_factory=cf_vmss_run_commands + ) + + compute_vmss_sdk = CliCommandType( + operations_tmpl='azure.mgmt.compute.operations#VirtualMachineScaleSetsOperations.{}', + client_factory=cf_vmss, + operation_group='virtual_machine_scale_sets' + ) + + compute_galleries_sdk = CliCommandType( + operations_tmpl='azure.mgmt.compute.operations#GalleriesOperations.{}', + client_factory=cf_galleries, + ) + + compute_gallery_images_sdk = CliCommandType( + operations_tmpl='azure.mgmt.compute.operations#GalleryImagesOperations.{}', + client_factory=cf_gallery_images, + ) + + compute_gallery_image_versions_sdk = CliCommandType( + operations_tmpl='azure.mgmt.compute.operations#GalleryImageVersionsOperations.{}', + client_factory=cf_gallery_image_versions, + ) + + compute_gallery_application_sdk = CliCommandType( + operations_tmpl='azure.mgmt.compute.operations#GalleryApplicationsOperations.{}', + client_factory=cf_gallery_application, + ) + + compute_gallery_application_version_sdk = CliCommandType( + operations_tmpl='azure.mgmt.compute.operations#GalleryApplicationVersionsOperations.{}', + client_factory=cf_gallery_application_version, + ) + + compute_proximity_placement_groups_sdk = CliCommandType( + operations_tmpl='azure.mgmt.compute.operations#ProximityPlacementGroupsOperations.{}', + ) + + compute_dedicated_host_sdk = CliCommandType( + operations_tmpl="azure.mgmt.compute.operations#DedicatedHostsOperations.{}", + client_factory=cf_dedicated_hosts, + ) + + compute_dedicated_host_groups_sdk = CliCommandType( + operations_tmpl="azure.mgmt.compute.operations#DedicatedHostGroupsOperations.{}", + 
client_factory=cf_dedicated_host_groups, + ) + + image_builder_image_templates_sdk = CliCommandType( + operations_tmpl="azure.mgmt.imagebuilder.operations#VirtualMachineImageTemplatesOperations.{}", + client_factory=cf_img_bldr_image_templates, + ) + + compute_disk_encryption_set_sdk = CliCommandType( + operations_tmpl='azure.mgmt.compute.operations#DiskEncryptionSetsOperations.{}', + client_factory=cf_disk_encryption_set + ) + + monitor_custom = CliCommandType( + operations_tmpl='azure.cli.command_modules.monitor.custom#{}', + exception_handler=monitor_exception_handler + ) + + metric_definitions_sdk = CliCommandType( + operations_tmpl='azure.mgmt.monitor.operations#MetricDefinitionsOperations.{}', + resource_type=ResourceType.MGMT_MONITOR, + client_factory=cf_metric_def, + operation_group='metric_definitions', + exception_handler=monitor_exception_handler + ) + + capacity_reservation_groups_sdk = CliCommandType( + operations_tmpl='azure.mgmt.compute.operations#CapacityReservationGroupsOperations.{}', + client_factory=cf_capacity_reservation_groups + ) + + capacity_reservations_sdk = CliCommandType( + operations_tmpl='azure.mgmt.compute.operations#CapacityReservationsOperations.{}', + client_factory=cf_capacity_reservations + ) + + restore_point = CliCommandType( + operations_tmpl='azure.mgmt.compute.operations#RestorePointsOperations.{}', + client_factory=cf_restore_point + ) + + restore_point_collection = CliCommandType( + operations_tmpl='azure.mgmt.compute.operations#RestorePointCollectionsOperations.{}', + client_factory=cf_restore_point_collection + ) + + community_gallery_sdk = CliCommandType( + operations_tmpl='azure.mgmt.compute.operations#CommunityGalleriesOperations.{}', + client_factory=cf_community_gallery) + + community_gallery_image_sdk = CliCommandType( + operations_tmpl='azure.mgmt.compute.operations#CommunityGalleryImagesOperations.{}', + client_factory=cf_community_gallery_image) + + community_gallery_image_version_sdk = CliCommandType( + operations_tmpl='azure.mgmt.compute.operations#CommunityGalleryImageVersionsOperations.{}', + client_factory=cf_community_gallery_image_version) + + with self.command_group('disk', compute_disk_sdk, operation_group='disks', min_api='2017-03-30') as g: + g.custom_command('create', 'create_managed_disk', supports_no_wait=True, table_transformer=transform_disk_create_table_output, validator=process_disk_create_namespace) + g.custom_command('grant-access', 'grant_disk_access') + g.generic_update_command('update', custom_func_name='update_managed_disk', setter_name='begin_create_or_update', setter_arg_name='disk', supports_no_wait=True) + + with self.command_group('disk-encryption-set', compute_disk_encryption_set_sdk, operation_group='disk_encryption_sets', client_factory=cf_disk_encryption_set, min_api='2019-07-01') as g: + g.custom_command('create', 'create_disk_encryption_set', supports_no_wait=True) + g.generic_update_command('update', custom_func_name='update_disk_encryption_set', setter_arg_name='disk_encryption_set', setter_name='begin_create_or_update') + + with self.command_group('disk-encryption-set identity', compute_disk_encryption_set_sdk, operation_group='disk_encryption_sets', client_factory=cf_disk_encryption_set, min_api='2022-03-02') as g: + g.custom_command('assign', 'assign_disk_encryption_set_identity') + g.custom_command('remove', 'remove_disk_encryption_set_identity', confirmation=True) + g.custom_show_command('show', 'show_disk_encryption_set_identity') + + with self.command_group('disk-access', 
compute_disk_access_sdk, operation_group='disk_accesses', client_factory=cf_disk_accesses, min_api='2020-05-01') as g: + g.custom_command('create', 'create_disk_access', supports_no_wait=True) + g.generic_update_command('update', setter_name='set_disk_access', setter_type=compute_custom, supports_no_wait=True) + + with self.command_group('image', compute_image_sdk, min_api='2016-04-30-preview') as g: + g.custom_command('create', 'create_image', validator=process_image_create_namespace) + g.generic_update_command('update', setter_name='begin_create_or_update', custom_func_name='update_image') + + with self.command_group('image builder', image_builder_image_templates_sdk, custom_command_type=image_builder_custom) as g: + g.custom_command('create', 'create_image_template', supports_no_wait=True, supports_local_cache=True, validator=process_image_template_create_namespace) + g.custom_command('list', 'list_image_templates') + g.show_command('show', 'get') + g.command('delete', 'begin_delete') + g.generic_update_command('update', setter_name='begin_create_or_update', supports_local_cache=True) # todo Update fails for now as service does not support updates + g.wait_command('wait') + g.command('run', 'begin_run', supports_no_wait=True) + g.custom_command('show-runs', 'show_build_output') + g.command('cancel', 'begin_cancel') + + with self.command_group('image builder identity', image_builder_image_templates_sdk, custom_command_type=image_builder_custom) as g: + g.custom_command('assign', 'assign_template_identity', supports_local_cache=True) + g.custom_command('remove', 'remove_template_identity', supports_local_cache=True, confirmation=True) + g.custom_show_command('show', 'show_template_identity', supports_local_cache=True) + + with self.command_group('image builder customizer', image_builder_image_templates_sdk, custom_command_type=image_builder_custom) as g: + g.custom_command('add', 'add_template_customizer', supports_local_cache=True, validator=process_img_tmpl_customizer_add_namespace) + g.custom_command('remove', 'remove_template_customizer', supports_local_cache=True) + g.custom_command('clear', 'clear_template_customizer', supports_local_cache=True) + + with self.command_group('image builder output', image_builder_image_templates_sdk, custom_command_type=image_builder_custom) as g: + g.custom_command('add', 'add_template_output', supports_local_cache=True, validator=process_img_tmpl_output_add_namespace) + g.custom_command('remove', 'remove_template_output', supports_local_cache=True) + g.custom_command('clear', 'clear_template_output', supports_local_cache=True) + + with self.command_group('image builder output versioning', image_builder_image_templates_sdk, custom_command_type=image_builder_custom) as g: + g.custom_command('set', 'set_template_output_versioning', supports_local_cache=True) + g.custom_command('remove', 'remove_template_output_versioning', supports_local_cache=True) + g.custom_show_command('show', 'show_template_output_versioning', supports_local_cache=True) + + with self.command_group('image builder validator', image_builder_image_templates_sdk, custom_command_type=image_builder_custom) as g: + g.custom_command('add', 'add_template_validator', supports_local_cache=True) + g.custom_command('remove', 'remove_template_validator', supports_local_cache=True) + g.custom_show_command('show', 'show_template_validator', supports_local_cache=True) + + with self.command_group('image builder optimizer', image_builder_image_templates_sdk, custom_command_type=image_builder_custom) 
as g: + g.custom_command('add', 'add_or_update_template_optimizer', supports_local_cache=True) + g.custom_command('update', 'add_or_update_template_optimizer', supports_local_cache=True) + g.custom_command('remove', 'remove_template_optimizer', supports_local_cache=True) + g.custom_show_command('show', 'show_template_optimizer', supports_local_cache=True) + + with self.command_group('image builder error-handler', image_builder_image_templates_sdk, custom_command_type=image_builder_custom) as g: + g.custom_command('add', 'add_template_error_handler', supports_local_cache=True) + g.custom_command('remove', 'remove_template_error_handler', supports_local_cache=True) + g.custom_show_command('show', 'show_template_error_handler', supports_local_cache=True) + + with self.command_group('snapshot', compute_snapshot_sdk, operation_group='snapshots', min_api='2016-04-30-preview') as g: + g.custom_command('create', 'create_snapshot', validator=process_snapshot_create_namespace, supports_no_wait=True) + g.custom_command('grant-access', 'grant_snapshot_access') + g.generic_update_command('update', custom_func_name='update_snapshot', setter_name='begin_create_or_update', setter_arg_name='snapshot', supports_no_wait=True) + + with self.command_group('vm', compute_vm_sdk) as g: + g.custom_command('identity assign', 'assign_vm_identity', validator=process_assign_identity_namespace) + g.custom_command('identity remove', 'remove_vm_identity', validator=process_remove_identity_namespace, min_api='2017-12-01') + g.custom_show_command('identity show', 'show_vm_identity') + + g.custom_command('application set', 'set_vm_applications', validator=process_set_applications_namespace, min_api='2021-07-01') + g.custom_command('application list', 'list_vm_applications', min_api='2021-07-01') + + g.custom_command('capture', 'capture_vm') + g.custom_command('create', 'create_vm', transform=transform_vm_create_output, supports_no_wait=True, table_transformer=deployment_validate_table_format, validator=process_vm_create_namespace, exception_handler=handle_template_based_exception) + g.command('delete', 'begin_delete', confirmation=True, supports_no_wait=True) + g.custom_command('get-instance-view', 'get_instance_view', table_transformer='{Name:name, ResourceGroup:resourceGroup, Location:location, ProvisioningState:provisioningState, PowerState:instanceView.statuses[1].displayStatus}') + g.custom_command('list', 'list_vm', table_transformer=transform_vm_list) + g.custom_command('list-ip-addresses', 'list_vm_ip_addresses', table_transformer=transform_ip_addresses) + g.custom_command('list-skus', 'list_skus', table_transformer=transform_sku_for_table_output, min_api='2017-03-30') + g.command('list-usage', 'list', command_type=compute_vm_usage_sdk, transform=transform_vm_usage_list, table_transformer='[].{Name:localName, CurrentValue:currentValue, Limit:limit}') + g.custom_command('open-port', 'open_vm_port') + g.custom_command('resize', 'resize_vm', supports_no_wait=True) + g.custom_command('restart', 'restart_vm', supports_no_wait=True) + g.custom_show_command('show', 'show_vm', table_transformer=transform_vm) + g.command('stop', 'begin_power_off', supports_no_wait=True, validator=process_vm_vmss_stop) + g.generic_update_command('update', getter_name='get_vm_to_update', setter_name='update_vm', setter_type=compute_custom, command_type=compute_custom, supports_no_wait=True, validator=process_vm_update_namespace) + g.wait_command('wait', getter_name='get_instance_view', getter_type=compute_custom) + 
g.custom_command('auto-shutdown', 'auto_shutdown_vm') + + with self.command_group('vm', compute_vm_sdk, client_factory=cf_vm) as g: + g.custom_command('install-patches', 'install_vm_patches', supports_no_wait=True, min_api='2020-12-01') + + with self.command_group('vm availability-set', compute_availset_sdk) as g: + g.custom_command('convert', 'convert_av_set_to_managed_disk', min_api='2016-04-30-preview') + g.custom_command('create', 'create_av_set', table_transformer=deployment_validate_table_format, supports_no_wait=True, exception_handler=handle_template_based_exception) + g.custom_command('list', 'list_av_sets') + g.generic_update_command('update', custom_func_name='update_av_set') + + with self.command_group('vm boot-diagnostics', compute_vm_sdk) as g: + g.custom_command('disable', 'disable_boot_diagnostics') + g.custom_command('enable', 'enable_boot_diagnostics') + g.custom_command('get-boot-log', 'get_boot_log') + g.custom_command('get-boot-log-uris', 'get_boot_log_uris', min_api='2020-06-01') + + with self.command_group('vm diagnostics', compute_vm_sdk) as g: + g.custom_command('set', 'set_diagnostics_extension') + g.custom_command('get-default-config', 'show_default_diagnostics_configuration') + + with self.command_group('vm disk', compute_vm_sdk, min_api='2017-03-30') as g: + g.custom_command('attach', 'attach_managed_data_disk', validator=process_vm_disk_attach_namespace) + g.custom_command('detach', 'detach_managed_data_disk', validator=process_vm_disk_detach_namespace) + + with self.command_group('vm encryption', custom_command_type=compute_disk_encryption_custom) as g: + g.custom_command('enable', 'encrypt_vm', validator=process_disk_encryption_namespace) + g.custom_command('disable', 'decrypt_vm') + g.custom_show_command('show', 'show_vm_encryption_status', table_transformer=transform_vm_encryption_show_table_output) + + with self.command_group('vm extension', compute_vm_extension_sdk) as g: + g.custom_show_command('show', 'show_extensions', table_transformer=transform_extension_show_table_output) + g.custom_command('set', 'set_extension', supports_no_wait=True) + g.custom_command('list', 'list_extensions', table_transformer='[].' 
+ transform_extension_show_table_output) + g.wait_command('wait') + + with self.command_group('vm extension image', compute_vm_extension_image_sdk) as g: + g.custom_command('list', 'list_vm_extension_images') + + with self.command_group('vm image', compute_vm_image_sdk) as g: + g.custom_command('list-offers', 'list_offers') + g.custom_command('list-publishers', 'list_publishers') + g.custom_command('list-skus', 'list_sku') + g.custom_command('list', 'list_vm_images') + g.custom_command('accept-terms', 'accept_market_ordering_terms', + deprecate_info=g.deprecate(redirect='az vm image terms accept', expiration='3.0.0')) + g.custom_show_command('show', 'show_vm_image') + + with self.command_group('vm image terms', compute_vm_image_term_sdk, validator=None) as g: + g.custom_command('accept', 'accept_terms') + g.custom_command('cancel', 'cancel_terms') + g.custom_show_command('show', 'get_terms') + + with self.command_group('vm nic', compute_vm_sdk) as g: + g.custom_command('add', 'add_vm_nic') + g.custom_command('remove', 'remove_vm_nic') + g.custom_command('set', 'set_vm_nic') + g.custom_show_command('show', 'show_vm_nic') + g.custom_command('list', 'list_vm_nics') + + with self.command_group('vm run-command', compute_vm_run_sdk, client_factory=cf_run_commands, operation_group='virtual_machine_run_commands', min_api='2017-03-30') as g: + g.custom_command('invoke', 'vm_run_command_invoke', supports_no_wait=True) + g.custom_command('list', 'vm_run_command_list') + g.custom_show_command('show', 'vm_run_command_show') + g.custom_command('create', 'vm_run_command_create', supports_no_wait=True) + g.custom_command('update', 'vm_run_command_update', supports_no_wait=True) + g.custom_command('delete', 'vm_run_command_delete', supports_no_wait=True, confirmation=True) + g.custom_wait_command('wait', 'vm_run_command_show') + + with self.command_group('vm secret', compute_vm_sdk) as g: + g.custom_command('format', 'get_vm_format_secret', validator=process_vm_secret_format) + g.custom_command('add', 'add_vm_secret') + g.custom_command('list', 'list_vm_secrets') + g.custom_command('remove', 'remove_vm_secret') + + with self.command_group('vm unmanaged-disk', compute_vm_sdk) as g: + g.custom_command('attach', 'attach_unmanaged_data_disk') + g.custom_command('detach', 'detach_unmanaged_data_disk') + g.custom_command('list', 'list_unmanaged_disks') + + with self.command_group('vm user', compute_vm_sdk, supports_no_wait=True) as g: + g.custom_command('update', 'set_user') + g.custom_command('delete', 'delete_user') + g.custom_command('reset-ssh', 'reset_linux_ssh') + + with self.command_group('vm host', compute_dedicated_host_sdk, client_factory=cf_dedicated_hosts, + min_api='2019-03-01') as g: + g.custom_command('get-instance-view', 'get_dedicated_host_instance_view') + g.custom_command('create', 'create_dedicated_host') + g.generic_update_command('update', setter_name='begin_create_or_update') + + with self.command_group('vm host group', compute_dedicated_host_groups_sdk, client_factory=cf_dedicated_host_groups, + min_api='2019-03-01') as g: + g.custom_command('get-instance-view', 'get_dedicated_host_group_instance_view', min_api='2020-06-01') + g.custom_command('create', 'create_dedicated_host_group') + g.generic_update_command('update') + + with self.command_group('vmss', compute_vmss_sdk, operation_group='virtual_machine_scale_sets') as g: + g.custom_command('identity assign', 'assign_vmss_identity', validator=process_assign_identity_namespace) + g.custom_command('identity remove', 
'remove_vmss_identity', validator=process_remove_identity_namespace, min_api='2017-12-01', is_preview=True) + g.custom_show_command('identity show', 'show_vmss_identity') + g.custom_command('application set', 'set_vmss_applications', validator=process_set_applications_namespace, min_api='2021-07-01') + g.custom_command('application list', 'list_vmss_applications', min_api='2021-07-01') + g.custom_command('create', 'create_vmss', transform=DeploymentOutputLongRunningOperation(self.cli_ctx, 'Starting vmss create'), supports_no_wait=True, table_transformer=deployment_validate_table_format, validator=process_vmss_create_namespace, exception_handler=handle_template_based_exception) + g.custom_command('deallocate', 'deallocate_vmss', supports_no_wait=True) + g.custom_command('delete-instances', 'delete_vmss_instances', supports_no_wait=True) + g.custom_command('get-instance-view', 'get_vmss_instance_view', table_transformer='{ProvisioningState:statuses[0].displayStatus, PowerState:statuses[1].displayStatus}') + g.custom_command('list-instance-connection-info', 'list_vmss_instance_connection_info') + g.custom_command('list-instance-public-ips', 'list_vmss_instance_public_ips') + g.custom_command('reimage', 'reimage_vmss', supports_no_wait=True, min_api='2017-03-30') + g.custom_command('restart', 'restart_vmss', supports_no_wait=True) + g.custom_command('scale', 'scale_vmss', supports_no_wait=True) + g.custom_show_command('show', 'get_vmss', table_transformer=get_vmss_table_output_transformer(self, False)) + g.custom_command('start', 'start_vmss', supports_no_wait=True) + g.custom_command('stop', 'stop_vmss', supports_no_wait=True, validator=process_vm_vmss_stop) + g.generic_update_command('update', getter_name='get_vmss_modified', setter_name='update_vmss', supports_no_wait=True, command_type=compute_custom, validator=validate_vmss_update_namespace) + g.custom_command('update-instances', 'update_vmss_instances', supports_no_wait=True) + g.wait_command('wait', getter_name='get_vmss', getter_type=compute_custom) + g.custom_command('set-orchestration-service-state', 'set_orchestration_service_state', supports_no_wait=True) + + with self.command_group('vmss diagnostics', compute_vmss_sdk) as g: + g.custom_command('set', 'set_vmss_diagnostics_extension') + g.custom_command('get-default-config', 'show_default_diagnostics_configuration') + + with self.command_group('vmss disk', compute_vmss_sdk, min_api='2017-03-30') as g: + g.custom_command('attach', 'attach_managed_data_disk_to_vmss') + g.custom_command('detach', 'detach_disk_from_vmss') + + with self.command_group('vmss encryption', custom_command_type=compute_disk_encryption_custom, min_api='2017-03-30') as g: + g.custom_command('enable', 'encrypt_vmss', validator=process_disk_encryption_namespace) + g.custom_command('disable', 'decrypt_vmss') + g.custom_show_command('show', 'show_vmss_encryption_status') + + with self.command_group('vmss extension', compute_vmss_sdk) as g: + g.custom_command('delete', 'delete_vmss_extension', supports_no_wait=True) + g.custom_show_command('show', 'get_vmss_extension') + g.custom_command('set', 'set_vmss_extension', supports_no_wait=True) + g.custom_command('list', 'list_vmss_extensions') + g.custom_command('upgrade', 'upgrade_vmss_extension', min_api='2020-06-01', supports_no_wait=True) + + with self.command_group('vmss extension image', compute_vm_extension_image_sdk) as g: + g.custom_command('list', 'list_vm_extension_images') + + with self.command_group('vmss run-command', compute_vmss_run_sdk, 
client_factory=cf_vmss_run_commands, min_api='2018-04-01') as g: + g.custom_command('invoke', 'vmss_run_command_invoke') + g.custom_command('list', 'vmss_run_command_list') + g.custom_show_command('show', 'vmss_run_command_show') + g.custom_command('create', 'vmss_run_command_create', supports_no_wait=True) + g.custom_command('update', 'vmss_run_command_update', supports_no_wait=True) + g.custom_command('delete', 'vmss_run_command_delete', supports_no_wait=True, confirmation=True) + + with self.command_group('sig', compute_galleries_sdk, operation_group='galleries', min_api='2018-06-01') as g: + g.custom_command('create', 'create_image_gallery') + g.custom_show_command('show', 'show_image_gallery') + g.generic_update_command('update', setter_type=compute_custom, setter_name='update_image_galleries', setter_arg_name='gallery') + + with self.command_group('sig', community_gallery_sdk, client_factory=cf_community_gallery, operation_group='shared_galleries', min_api='2022-01-03') as g: + g.custom_command('list-community', 'sig_community_gallery_list') + + with self.command_group('sig image-definition', community_gallery_image_sdk, client_factory=cf_community_gallery_image, operation_group='shared_galleries', min_api='2022-01-03') as g: + g.command('show-community', 'get') + g.custom_command('list-community', 'sig_community_image_definition_list') + + with self.command_group('sig image-version', community_gallery_image_version_sdk, client_factory=cf_community_gallery_image_version, operation_group='shared_galleries', min_api='2022-01-03') as g: + g.custom_command('list-community', 'sig_community_image_version_list') + + with self.command_group('sig image-definition', compute_gallery_images_sdk, operation_group='gallery_images', min_api='2018-06-01') as g: + g.custom_command('create', 'create_gallery_image') + g.generic_update_command('update', setter_name='begin_create_or_update', setter_arg_name='gallery_image') + + with self.command_group('sig image-version', compute_gallery_image_versions_sdk, operation_group='gallery_image_versions', min_api='2018-06-01') as g: + g.show_command('show', 'get', table_transformer='{Name:name, ResourceGroup:resourceGroup, ProvisioningState:provisioningState, TargetRegions: publishingProfile.targetRegions && join(`, `, publishingProfile.targetRegions[*].name), EdgeZones: publishingProfile.targetExtendedLocations && join(`, `, publishingProfile.targetExtendedLocations[*].name), ReplicationState:replicationStatus.aggregatedState}') + g.custom_command('create', 'create_image_version', supports_no_wait=True, validator=process_image_version_create_namespace) + g.custom_command('undelete', 'undelete_image_version', supports_no_wait=True, min_api='2021-07-01', validator=process_image_version_undelete_namespace, is_preview=True) + g.generic_update_command('update', getter_name='get_image_version_to_update', setter_arg_name='gallery_image_version', setter_name='update_image_version', setter_type=compute_custom, command_type=compute_custom, supports_no_wait=True, validator=process_image_version_update_namespace) + g.wait_command('wait') + + vm_shared_gallery = CliCommandType( + operations_tmpl='azure.mgmt.compute.operations._shared_galleries_operations#SharedGalleriesOperations.{}', + client_factory=cf_shared_galleries, + operation_group='shared_galleries' + ) + with self.command_group('sig', vm_shared_gallery) as g: + g.custom_command('list-shared', 'sig_shared_gallery_list', client_factory=cf_shared_galleries, + operation_group='shared_galleries', min_api='2020-09-30') 
+ + vm_gallery_sharing_profile = CliCommandType( + operations_tmpl=( + 'azure.mgmt.compute.operations._gallery_sharing_profile_operations#GallerySharingProfileOperations.{}' + ), + client_factory=cf_gallery_sharing_profile, + operation_group='shared_galleries' + ) + with self.command_group('sig share', vm_gallery_sharing_profile, + client_factory=cf_gallery_sharing_profile, + operation_group='shared_galleries', + min_api='2020-09-30') as g: + g.custom_command('add', 'sig_share_update', supports_no_wait=True) + g.custom_command('remove', 'sig_share_update', supports_no_wait=True) + g.custom_command('reset', 'sig_share_reset', supports_no_wait=True) + g.custom_command('enable-community', 'sig_share_update', supports_no_wait=True) + g.wait_command('wait', getter_name='get_gallery_instance', getter_type=compute_custom) + + vm_shared_gallery_image = CliCommandType( + operations_tmpl='azure.mgmt.compute.operations._shared_gallery_images_operations#SharedGalleryImagesOperations.' + '{}', + client_factory=cf_shared_gallery_image, + operation_group='shared_galleries') + with self.command_group('sig image-definition', vm_shared_gallery_image, min_api='2020-09-30', operation_group='shared_galleries', + client_factory=cf_shared_gallery_image) as g: + g.custom_command('list-shared', 'sig_shared_image_definition_list') + + vm_shared_gallery_image_version = CliCommandType( + operations_tmpl='azure.mgmt.compute.operations#SharedGalleryImageVersionsOperations.{}', + client_factory=cf_shared_gallery_image_version, + operation_group='shared_galleries') + with self.command_group('sig image-version', vm_shared_gallery_image_version, min_api='2020-09-30', + operation_group='shared_galleries', + client_factory=cf_shared_gallery_image_version) as g: + g.custom_command('list-shared', 'sig_shared_image_version_list') + + with self.command_group('sig gallery-application', compute_gallery_application_sdk, client_factory=cf_gallery_application, min_api='2021-07-01', operation_group='gallery_applications') as g: + g.custom_command('create', 'gallery_application_create', supports_no_wait=True) + g.custom_command('update', 'gallery_application_update', supports_no_wait=True) + g.wait_command('wait') + + with self.command_group('sig gallery-application version', compute_gallery_application_version_sdk, client_factory=cf_gallery_application_version, min_api='2021-07-01', operation_group='gallery_application_versions') as g: + g.custom_command('create', 'gallery_application_version_create', supports_no_wait=True) + g.custom_command('update', 'gallery_application_version_update', supports_no_wait=True) + + with self.command_group('ppg', compute_proximity_placement_groups_sdk, min_api='2018-04-01', client_factory=cf_proximity_placement_groups) as g: + g.custom_command('create', 'create_proximity_placement_group', validator=process_ppg_create_namespace) + g.custom_command('list', 'list_proximity_placement_groups') + g.generic_update_command('update', setter_name='create_or_update', custom_func_name='update_ppg') + + with self.command_group('vm monitor log', client_factory=cf_log_analytics_data_plane) as g: + g.custom_command('show', 'execute_query_for_vm', transform=transform_log_analytics_query_output) # pylint: disable=show-command + + with self.command_group('vm monitor metrics', custom_command_type=monitor_custom, command_type=metric_definitions_sdk, resource_type=ResourceType.MGMT_MONITOR, operation_group='metric_definitions', min_api='2018-01-01', is_preview=True) as g: + from 
azure.cli.command_modules.monitor.transformers import metrics_table, metrics_definitions_table + from azure.cli.core.profiles._shared import APIVersionException + try: + g.custom_command('tail', 'list_metrics', command_type=monitor_custom, table_transformer=metrics_table) + g.command('list-definitions', 'list', table_transformer=metrics_definitions_table) + except APIVersionException: + pass + + with self.command_group('capacity reservation group', capacity_reservation_groups_sdk, min_api='2021-04-01', + client_factory=cf_capacity_reservation_groups) as g: + g.custom_command('create', 'create_capacity_reservation_group') + g.custom_command('update', 'update_capacity_reservation_group') + g.custom_show_command('show', 'show_capacity_reservation_group') + + with self.command_group('capacity reservation', capacity_reservations_sdk, min_api='2021-04-01', + client_factory=cf_capacity_reservations) as g: + g.custom_command('create', 'create_capacity_reservation', supports_no_wait=True) + g.custom_command('update', 'update_capacity_reservation', supports_no_wait=True) + g.custom_show_command('show', 'show_capacity_reservation') + + with self.command_group('restore-point', restore_point, client_factory=cf_restore_point, min_api='2021-03-01') as g: + g.custom_show_command('show', 'restore_point_show') + g.custom_command('create', 'restore_point_create', supports_no_wait=True) + g.wait_command('wait') + + with self.command_group('restore-point collection', restore_point_collection, min_api='2021-03-01', + client_factory=cf_restore_point_collection) as g: + g.custom_show_command('show', 'restore_point_collection_show') + g.custom_command('create', 'restore_point_collection_create') + g.custom_command('update', 'restore_point_collection_update') + g.wait_command('wait') diff --git a/src/azure-cli/azure/cli/command_modules/vm/azure_stack/custom.py b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/custom.py new file mode 100644 index 00000000000..56e310f37fc --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/custom.py @@ -0,0 +1,6222 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+#
+# Generation mode: Incremental
+# --------------------------------------------------------------------------
+
+# pylint: disable=no-self-use, too-many-lines, no-else-return
+# pylint: disable=protected-access
+import json
+import os
+from urllib.parse import urlparse
+# urlopen is imported for automation purposes
+from urllib.request import urlopen  # noqa, pylint: disable=import-error,unused-import,ungrouped-imports
+
+import requests
+from knack.log import get_logger
+from knack.util import CLIError
+
+from azure.cli.command_modules.vm.azure_stack._validators import _get_resource_group_from_vault_name
+from azure.cli.core.azclierror import (
+    CLIInternalError,
+    ResourceNotFoundError,
+    ValidationError,
+    RequiredArgumentMissingError,
+    ArgumentUsageError
+)
+from azure.cli.core.commands import LongRunningOperation, DeploymentOutputLongRunningOperation
+from azure.cli.core.commands.client_factory import get_mgmt_service_client, get_data_service_client
+from azure.cli.core.commands.validators import validate_file_or_dict
+from azure.cli.core.profiles import ResourceType
+from azure.cli.core.util import sdk_no_wait
+from ._actions import (load_images_from_aliases_doc, load_extension_images_thru_services,
+                       load_images_thru_services, _get_latest_image_version)
+from ._client_factory import (_compute_client_factory, cf_vm_image_term, _dev_test_labs_client_factory)
+from ._vm_diagnostics_templates import get_default_diag_config
+from ._vm_utils import read_content_if_is_file, import_aaz_by_profile
+from ..aaz.latest.vm.disk import AttachDetachDataDisk
+
+from ..generated.custom import *  # noqa: F403, pylint: disable=unused-wildcard-import,wildcard-import
+try:
+    from ..manual.custom import *  # noqa: F403, pylint: disable=unused-wildcard-import,wildcard-import
+except ImportError:
+    pass
+
+logger = get_logger(__name__)
+
+# Use the same name as the portal, so people can update from both the CLI and the portal
+# (VM doesn't allow multiple handlers for the same extension)
+_ACCESS_EXT_HANDLER_NAME = 'enablevmaccess'
+
+_LINUX_ACCESS_EXT = 'VMAccessForLinux'
+_WINDOWS_ACCESS_EXT = 'VMAccessAgent'
+_LINUX_DIAG_EXT = 'LinuxDiagnostic'
+_WINDOWS_DIAG_EXT = 'IaaSDiagnostics'
+_LINUX_OMS_AGENT_EXT = 'OmsAgentForLinux'
+_WINDOWS_OMS_AGENT_EXT = 'MicrosoftMonitoringAgent'
+extension_mappings = {
+    _LINUX_ACCESS_EXT: {
+        'version': '1.5',
+        'publisher': 'Microsoft.OSTCExtensions'
+    },
+    _WINDOWS_ACCESS_EXT: {
+        'version': '2.4',
+        'publisher': 'Microsoft.Compute'
+    },
+    _LINUX_DIAG_EXT: {
+        'version': '3.0',
+        'publisher': 'Microsoft.Azure.Diagnostics'
+    },
+    _WINDOWS_DIAG_EXT: {
+        'version': '1.5',
+        'publisher': 'Microsoft.Azure.Diagnostics'
+    },
+    _LINUX_OMS_AGENT_EXT: {
+        'version': '1.0',
+        'publisher': 'Microsoft.EnterpriseCloud.Monitoring'
+    },
+    _WINDOWS_OMS_AGENT_EXT: {
+        'version': '1.0',
+        'publisher': 'Microsoft.EnterpriseCloud.Monitoring'
+    }
+}
+
+remove_basic_option_msg = "It's recommended to create with `%s`. " \
+                          "Please be aware that the Basic option will be removed in the future."
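+# Illustrative usage (mirrors the call in create_vm below):
+#     logger.warning(remove_basic_option_msg, "--public-ip-sku Standard")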
+
+
+def _construct_identity_info(identity_scope, identity_role, implicit_identity, external_identities):
+    info = {}
+    if identity_scope:
+        info['scope'] = identity_scope
+        info['role'] = str(identity_role)  # could be DefaultStr, so convert to string
+    info['userAssignedIdentities'] = external_identities or {}
+    info['systemAssignedIdentity'] = implicit_identity or ''
+    return info
+
+
+# for injecting test seams to produce predictable role assignment ids for playback
+def _gen_guid():
+    import uuid
+    return uuid.uuid4()
+
+
+def _get_access_extension_upgrade_info(extensions, name):
+    version = extension_mappings[name]['version']
+    publisher = extension_mappings[name]['publisher']
+
+    auto_upgrade = None
+
+    if extensions:
+        extension = next((e for e in extensions if e.name == name), None)
+        from packaging.version import parse  # pylint: disable=no-name-in-module,import-error
+        if extension and parse(extension.type_handler_version) < parse(version):
+            auto_upgrade = True
+        elif extension and parse(extension.type_handler_version) > parse(version):
+            version = extension.type_handler_version
+
+    return publisher, version, auto_upgrade
+
+
+def _get_extension_instance_name(instance_view, publisher, extension_type_name,
+                                 suggested_name=None):
+    extension_instance_name = suggested_name or extension_type_name
+    full_type_name = '.'.join([publisher, extension_type_name])
+    if instance_view.extensions:
+        ext = next((x for x in instance_view.extensions
+                    if x.type and (x.type.lower() == full_type_name.lower())), None)
+        if ext:
+            extension_instance_name = ext.name
+    return extension_instance_name
+
+
+def _get_storage_management_client(cli_ctx):
+    return get_mgmt_service_client(cli_ctx, ResourceType.MGMT_STORAGE)
+
+
+def _get_disk_lun(data_disks):
+    # start from 0 and return the first unused int as the lun, e.g. existing luns [0, 1, 3] -> 2
+    if not data_disks:
+        return 0
+
+    existing_luns = sorted([d.lun for d in data_disks])
+    for i, current in enumerate(existing_luns):
+        if current != i:
+            return i
+    return len(existing_luns)
+
+
+def _get_private_config(cli_ctx, resource_group_name, storage_account):
+    storage_mgmt_client = _get_storage_management_client(cli_ctx)
+    # pylint: disable=no-member
+    keys = storage_mgmt_client.storage_accounts.list_keys(resource_group_name, storage_account).keys
+
+    private_config = {
+        'storageAccountName': storage_account,
+        'storageAccountKey': keys[0].value
+    }
+    return private_config
+
+
+def _get_resource_group_location(cli_ctx, resource_group_name):
+    client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
+    # pylint: disable=no-member
+    return client.resource_groups.get(resource_group_name).location
+
+
+def _get_sku_object(cmd, sku):
+    if cmd.supported_api_version(min_api='2017-03-30'):
+        DiskSku = cmd.get_models('DiskSku')
+        return DiskSku(name=sku)
+    return sku
+
+
+def get_hyper_v_generation_from_vmss(cli_ctx, image_ref, location):  # pylint: disable=too-many-return-statements
+    from ._vm_utils import (is_valid_image_version_id, parse_gallery_image_id, is_valid_vm_image_id, parse_vm_image_id,
+                            parse_shared_gallery_image_id, parse_community_gallery_image_id)
+    if image_ref is None:
+        return None
+    if image_ref.id:
+        from ._client_factory import _compute_client_factory
+        if is_valid_image_version_id(image_ref.id):
+            image_info = parse_gallery_image_id(image_ref.id)
+            client = _compute_client_factory(cli_ctx, subscription_id=image_info[0]).gallery_images
+            gallery_image_info = client.get(
+                resource_group_name=image_info[1], gallery_name=image_info[2],
+                gallery_image_name=image_info[3])
+            return gallery_image_info.hyper_v_generation if hasattr(gallery_image_info, 'hyper_v_generation') else None
+        if is_valid_vm_image_id(image_ref.id):
+            sub, rg, image_name = parse_vm_image_id(image_ref.id)
+            client = _compute_client_factory(cli_ctx, subscription_id=sub).images
+            image_info = client.get(rg, image_name)
+            return image_info.hyper_v_generation if hasattr(image_info, 'hyper_v_generation') else None
+
+    if image_ref.shared_gallery_image_id is not None:
+        from ._client_factory import cf_shared_gallery_image
+        image_info = parse_shared_gallery_image_id(image_ref.shared_gallery_image_id)
+        gallery_image_info = cf_shared_gallery_image(cli_ctx).get(
+            location=location, gallery_unique_name=image_info[0], gallery_image_name=image_info[1])
+        return gallery_image_info.hyper_v_generation if hasattr(gallery_image_info, 'hyper_v_generation') else None
+
+    if image_ref.community_gallery_image_id is not None:
+        from ._client_factory import cf_community_gallery_image
+        image_info = parse_community_gallery_image_id(image_ref.community_gallery_image_id)
+        gallery_image_info = cf_community_gallery_image(cli_ctx).get(
+            location=location, public_gallery_name=image_info[0], gallery_image_name=image_info[1])
+        return gallery_image_info.hyper_v_generation if hasattr(gallery_image_info, 'hyper_v_generation') else None
+
+    if image_ref.offer and image_ref.publisher and image_ref.sku and image_ref.version:
+        from ._client_factory import cf_vm_image
+        version = image_ref.version
+        if version.lower() == 'latest':
+            from ._actions import _get_latest_image_version
+            version = _get_latest_image_version(cli_ctx, location, image_ref.publisher, image_ref.offer,
+                                                image_ref.sku)
+        vm_image_info = cf_vm_image(cli_ctx, '').get(
+            location, image_ref.publisher, image_ref.offer, image_ref.sku, version)
+        return vm_image_info.hyper_v_generation if hasattr(vm_image_info, 'hyper_v_generation') else None
+
+    return None
+
+
+def _grant_access(cmd, resource_group_name, name, duration_in_seconds, is_disk, access_level,
+                  secure_vm_guest_state_sas=None, file_format=None):
+    AccessLevel, GrantAccessData = cmd.get_models('AccessLevel', 'GrantAccessData')
+    client = _compute_client_factory(cmd.cli_ctx)
+    op = client.disks if is_disk else client.snapshots
+    grant_access_data = GrantAccessData(access=access_level or AccessLevel.read,
+                                        duration_in_seconds=duration_in_seconds)
+    if secure_vm_guest_state_sas:
+        grant_access_data.get_secure_vm_guest_state_sas = secure_vm_guest_state_sas
+    if file_format:
+        grant_access_data.file_format = file_format
+
+    return op.begin_grant_access(resource_group_name, name, grant_access_data)
+
+
+def _is_linux_os(vm):
+    os_type = None
+    if vm and vm.storage_profile and vm.storage_profile.os_disk and vm.storage_profile.os_disk.os_type:
+        os_type = vm.storage_profile.os_disk.os_type
+    if os_type:
+        return os_type.lower() == 'linux'
+    # os_type can be None for VM scale sets, so fall back to checking the OS configuration
+    if vm.os_profile.linux_configuration:
+        return bool(vm.os_profile.linux_configuration)
+    return False
+
+
+def _merge_secrets(secrets):
+    """
+    Merge a list of secrets. Each secret should be a dict fitting the following JSON structure:
+    [{ "sourceVault": { "id": "value" },
+      "vaultCertificates": [{ "certificateUrl": "value",
+      "certificateStore": "cert store name (only on windows)"}] }]
+    The array of secrets is merged on sourceVault.id.
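+    For example, two input entries that share the same sourceVault.id come out as
+    one entry whose vaultCertificates lists are concatenated.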
+    :param secrets:
+    :return:
+    """
+    merged = {}
+    vc_name = 'vaultCertificates'
+    for outer in secrets:
+        for secret in outer:
+            if secret['sourceVault']['id'] not in merged:
+                merged[secret['sourceVault']['id']] = []
+            merged[secret['sourceVault']['id']] = \
+                secret[vc_name] + merged[secret['sourceVault']['id']]
+
+    # transform the reduced map to vm format
+    formatted = [{'sourceVault': {'id': source_id},
+                  'vaultCertificates': value}
+                 for source_id, value in list(merged.items())]
+    return formatted
+
+
+def _normalize_extension_version(cli_ctx, publisher, vm_extension_name, version, location):
+    def _trim_away_build_number(version):
+        # workaround a known issue: the version must only contain "major.minor", even though
+        # "extension image list" gives more detail
+        return '.'.join(version.split('.')[0:2])
+
+    if not version:
+        result = load_extension_images_thru_services(cli_ctx, publisher, vm_extension_name, None, location,
+                                                     show_latest=True, partial_match=False)
+        if not result:
+            raise CLIError('Failed to find the latest version for the extension "{}"'.format(vm_extension_name))
+        # with 'show_latest' enabled, we will only get one result.
+        version = result[0]['version']
+
+    version = _trim_away_build_number(version)
+    return version
+
+
+def _parse_rg_name(strid):
+    '''From an ID, extract the contained (resource group, name) tuple.'''
+    from azure.mgmt.core.tools import parse_resource_id
+    parts = parse_resource_id(strid)
+    return (parts['resource_group'], parts['name'])
+
+
+def _set_sku(cmd, instance, sku):
+    if cmd.supported_api_version(min_api='2017-03-30'):
+        instance.sku = cmd.get_models('DiskSku')(name=sku)
+    else:
+        instance.account_type = sku
+
+
+def _show_missing_access_warning(resource_group, name, command):
+    warn = ("No access was given yet to the '{1}', because '--scope' was not provided. "
+            "You should set this up by creating a role assignment, e.g. "
+            "'az role assignment create --assignee <principal-id> --role contributor -g {0}' "
+            "would let it access the current resource group. To get the principal id, run "
+            "'az {2} show -g {0} -n {1} --query \"identity.principalId\" -otsv'".format(resource_group, name, command))
+    logger.warning(warn)
+
+
+def _parse_aux_subscriptions(resource_id):
+    from azure.mgmt.core.tools import is_valid_resource_id, parse_resource_id
+    if is_valid_resource_id(resource_id):
+        res = parse_resource_id(resource_id)
+        return [res['subscription']]
+    return None
+
+
+# Hide extension information from output, as the info is not correct and unhelpful; also,
+# commands using it mean to hide the extension concept from users.
+class ExtensionUpdateLongRunningOperation(LongRunningOperation): # pylint: disable=too-few-public-methods + pass + + +# region Disks (Managed) +def create_managed_disk(cmd, resource_group_name, disk_name, location=None, + # pylint: disable=too-many-locals, too-many-branches, too-many-statements, line-too-long + size_gb=None, sku='Premium_LRS', os_type=None, + source=None, for_upload=None, upload_size_bytes=None, # pylint: disable=unused-argument + # below are generated internally from 'source' + source_blob_uri=None, source_disk=None, source_snapshot=None, source_restore_point=None, + source_storage_account_id=None, no_wait=False, tags=None, zone=None, + disk_iops_read_write=None, disk_mbps_read_write=None, hyper_v_generation=None, + encryption_type=None, disk_encryption_set=None, max_shares=None, + disk_iops_read_only=None, disk_mbps_read_only=None, + image_reference=None, image_reference_lun=None, + gallery_image_reference=None, gallery_image_reference_lun=None, + network_access_policy=None, disk_access=None, logical_sector_size=None, + tier=None, enable_bursting=None, edge_zone=None, security_type=None, support_hibernation=None, + public_network_access=None, accelerated_network=None, architecture=None, + data_access_auth_mode=None, gallery_image_reference_type=None, security_data_uri=None, + upload_type=None, secure_vm_disk_encryption_set=None, performance_plus=None, + optimized_for_frequent_attach=None): + from azure.mgmt.core.tools import resource_id, is_valid_resource_id + from azure.cli.core.commands.client_factory import get_subscription_id + + Disk, CreationData, DiskCreateOption, Encryption = cmd.get_models( + 'Disk', 'CreationData', 'DiskCreateOption', 'Encryption') + + location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name) + if security_data_uri: + option = getattr(DiskCreateOption, 'import_secure') + elif source_blob_uri: + option = getattr(DiskCreateOption, 'import_enum') + elif source_disk or source_snapshot: + option = getattr(DiskCreateOption, 'copy') + elif source_restore_point: + option = getattr(DiskCreateOption, 'restore') + elif upload_type == 'Upload': + option = getattr(DiskCreateOption, 'upload') + elif upload_type == 'UploadWithSecurityData': + option = getattr(DiskCreateOption, 'upload_prepared_secure') + elif image_reference or gallery_image_reference: + option = getattr(DiskCreateOption, 'from_image') + else: + option = getattr(DiskCreateOption, 'empty') + + if source_storage_account_id is None and source_blob_uri is not None: + subscription_id = get_subscription_id(cmd.cli_ctx) + storage_account_name = source_blob_uri.split('.')[0].split('/')[-1] + source_storage_account_id = resource_id( + subscription=subscription_id, resource_group=resource_group_name, + namespace='Microsoft.Storage', type='storageAccounts', name=storage_account_name) + + if upload_size_bytes is not None and not upload_type: + raise RequiredArgumentMissingError( + 'usage error: --upload-size-bytes should be used together with --upload-type') + + from ._constants import COMPATIBLE_SECURITY_TYPE_VALUE, UPGRADE_SECURITY_HINT + if image_reference is not None: + if not is_valid_resource_id(image_reference): + # URN or name + terms = image_reference.split(':') + if len(terms) == 4: # URN + disk_publisher, disk_offer, disk_sku, disk_version = terms[0], terms[1], terms[2], terms[3] + if disk_version.lower() == 'latest': + disk_version = _get_latest_image_version(cmd.cli_ctx, location, disk_publisher, disk_offer, + disk_sku) + else: # error + raise CLIError('usage 
error: --image-reference should be ID or URN (publisher:offer:sku:version).') + else: + from azure.mgmt.core.tools import parse_resource_id + terms = parse_resource_id(image_reference) + disk_publisher, disk_offer, disk_sku, disk_version = \ + terms['child_name_1'], terms['child_name_3'], terms['child_name_4'], terms['child_name_5'] + + client = _compute_client_factory(cmd.cli_ctx) + response = client.virtual_machine_images.get(location=location, publisher_name=disk_publisher, + offer=disk_offer, skus=disk_sku, version=disk_version) + + if hasattr(response, 'hyper_v_generation'): + if response.hyper_v_generation == 'V1': + logger.warning(UPGRADE_SECURITY_HINT) + elif response.hyper_v_generation == 'V2': + # set default value of hyper_v_generation + if hyper_v_generation == 'V1': + hyper_v_generation = 'V2' + # set default value of security_type + if not security_type: + security_type = 'TrustedLaunch' + if security_type != 'TrustedLaunch': + logger.warning(UPGRADE_SECURITY_HINT) + + # image_reference is an ID now + image_reference = {'id': response.id} + if image_reference_lun is not None: + image_reference['lun'] = image_reference_lun + + if gallery_image_reference is not None: + if not security_type: + security_type = 'Standard' + if security_type != 'TrustedLaunch': + logger.warning(UPGRADE_SECURITY_HINT) + + key = gallery_image_reference_type if gallery_image_reference_type else 'id' + gallery_image_reference = {key: gallery_image_reference} + if gallery_image_reference_lun is not None: + gallery_image_reference['lun'] = gallery_image_reference_lun + + creation_data = CreationData(create_option=option, source_uri=source_blob_uri, + image_reference=image_reference, gallery_image_reference=gallery_image_reference, + source_resource_id=source_disk or source_snapshot or source_restore_point, + storage_account_id=source_storage_account_id, + upload_size_bytes=upload_size_bytes, + logical_sector_size=logical_sector_size, + security_data_uri=security_data_uri, + performance_plus=performance_plus) + + if size_gb is None and option == DiskCreateOption.empty: + raise RequiredArgumentMissingError( + 'usage error: --size-gb is required to create an empty disk') + if upload_size_bytes is None and upload_type: + raise RequiredArgumentMissingError( + 'usage error: --upload-size-bytes is required to create a disk for upload') + + if disk_encryption_set is not None and not is_valid_resource_id(disk_encryption_set): + disk_encryption_set = resource_id( + subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name, + namespace='Microsoft.Compute', type='diskEncryptionSets', name=disk_encryption_set) + + if disk_access is not None and not is_valid_resource_id(disk_access): + disk_access = resource_id( + subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name, + namespace='Microsoft.Compute', type='diskAccesses', name=disk_access) + + if secure_vm_disk_encryption_set is not None and not is_valid_resource_id(secure_vm_disk_encryption_set): + secure_vm_disk_encryption_set = resource_id( + subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name, + namespace='Microsoft.Compute', type='diskEncryptionSets', name=secure_vm_disk_encryption_set) + + encryption = None + if disk_encryption_set or encryption_type: + encryption = Encryption(type=encryption_type, disk_encryption_set_id=disk_encryption_set) + + disk = Disk(location=location, creation_data=creation_data, tags=(tags or {}), + sku=_get_sku_object(cmd, sku), disk_size_gb=size_gb, 
os_type=os_type, encryption=encryption) + + if hyper_v_generation: + disk.hyper_v_generation = hyper_v_generation + + if zone: + disk.zones = zone + if disk_iops_read_write is not None: + disk.disk_iops_read_write = disk_iops_read_write + if disk_mbps_read_write is not None: + disk.disk_m_bps_read_write = disk_mbps_read_write + if max_shares is not None: + disk.max_shares = max_shares + if disk_iops_read_only is not None: + disk.disk_iops_read_only = disk_iops_read_only + if disk_mbps_read_only is not None: + disk.disk_m_bps_read_only = disk_mbps_read_only + if network_access_policy is not None: + disk.network_access_policy = network_access_policy + if disk_access is not None: + disk.disk_access_id = disk_access + if tier is not None: + disk.tier = tier + if enable_bursting is not None: + disk.bursting_enabled = enable_bursting + if edge_zone is not None: + disk.extended_location = edge_zone + # The `Standard` is used for backward compatibility to allow customers to keep their current behavior + # after changing the default values to Trusted Launch VMs in the future. + if security_type and security_type != COMPATIBLE_SECURITY_TYPE_VALUE: + disk.security_profile = {'securityType': security_type} + if secure_vm_disk_encryption_set: + disk.security_profile['secure_vm_disk_encryption_set_id'] = secure_vm_disk_encryption_set + if support_hibernation is not None: + disk.supports_hibernation = support_hibernation + if public_network_access is not None: + disk.public_network_access = public_network_access + if accelerated_network is not None or architecture is not None: + if disk.supported_capabilities is None: + supportedCapabilities = cmd.get_models('SupportedCapabilities')(accelerated_network=accelerated_network, + architecture=architecture) + disk.supported_capabilities = supportedCapabilities + else: + disk.supported_capabilities.accelerated_network = accelerated_network + disk.supported_capabilities.architecture = architecture + if data_access_auth_mode is not None: + disk.data_access_auth_mode = data_access_auth_mode + if optimized_for_frequent_attach is not None: + disk.optimized_for_frequent_attach = optimized_for_frequent_attach + + client = _compute_client_factory(cmd.cli_ctx) + return sdk_no_wait(no_wait, client.disks.begin_create_or_update, resource_group_name, disk_name, disk) + + +def grant_disk_access(cmd, resource_group_name, disk_name, duration_in_seconds, access_level=None, + secure_vm_guest_state_sas=None): + return _grant_access(cmd, resource_group_name, disk_name, duration_in_seconds, is_disk=True, + access_level=access_level, secure_vm_guest_state_sas=secure_vm_guest_state_sas) + + +def update_managed_disk(cmd, resource_group_name, instance, size_gb=None, sku=None, disk_iops_read_write=None, + # pylint: disable=too-many-branches + disk_mbps_read_write=None, encryption_type=None, disk_encryption_set=None, + network_access_policy=None, disk_access=None, max_shares=None, disk_iops_read_only=None, + disk_mbps_read_only=None, enable_bursting=None, public_network_access=None, + accelerated_network=None, architecture=None, data_access_auth_mode=None): + from azure.mgmt.core.tools import resource_id, is_valid_resource_id + from azure.cli.core.commands.client_factory import get_subscription_id + + if size_gb is not None: + instance.disk_size_gb = size_gb + if sku is not None: + _set_sku(cmd, instance, sku) + if disk_iops_read_write is not None: + instance.disk_iops_read_write = disk_iops_read_write + if disk_mbps_read_write is not None: + instance.disk_m_bps_read_write = 
disk_mbps_read_write + if disk_iops_read_only is not None: + instance.disk_iops_read_only = disk_iops_read_only + if disk_mbps_read_only is not None: + instance.disk_m_bps_read_only = disk_mbps_read_only + if max_shares is not None: + instance.max_shares = max_shares + if disk_encryption_set is not None: + if instance.encryption.type != 'EncryptionAtRestWithCustomerKey' and \ + encryption_type != 'EncryptionAtRestWithCustomerKey': + raise CLIError('usage error: Please set --encryption-type to EncryptionAtRestWithCustomerKey') + if not is_valid_resource_id(disk_encryption_set): + disk_encryption_set = resource_id( + subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name, + namespace='Microsoft.Compute', type='diskEncryptionSets', name=disk_encryption_set) + instance.encryption.disk_encryption_set_id = disk_encryption_set + if encryption_type is not None: + instance.encryption.type = encryption_type + if encryption_type != 'EncryptionAtRestWithCustomerKey': + instance.encryption.disk_encryption_set_id = None + if network_access_policy is not None: + instance.network_access_policy = network_access_policy + if disk_access is not None: + if not is_valid_resource_id(disk_access): + disk_access = resource_id( + subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name, + namespace='Microsoft.Compute', type='diskAccesses', name=disk_access) + instance.disk_access_id = disk_access + if enable_bursting is not None: + instance.bursting_enabled = enable_bursting + if public_network_access is not None: + instance.public_network_access = public_network_access + if accelerated_network is not None or architecture is not None: + if instance.supported_capabilities is None: + supportedCapabilities = cmd.get_models('SupportedCapabilities')(accelerated_network=accelerated_network, + architecture=architecture) + instance.supported_capabilities = supportedCapabilities + else: + instance.supported_capabilities.accelerated_network = accelerated_network + instance.supported_capabilities.architecture = architecture + if data_access_auth_mode is not None: + instance.data_access_auth_mode = data_access_auth_mode + + return instance + + +# endregion + + +# region Images (Managed) +def create_image(cmd, resource_group_name, name, source, os_type=None, data_disk_sources=None, location=None, + # pylint: disable=too-many-locals,unused-argument + # below are generated internally from 'source' and 'data_disk_sources' + source_virtual_machine=None, storage_sku=None, hyper_v_generation=None, + os_blob_uri=None, data_blob_uris=None, + os_snapshot=None, data_snapshots=None, + os_disk=None, os_disk_caching=None, data_disks=None, data_disk_caching=None, + tags=None, zone_resilient=None, edge_zone=None): + ImageOSDisk, ImageDataDisk, ImageStorageProfile, Image, SubResource, OperatingSystemStateTypes = cmd.get_models( + 'ImageOSDisk', 'ImageDataDisk', 'ImageStorageProfile', 'Image', 'SubResource', 'OperatingSystemStateTypes') + + if source_virtual_machine: + location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name) + image_storage_profile = None if zone_resilient is None else ImageStorageProfile(zone_resilient=zone_resilient) + image = Image(location=location, source_virtual_machine=SubResource(id=source_virtual_machine), + storage_profile=image_storage_profile, tags=(tags or {})) + else: + os_disk = ImageOSDisk(os_type=os_type, + os_state=OperatingSystemStateTypes.generalized, + caching=os_disk_caching, + snapshot=SubResource(id=os_snapshot) if 
os_snapshot else None,
+                               managed_disk=SubResource(id=os_disk) if os_disk else None,
+                               blob_uri=os_blob_uri,
+                               storage_account_type=storage_sku)
+        all_data_disks = []
+        lun = 0
+        if data_blob_uris:
+            for d in data_blob_uris:
+                all_data_disks.append(ImageDataDisk(lun=lun, blob_uri=d, caching=data_disk_caching))
+                lun += 1
+        if data_snapshots:
+            for d in data_snapshots:
+                all_data_disks.append(ImageDataDisk(lun=lun, snapshot=SubResource(id=d), caching=data_disk_caching))
+                lun += 1
+        if data_disks:
+            for d in data_disks:
+                all_data_disks.append(ImageDataDisk(lun=lun, managed_disk=SubResource(id=d), caching=data_disk_caching))
+                lun += 1
+
+        image_storage_profile = ImageStorageProfile(os_disk=os_disk, data_disks=all_data_disks)
+        if zone_resilient is not None:
+            image_storage_profile.zone_resilient = zone_resilient
+        location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name)
+        # pylint: disable=no-member
+        image = Image(location=location, storage_profile=image_storage_profile, tags=(tags or {}))
+
+    if hyper_v_generation:
+        image.hyper_v_generation = hyper_v_generation
+
+    if edge_zone:
+        image.extended_location = edge_zone
+
+    client = _compute_client_factory(cmd.cli_ctx)
+    return client.images.begin_create_or_update(resource_group_name, name, image)
+
+
+def update_image(instance, tags=None):
+    if tags is not None:
+        instance.tags = tags
+    return instance
+
+
+# region Snapshots
+# pylint: disable=unused-argument,too-many-locals
+def create_snapshot(cmd, resource_group_name, snapshot_name, location=None, size_gb=None, sku='Standard_LRS',
+                    source=None, for_upload=None, copy_start=None, incremental=None,
+                    # below are generated internally from 'source'
+                    source_blob_uri=None, source_disk=None, source_snapshot=None, source_storage_account_id=None,
+                    hyper_v_generation=None, tags=None, no_wait=False, disk_encryption_set=None,
+                    encryption_type=None, network_access_policy=None, disk_access=None, edge_zone=None,
+                    public_network_access=None, accelerated_network=None, architecture=None,
+                    elastic_san_resource_id=None, bandwidth_copy_speed=None):
+    from azure.mgmt.core.tools import resource_id, is_valid_resource_id
+    from azure.cli.core.commands.client_factory import get_subscription_id
+
+    Snapshot, CreationData, DiskCreateOption, Encryption = cmd.get_models(
+        'Snapshot', 'CreationData', 'DiskCreateOption', 'Encryption')
+
+    location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name)
+    if source_blob_uri:
+        option = getattr(DiskCreateOption, 'import_enum')
+    elif source_disk or source_snapshot:
+        option = getattr(DiskCreateOption, 'copy')
+        if cmd.supported_api_version(min_api='2021-04-01', operation_group='snapshots'):
+            option = getattr(DiskCreateOption, 'copy_start') if copy_start else getattr(DiskCreateOption, 'copy')
+    elif for_upload:
+        option = getattr(DiskCreateOption, 'upload')
+    elif elastic_san_resource_id:
+        option = getattr(DiskCreateOption, 'copy_from_san_snapshot')
+    else:
+        option = getattr(DiskCreateOption, 'empty')
+
+    creation_data = CreationData(create_option=option, source_uri=source_blob_uri,
+                                 image_reference=None,
+                                 source_resource_id=source_disk or source_snapshot,
+                                 storage_account_id=source_storage_account_id,
+                                 elastic_san_resource_id=elastic_san_resource_id,
+                                 provisioned_bandwidth_copy_speed=bandwidth_copy_speed)
+
+    if size_gb is None and option == DiskCreateOption.empty:
+        raise CLIError('Please supply a size for the snapshot')
+
+    if disk_encryption_set is not None and not 
is_valid_resource_id(disk_encryption_set): + disk_encryption_set = resource_id( + subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name, + namespace='Microsoft.Compute', type='diskEncryptionSets', name=disk_encryption_set) + + if disk_access is not None and not is_valid_resource_id(disk_access): + disk_access = resource_id( + subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name, + namespace='Microsoft.Compute', type='diskAccesses', name=disk_access) + + if disk_encryption_set is not None and encryption_type is None: + raise CLIError('usage error: Please specify --encryption-type.') + if encryption_type is not None: + encryption = Encryption(type=encryption_type, disk_encryption_set_id=disk_encryption_set) + else: + encryption = None + + snapshot = Snapshot(location=location, creation_data=creation_data, tags=(tags or {}), + sku=_get_sku_object(cmd, sku), disk_size_gb=size_gb, incremental=incremental, + encryption=encryption) + if hyper_v_generation: + snapshot.hyper_v_generation = hyper_v_generation + if network_access_policy is not None: + snapshot.network_access_policy = network_access_policy + if disk_access is not None: + snapshot.disk_access_id = disk_access + if edge_zone: + snapshot.extended_location = edge_zone + if public_network_access is not None: + snapshot.public_network_access = public_network_access + if accelerated_network is not None or architecture is not None: + if snapshot.supported_capabilities is None: + supportedCapabilities = cmd.get_models('SupportedCapabilities')(accelerated_network=accelerated_network, + architecture=architecture) + snapshot.supported_capabilities = supportedCapabilities + else: + snapshot.supported_capabilities.accelerated_network = accelerated_network + snapshot.supported_capabilities.architecture = architecture + + client = _compute_client_factory(cmd.cli_ctx) + return sdk_no_wait(no_wait, client.snapshots.begin_create_or_update, resource_group_name, snapshot_name, snapshot) + + +def grant_snapshot_access(cmd, resource_group_name, snapshot_name, duration_in_seconds, + access_level=None, file_format=None): + return _grant_access(cmd, resource_group_name, snapshot_name, duration_in_seconds, is_disk=False, + access_level=access_level, file_format=file_format) + + +def update_snapshot(cmd, resource_group_name, instance, sku=None, disk_encryption_set=None, + encryption_type=None, network_access_policy=None, disk_access=None, public_network_access=None, + accelerated_network=None, architecture=None): + from azure.mgmt.core.tools import resource_id, is_valid_resource_id + from azure.cli.core.commands.client_factory import get_subscription_id + + if sku is not None: + _set_sku(cmd, instance, sku) + if disk_encryption_set is not None: + if instance.encryption.type != 'EncryptionAtRestWithCustomerKey' and \ + encryption_type != 'EncryptionAtRestWithCustomerKey': + raise CLIError('usage error: Please set --encryption-type to EncryptionAtRestWithCustomerKey') + if not is_valid_resource_id(disk_encryption_set): + disk_encryption_set = resource_id( + subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name, + namespace='Microsoft.Compute', type='diskEncryptionSets', name=disk_encryption_set) + instance.encryption.disk_encryption_set_id = disk_encryption_set + if encryption_type is not None: + instance.encryption.type = encryption_type + if network_access_policy is not None: + instance.network_access_policy = network_access_policy + if disk_access is not None and not 
is_valid_resource_id(disk_access):
+        disk_access = resource_id(
+            subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
+            namespace='Microsoft.Compute', type='diskAccesses', name=disk_access)
+        instance.disk_access_id = disk_access
+    if public_network_access is not None:
+        instance.public_network_access = public_network_access
+    if accelerated_network is not None or architecture is not None:
+        if instance.supported_capabilities is None:
+            supportedCapabilities = cmd.get_models('SupportedCapabilities')(accelerated_network=accelerated_network,
+                                                                            architecture=architecture)
+            instance.supported_capabilities = supportedCapabilities
+        else:
+            instance.supported_capabilities.accelerated_network = accelerated_network
+            instance.supported_capabilities.architecture = architecture
+    return instance
+
+
+# endregion
+
+
+# region VirtualMachines Identity
+def show_vm_identity(cmd, resource_group_name, vm_name):
+    client = _compute_client_factory(cmd.cli_ctx)
+    return client.virtual_machines.get(resource_group_name, vm_name).identity
+
+
+def show_vmss_identity(cmd, resource_group_name, vm_name):
+    client = _compute_client_factory(cmd.cli_ctx)
+    return client.virtual_machine_scale_sets.get(resource_group_name, vm_name).identity
+
+
+def assign_vm_identity(cmd, resource_group_name, vm_name, assign_identity=None, identity_role=None,
+                       identity_role_id=None, identity_scope=None):
+    VirtualMachineIdentity, ResourceIdentityType, VirtualMachineUpdate = cmd.get_models('VirtualMachineIdentity',
+                                                                                        'ResourceIdentityType',
+                                                                                        'VirtualMachineUpdate')
+    UserAssignedIdentitiesValue = cmd.get_models('UserAssignedIdentitiesValue')
+    from azure.cli.core.commands.arm import assign_identity as assign_identity_helper
+    client = _compute_client_factory(cmd.cli_ctx)
+    _, _, external_identities, enable_local_identity = _build_identities_info(assign_identity)
+
+    def getter():
+        return client.virtual_machines.get(resource_group_name, vm_name)
+
+    def setter(vm, external_identities=external_identities):
+        if vm.identity and vm.identity.type == ResourceIdentityType.system_assigned_user_assigned:
+            identity_types = ResourceIdentityType.system_assigned_user_assigned
+        elif vm.identity and vm.identity.type == ResourceIdentityType.system_assigned and external_identities:
+            identity_types = ResourceIdentityType.system_assigned_user_assigned
+        elif vm.identity and vm.identity.type == ResourceIdentityType.user_assigned and enable_local_identity:
+            identity_types = ResourceIdentityType.system_assigned_user_assigned
+        elif external_identities and enable_local_identity:
+            identity_types = ResourceIdentityType.system_assigned_user_assigned
+        elif external_identities:
+            identity_types = ResourceIdentityType.user_assigned
+        else:
+            identity_types = ResourceIdentityType.system_assigned
+
+        vm.identity = VirtualMachineIdentity(type=identity_types)
+        if external_identities:
+            vm.identity.user_assigned_identities = {}
+            if not cmd.supported_api_version(min_api='2018-06-01', resource_type=ResourceType.MGMT_COMPUTE):
+                raise CLIInternalError("Usage error: user assigned identity is not available under current profile.",
+                                       "You can set the cloud's profile to latest with 'az cloud set --profile latest"
+                                       " --name <cloud name>'")
+            for identity in external_identities:
+                vm.identity.user_assigned_identities[identity] = UserAssignedIdentitiesValue()
+
+        vm_patch = VirtualMachineUpdate()
+        vm_patch.identity = vm.identity
+        return patch_vm(cmd, resource_group_name, vm_name, vm_patch)
+
+    assign_identity_helper(cmd.cli_ctx, getter, setter, 
identity_role=identity_role_id, identity_scope=identity_scope) + vm = client.virtual_machines.get(resource_group_name, vm_name) + return _construct_identity_info(identity_scope, identity_role, vm.identity.principal_id, + vm.identity.user_assigned_identities) + + +# endregion + + +# region VirtualMachines +def capture_vm(cmd, resource_group_name, vm_name, vhd_name_prefix, + storage_container='vhds', overwrite=True): + VirtualMachineCaptureParameters = cmd.get_models('VirtualMachineCaptureParameters') + client = _compute_client_factory(cmd.cli_ctx) + parameter = VirtualMachineCaptureParameters(vhd_prefix=vhd_name_prefix, + destination_container_name=storage_container, + overwrite_vhds=overwrite) + poller = client.virtual_machines.begin_capture(resource_group_name, vm_name, parameter) + result = LongRunningOperation(cmd.cli_ctx)(poller) + output = getattr(result, 'output', None) or result.resources[0] + print(json.dumps(output, indent=2)) # pylint: disable=no-member + + +# pylint: disable=too-many-locals, unused-argument, too-many-statements, too-many-branches, broad-except +def create_vm(cmd, vm_name, resource_group_name, image=None, size='Standard_DS1_v2', location=None, tags=None, + no_wait=False, authentication_type=None, admin_password=None, computer_name=None, + admin_username=None, ssh_dest_key_path=None, ssh_key_value=None, generate_ssh_keys=False, + availability_set=None, nics=None, nsg=None, nsg_rule=None, accelerated_networking=None, + private_ip_address=None, public_ip_address=None, public_ip_address_allocation='dynamic', + public_ip_address_dns_name=None, public_ip_sku=None, os_disk_name=None, os_type=None, + storage_account=None, os_caching=None, data_caching=None, storage_container_name=None, storage_sku=None, + use_unmanaged_disk=False, attach_os_disk=None, os_disk_size_gb=None, attach_data_disks=None, + data_disk_sizes_gb=None, disk_info=None, + vnet_name=None, vnet_address_prefix='10.0.0.0/16', subnet=None, subnet_address_prefix='10.0.0.0/24', + storage_profile=None, os_publisher=None, os_offer=None, os_sku=None, os_version=None, + storage_account_type=None, vnet_type=None, nsg_type=None, public_ip_address_type=None, nic_type=None, + validate=False, custom_data=None, secrets=None, plan_name=None, plan_product=None, plan_publisher=None, + plan_promotion_code=None, license_type=None, assign_identity=None, identity_scope=None, + identity_role=None, identity_role_id=None, application_security_groups=None, zone=None, + boot_diagnostics_storage=None, ultra_ssd_enabled=None, + ephemeral_os_disk=None, ephemeral_os_disk_placement=None, + proximity_placement_group=None, dedicated_host=None, dedicated_host_group=None, aux_subscriptions=None, + priority=None, max_price=None, eviction_policy=None, enable_agent=None, workspace=None, vmss=None, + os_disk_encryption_set=None, data_disk_encryption_sets=None, specialized=None, + encryption_at_host=None, enable_auto_update=None, patch_mode=None, ssh_key_name=None, + enable_hotpatching=None, platform_fault_domain=None, security_type=None, enable_secure_boot=None, + enable_vtpm=None, count=None, edge_zone=None, nic_delete_option=None, os_disk_delete_option=None, + data_disk_delete_option=None, user_data=None, capacity_reservation_group=None, enable_hibernation=None, + v_cpus_available=None, v_cpus_per_core=None, accept_term=None, + disable_integrity_monitoring=None, # Unused + enable_integrity_monitoring=False, + os_disk_security_encryption_type=None, os_disk_secure_vm_disk_encryption_set=None, + disk_controller_type=None, 
disable_integrity_monitoring_autoupgrade=False, enable_proxy_agent=None,
+              proxy_agent_mode=None, source_snapshots_or_disks=None, source_snapshots_or_disks_size_gb=None,
+              source_disk_restore_point=None, source_disk_restore_point_size_gb=None, ssh_key_type=None):
+    from azure.cli.core.commands.client_factory import get_subscription_id
+    from azure.cli.core.util import random_string, hash_string
+    from azure.cli.core.commands.arm import ArmTemplateBuilder
+    # pylint: disable=line-too-long
+    from azure.cli.command_modules.vm.azure_stack._template_builder import (build_vm_resource,
+                                                                            build_storage_account_resource,
+                                                                            build_nic_resource,
+                                                                            build_vnet_resource, build_nsg_resource,
+                                                                            build_public_ip_resource, StorageProfile,
+                                                                            build_msi_role_assignment,
+                                                                            build_vm_linux_log_analytics_workspace_agent,
+                                                                            build_vm_windows_log_analytics_workspace_agent)
+    from azure.cli.command_modules.vm.azure_stack._vm_utils import ArmTemplateBuilder20190401
+    from azure.mgmt.core.tools import resource_id, is_valid_resource_id, parse_resource_id
+
+    # In the latest profile, the default public IP is expected to change from Basic to Standard,
+    # and the Basic option will be removed.
+    # To avoid a breaking change that would have a big impact on users,
+    # we use this hint to guide users to use a Standard public IP when creating a VM in the first stage.
+    if cmd.cli_ctx.cloud.profile == 'latest':
+        if public_ip_sku == "Basic":
+            logger.warning(remove_basic_option_msg, "--public-ip-sku Standard")
+
+    subscription_id = get_subscription_id(cmd.cli_ctx)
+    if os_disk_encryption_set is not None and not is_valid_resource_id(os_disk_encryption_set):
+        os_disk_encryption_set = resource_id(
+            subscription=subscription_id, resource_group=resource_group_name,
+            namespace='Microsoft.Compute', type='diskEncryptionSets', name=os_disk_encryption_set)
+    if os_disk_secure_vm_disk_encryption_set is not None and \
+            not is_valid_resource_id(os_disk_secure_vm_disk_encryption_set):
+        os_disk_secure_vm_disk_encryption_set = resource_id(
+            subscription=subscription_id, resource_group=resource_group_name,
+            namespace='Microsoft.Compute', type='diskEncryptionSets', name=os_disk_secure_vm_disk_encryption_set)
+
+    if data_disk_encryption_sets is None:
+        data_disk_encryption_sets = []
+    for i, des in enumerate(data_disk_encryption_sets):
+        if des is not None and not is_valid_resource_id(des):
+            data_disk_encryption_sets[i] = resource_id(
+                subscription=subscription_id, resource_group=resource_group_name,
+                namespace='Microsoft.Compute', type='diskEncryptionSets', name=des)
+
+    storage_sku = disk_info['os'].get('storageAccountType')
+
+    network_id_template = resource_id(
+        subscription=subscription_id, resource_group=resource_group_name,
+        namespace='Microsoft.Network')
+
+    vm_id = resource_id(
+        subscription=subscription_id, resource_group=resource_group_name,
+        namespace='Microsoft.Compute', type='virtualMachines', name=vm_name)
+
+    # determine final defaults and calculated values
+    tags = tags or {}
+    os_disk_name = os_disk_name or ('osdisk_{}'.format(hash_string(vm_id, length=10)) if use_unmanaged_disk else None)
+    storage_container_name = storage_container_name or 'vhds'
+
+    # Build up the ARM template
+    if count is None:
+        master_template = ArmTemplateBuilder()
+    else:
+        master_template = ArmTemplateBuilder20190401()
+
+    vm_dependencies = []
+    if storage_account_type == 'new':
+        storage_account = storage_account or 'vhdstorage{}'.format(
+            hash_string(vm_id, length=14, force_lower=True))
+        
vm_dependencies.append('Microsoft.Storage/storageAccounts/{}'.format(storage_account)) + master_template.add_resource(build_storage_account_resource(cmd, storage_account, location, + tags, storage_sku, edge_zone)) + + nic_name = None + if nic_type == 'new': + nic_name = '{}VMNic'.format(vm_name) + nic_full_name = 'Microsoft.Network/networkInterfaces/{}'.format(nic_name) + if count: + vm_dependencies.extend([nic_full_name + str(i) for i in range(count)]) + else: + vm_dependencies.append(nic_full_name) + + nic_dependencies = [] + if vnet_type == 'new': + subnet = subnet or '{}Subnet'.format(vm_name) + vnet_exists = False + if vnet_name: + from azure.cli.command_modules.vm.azure_stack._vm_utils import check_existence + vnet_exists = \ + check_existence(cmd.cli_ctx, vnet_name, resource_group_name, 'Microsoft.Network', 'virtualNetworks') + if vnet_exists: + SubnetCreate = import_aaz_by_profile(cmd.cli_ctx.cloud.profile, "network.vnet.subnet").Create + try: + poller = SubnetCreate(cli_ctx=cmd.cli_ctx)(command_args={ + 'name': subnet, + 'vnet_name': vnet_name, + 'resource_group': resource_group_name, + 'address_prefixes': [subnet_address_prefix], + 'address_prefix': subnet_address_prefix + }) + LongRunningOperation(cmd.cli_ctx)(poller) + except Exception: + raise CLIError('Subnet({}) does not exist, but failed to create a new subnet with address ' + 'prefix {}. It may be caused by name or address prefix conflict. Please specify ' + 'an appropriate subnet name with --subnet or a valid address prefix value with ' + '--subnet-address-prefix.'.format(subnet, subnet_address_prefix)) + if not vnet_exists: + vnet_name = vnet_name or '{}VNET'.format(vm_name) + nic_dependencies.append('Microsoft.Network/virtualNetworks/{}'.format(vnet_name)) + master_template.add_resource(build_vnet_resource(cmd, vnet_name, location, tags, vnet_address_prefix, + subnet, subnet_address_prefix, edge_zone=edge_zone)) + + if nsg_type == 'new': + if nsg_rule is None: + nsg_rule = 'RDP' if os_type.lower() == 'windows' else 'SSH' + nsg = nsg or '{}NSG'.format(vm_name) + nic_dependencies.append('Microsoft.Network/networkSecurityGroups/{}'.format(nsg)) + master_template.add_resource(build_nsg_resource(cmd, nsg, location, tags, nsg_rule)) + + if public_ip_address_type == 'new': + public_ip_address = public_ip_address or '{}PublicIP'.format(vm_name) + public_ip_address_full_name = 'Microsoft.Network/publicIpAddresses/{}'.format(public_ip_address) + if count: + nic_dependencies.extend([public_ip_address_full_name + str(i) for i in range(count)]) + else: + nic_dependencies.append(public_ip_address_full_name) + master_template.add_resource(build_public_ip_resource(cmd, public_ip_address, location, tags, + public_ip_address_allocation, + public_ip_address_dns_name, + public_ip_sku, zone, count, edge_zone)) + + subnet_id = subnet if is_valid_resource_id(subnet) else \ + '{}/virtualNetworks/{}/subnets/{}'.format(network_id_template, vnet_name, subnet) + + nsg_id = None + if nsg: + nsg_id = nsg if is_valid_resource_id(nsg) else \ + '{}/networkSecurityGroups/{}'.format(network_id_template, nsg) + + public_ip_address_id = None + if public_ip_address: + public_ip_address_id = public_ip_address if is_valid_resource_id(public_ip_address) \ + else '{}/publicIPAddresses/{}'.format(network_id_template, public_ip_address) + + nics_id = '{}/networkInterfaces/{}'.format(network_id_template, nic_name) + + if count: + nics = [ + { + 'id': "[concat('{}', copyIndex())]".format(nics_id), + 'properties': { + 'deleteOption': nic_delete_option + } + } + ] + 
else:
+        nics = [
+            {
+                'id': nics_id,
+                'properties': {
+                    'deleteOption': nic_delete_option
+                }
+            }
+        ]
+
+        nic_resource = build_nic_resource(
+            cmd, nic_name, location, tags, vm_name, subnet_id, private_ip_address, nsg_id,
+            public_ip_address_id, application_security_groups, accelerated_networking=accelerated_networking,
+            count=count, edge_zone=edge_zone)
+        nic_resource['dependsOn'] = nic_dependencies
+        master_template.add_resource(nic_resource)
+    else:
+        # Using an existing NIC
+        invalid_parameters = [nsg, public_ip_address, subnet, vnet_name, application_security_groups]
+        if any(invalid_parameters):
+            raise CLIError('When specifying an existing NIC, do not specify NSG, '
+                           'public IP, ASGs, VNet or subnet.')
+        if accelerated_networking is not None:
+            logger.warning('When specifying an existing NIC, do not specify accelerated networking. '
+                           'Ignoring --accelerated-networking for now. '
+                           'This will trigger an error instead of a warning in future releases.')
+
+    os_vhd_uri = None
+    if storage_profile in [StorageProfile.SACustomImage, StorageProfile.SAPirImage]:
+        storage_account_name = storage_account.rsplit('/', 1)
+        storage_account_name = storage_account_name[1] if \
+            len(storage_account_name) > 1 else storage_account_name[0]
+        os_vhd_uri = 'https://{}.blob.{}/{}/{}.vhd'.format(
+            storage_account_name, cmd.cli_ctx.cloud.suffixes.storage_endpoint, storage_container_name, os_disk_name)
+    elif storage_profile == StorageProfile.SASpecializedOSDisk:
+        os_vhd_uri = attach_os_disk
+        os_disk_name = attach_os_disk.rsplit('/', 1)[1][:-4]
+
+    if custom_data:
+        custom_data = read_content_if_is_file(custom_data)
+
+    if user_data:
+        user_data = read_content_if_is_file(user_data)
+
+    if secrets:
+        secrets = _merge_secrets([validate_file_or_dict(secret) for secret in secrets])
+
+    vm_resource = build_vm_resource(
+        cmd=cmd, name=vm_name, location=location, tags=tags, size=size, storage_profile=storage_profile, nics=nics,
+        admin_username=admin_username, availability_set_id=availability_set, admin_password=admin_password,
+        ssh_key_values=ssh_key_value, ssh_key_path=ssh_dest_key_path, image_reference=image,
+        os_disk_name=os_disk_name, custom_image_os_type=os_type, authentication_type=authentication_type,
+        os_publisher=os_publisher, os_offer=os_offer, os_sku=os_sku, os_version=os_version, os_vhd_uri=os_vhd_uri,
+        attach_os_disk=attach_os_disk, os_disk_size_gb=os_disk_size_gb, custom_data=custom_data, secrets=secrets,
+        license_type=license_type, zone=zone, disk_info=disk_info,
+        boot_diagnostics_storage_uri=boot_diagnostics_storage, ultra_ssd_enabled=ultra_ssd_enabled,
+        proximity_placement_group=proximity_placement_group, computer_name=computer_name,
+        dedicated_host=dedicated_host, priority=priority, max_price=max_price, eviction_policy=eviction_policy,
+        enable_agent=enable_agent, vmss=vmss, os_disk_encryption_set=os_disk_encryption_set,
+        data_disk_encryption_sets=data_disk_encryption_sets, specialized=specialized,
+        encryption_at_host=encryption_at_host, dedicated_host_group=dedicated_host_group,
+        enable_auto_update=enable_auto_update, patch_mode=patch_mode, enable_hotpatching=enable_hotpatching,
+        platform_fault_domain=platform_fault_domain, security_type=security_type, enable_secure_boot=enable_secure_boot,
+        enable_vtpm=enable_vtpm, count=count, edge_zone=edge_zone, os_disk_delete_option=os_disk_delete_option,
+        user_data=user_data, capacity_reservation_group=capacity_reservation_group,
+        enable_hibernation=enable_hibernation, v_cpus_available=v_cpus_available, v_cpus_per_core=v_cpus_per_core,
+        
os_disk_security_encryption_type=os_disk_security_encryption_type, + os_disk_secure_vm_disk_encryption_set=os_disk_secure_vm_disk_encryption_set, + disk_controller_type=disk_controller_type, enable_proxy_agent=enable_proxy_agent, + proxy_agent_mode=proxy_agent_mode) + + vm_resource['dependsOn'] = vm_dependencies + + if plan_name: + vm_resource['plan'] = { + 'name': plan_name, + 'publisher': plan_publisher, + 'product': plan_product, + 'promotionCode': plan_promotion_code + } + + enable_local_identity = None + if assign_identity is not None: + vm_resource['identity'], _, _, enable_local_identity = _build_identities_info(assign_identity) + role_assignment_guid = None + if identity_scope: + role_assignment_guid = str(_gen_guid()) + master_template.add_resource(build_msi_role_assignment(vm_name, vm_id, identity_role_id, + role_assignment_guid, identity_scope)) + + if workspace is not None: + workspace_id = _prepare_workspace(cmd, resource_group_name, workspace) + master_template.add_secure_parameter('workspaceId', workspace_id) + if os_type.lower() == 'linux': + vm_mmaExtension_resource = build_vm_linux_log_analytics_workspace_agent(cmd, vm_name, location) + master_template.add_resource(vm_mmaExtension_resource) + elif os_type.lower() == 'windows': + vm_mmaExtension_resource = build_vm_windows_log_analytics_workspace_agent(cmd, vm_name, location) + master_template.add_resource(vm_mmaExtension_resource) + else: + logger.warning("Unsupported OS type. Skip the connection step for log analytics workspace.") + + master_template.add_resource(vm_resource) + + if admin_password: + master_template.add_secure_parameter('adminPassword', admin_password) + + template = master_template.build() + parameters = master_template.build_parameters() + + # deploy ARM template + deployment_name = 'vm_deploy_' + random_string(32) + client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES, + aux_subscriptions=aux_subscriptions).deployments + DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES) + properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental') + Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES) + deployment = Deployment(properties=properties) + + if validate: + from azure.cli.command_modules.vm.azure_stack._vm_utils import log_pprint_template + log_pprint_template(template) + log_pprint_template(parameters) + + if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES): + validation_poller = client.begin_validate(resource_group_name, deployment_name, deployment) + return LongRunningOperation(cmd.cli_ctx)(validation_poller) + + return client.validate(resource_group_name, deployment_name, deployment) + + # creates the VM deployment + if no_wait: + return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, deployment_name, deployment) + LongRunningOperation(cmd.cli_ctx)(client.begin_create_or_update(resource_group_name, deployment_name, deployment)) + + # Guest Attestation Extension and enable System Assigned MSI by default + is_trusted_launch = security_type and security_type.lower() == 'trustedlaunch' and \ + enable_vtpm and enable_secure_boot + if is_trusted_launch and enable_integrity_monitoring: + vm = get_vm(cmd, resource_group_name, vm_name, 'instanceView') + client = _compute_client_factory(cmd.cli_ctx) + if vm.storage_profile.os_disk.os_type == 'Linux': + publisher = 
'Microsoft.Azure.Security.LinuxAttestation' + if vm.storage_profile.os_disk.os_type == 'Windows': + publisher = 'Microsoft.Azure.Security.WindowsAttestation' + version = _normalize_extension_version(cmd.cli_ctx, publisher, 'GuestAttestation', None, vm.location) + VirtualMachineExtension = cmd.get_models('VirtualMachineExtension') + ext = VirtualMachineExtension(location=vm.location, + publisher=publisher, + type_properties_type='GuestAttestation', + protected_settings=None, + type_handler_version=version, + settings=None, + auto_upgrade_minor_version=True, + enable_automatic_upgrade=not disable_integrity_monitoring_autoupgrade) + try: + LongRunningOperation(cmd.cli_ctx)(client.virtual_machine_extensions.begin_create_or_update( + resource_group_name, vm_name, 'GuestAttestation', ext)) + logger.info('Guest Attestation Extension has been successfully installed by default ' + 'when Trusted Launch configuration is met') + except Exception as e: + logger.error('Failed to install Guest Attestation Extension for Trusted Launch. %s', e) + if count: + vm_names = [vm_name + str(i) for i in range(count)] + else: + vm_names = [vm_name] + vms = [] + # Use vm_name2 to avoid R1704: Redefining argument with the local name 'vm_name' (redefined-argument-from-local) + for vm_name2 in vm_names: + vm = get_vm_details(cmd, resource_group_name, vm_name2) + if assign_identity is not None: + if enable_local_identity and not identity_scope: + _show_missing_access_warning(resource_group_name, vm_name2, 'vm') + setattr(vm, 'identity', _construct_identity_info(identity_scope, identity_role, vm.identity.principal_id, + vm.identity.user_assigned_identities)) + vms.append(vm) + + if workspace is not None: + workspace_name = parse_resource_id(workspace_id)['name'] + _set_data_source_for_workspace(cmd, os_type, resource_group_name, workspace_name) + + if len(vms) == 1: + return vms[0] + return vms + + +def auto_shutdown_vm(cmd, resource_group_name, vm_name, off=None, email=None, webhook=None, time=None, + location=None): + from azure.mgmt.core.tools import resource_id + from azure.mgmt.devtestlabs.models import Schedule + from azure.cli.core.commands.client_factory import get_subscription_id + subscription_id = get_subscription_id(cmd.cli_ctx) + client = _dev_test_labs_client_factory(cmd.cli_ctx, subscription_id) + name = 'shutdown-computevm-' + vm_name + vm_id = resource_id(subscription=client.config.subscription_id, resource_group=resource_group_name, + namespace='Microsoft.Compute', type='virtualMachines', name=vm_name) + if off: + if email is not None or webhook is not None or time is not None: + # I don't want to disrupt users. So I warn instead of raising an error. 
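+            # Deleting the DevTest Labs global schedule below is what actually disables auto-shutdown.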
+ logger.warning('If --off, other parameters will be ignored.') + return client.global_schedules.delete(resource_group_name, name) + + if time is None: + raise CLIError('usage error: --time is a required parameter') + daily_recurrence = {'time': time} + notification_settings = None + if email or webhook: + notification_settings = { + 'timeInMinutes': 30, + 'status': 'Enabled' + } + if email: + notification_settings['emailRecipient'] = email + if webhook: + notification_settings['webhookUrl'] = webhook + + schedule = Schedule(status='Enabled', + target_resource_id=vm_id, + daily_recurrence=daily_recurrence, + notification_settings=notification_settings, + time_zone_id='UTC', + task_type='ComputeVmShutdownTask', + location=location) + return client.global_schedules.create_or_update(resource_group_name, name, schedule) + + +def get_instance_view(cmd, resource_group_name, vm_name, include_user_data=False): + expand = 'instanceView' + if include_user_data: + expand = expand + ',userData' + return get_vm(cmd, resource_group_name, vm_name, expand) + + +def get_vm(cmd, resource_group_name, vm_name, expand=None): + client = _compute_client_factory(cmd.cli_ctx) + return client.virtual_machines.get(resource_group_name, vm_name, expand=expand) + + +def get_vm_to_update(cmd, resource_group_name, vm_name): + client = _compute_client_factory(cmd.cli_ctx) + vm = client.virtual_machines.get(resource_group_name, vm_name) + # To avoid unnecessary permission check of image + vm.storage_profile.image_reference = None + return vm + + +def get_vm_details(cmd, resource_group_name, vm_name, include_user_data=False): + from azure.mgmt.core.tools import parse_resource_id + + NicShow = import_aaz_by_profile(cmd.cli_ctx.cloud.profile, "network.nic").Show + PublicIPShow = import_aaz_by_profile(cmd.cli_ctx.cloud.profile, "network.public_ip").Show + + result = get_instance_view(cmd, resource_group_name, vm_name, include_user_data) + public_ips = [] + fqdns = [] + private_ips = [] + mac_addresses = [] + # pylint: disable=line-too-long,no-member + for nic_ref in result.network_profile.network_interfaces: + nic_parts = parse_resource_id(nic_ref.id) + nic = NicShow(cli_ctx=cmd.cli_ctx)(command_args={ + "name": nic_parts['name'], + 'resource_group': nic_parts['resource_group'] + }) + if 'macAddress' in nic: + mac_addresses.append(nic['macAddress']) + for ip_configuration in nic['ipConfigurations']: + if 'privateIPAddress' in ip_configuration: + private_ips.append(ip_configuration['privateIPAddress']) + if 'publicIPAddress' in ip_configuration: + res = parse_resource_id(ip_configuration['publicIPAddress']['id']) + public_ip_info = PublicIPShow(cli_ctx=cmd.cli_ctx)(command_args={ + 'name': res['name'], + 'resource_group': res['resource_group'] + }) + if 'ipAddress' in public_ip_info: + public_ips.append(public_ip_info['ipAddress']) + if 'dnsSettings' in public_ip_info: + fqdns.append(public_ip_info['dnsSettings']['fqdn']) + + setattr(result, 'power_state', + ','.join([s.display_status for s in result.instance_view.statuses if s.code.startswith('PowerState/')])) + setattr(result, 'public_ips', ','.join(public_ips)) + setattr(result, 'fqdns', ','.join(fqdns)) + setattr(result, 'private_ips', ','.join(private_ips)) + setattr(result, 'mac_addresses', ','.join(mac_addresses)) + del result.instance_view # we don't need other instance_view info as people won't care + return result + + +def list_skus(cmd, location=None, size=None, zone=None, show_all=None, resource_type=None): + from ._vm_utils import list_sku_info, is_sku_available + 
result = list_sku_info(cmd.cli_ctx, location)
+    # pylint: disable=too-many-nested-blocks
+    if not show_all:
+        available_skus = []
+        for sku_info in result:
+            if is_sku_available(cmd, sku_info, zone):
+                available_skus.append(sku_info)
+        result = available_skus
+    if resource_type:
+        result = [x for x in result if x.resource_type.lower() == resource_type.lower()]
+    if size:
+        result = [x for x in result if x.resource_type == 'virtualMachines' and size.lower() in x.name.lower()]
+    if zone:
+        result = [x for x in result if x.location_info and x.location_info[0].zones]
+    return result
+
+
+# pylint: disable=redefined-builtin
+def list_vm(cmd, resource_group_name=None, show_details=False, vmss=None):
+    from azure.mgmt.core.tools import resource_id, is_valid_resource_id, parse_resource_id
+    from azure.cli.core.commands.client_factory import get_subscription_id
+    ccf = _compute_client_factory(cmd.cli_ctx)
+    if vmss is not None:
+        if is_valid_resource_id(vmss):
+            filter = "'virtualMachineScaleSet/id' eq '{}'".format(vmss)
+            if resource_group_name is None:
+                resource_group_name = parse_resource_id(vmss)['resource_group']
+        else:
+            if resource_group_name is None:
+                raise RequiredArgumentMissingError(
+                    'usage error: please specify the --resource-group when listing VM instances with VMSS name')
+            vmss_id = resource_id(subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
+                                  namespace='Microsoft.Compute', type='virtualMachineScaleSets', name=vmss)
+            filter = "'virtualMachineScaleSet/id' eq '{}'".format(vmss_id)
+        vm_list = ccf.virtual_machines.list(resource_group_name=resource_group_name, filter=filter)
+    else:
+        vm_list = ccf.virtual_machines.list(resource_group_name=resource_group_name) \
+            if resource_group_name else ccf.virtual_machines.list_all()
+    if show_details:
+        return [get_vm_details(cmd, _parse_rg_name(v.id)[0], v.name) for v in vm_list]
+
+    return list(vm_list)
+
+
+def list_vm_ip_addresses(cmd, resource_group_name=None, vm_name=None):
+    # We start by getting NICs as they are smack in the middle of all the data that we
+    # want to collect for a VM (as long as we don't need any info on the VM other than what
+    # is available in the Id, we don't need to make any calls to the compute RP)
+    #
+    # Since there is no guarantee that a NIC is in the same resource group as a given
+    # Virtual Machine, we can't constrain the lookup to only a single group...
+    NicList = import_aaz_by_profile(cmd.cli_ctx.cloud.profile, "network.nic").List
+    PublicIPList = import_aaz_by_profile(cmd.cli_ctx.cloud.profile, "network.public_ip").List
+
+    nics = NicList(cli_ctx=cmd.cli_ctx)(command_args={})
+    public_ip_addresses = PublicIPList(cli_ctx=cmd.cli_ctx)(command_args={})
+
+    ip_address_lookup = {pip['id']: pip for pip in list(public_ip_addresses)}
+
+    result = []
+    for nic in [n for n in list(nics) if 'virtualMachine' in n and n['virtualMachine']]:
+        nic_resource_group, nic_vm_name = _parse_rg_name(nic['virtualMachine']['id'])
+
+        # If provided, make sure that resource group name and vm name match the NIC we are
+        # looking at before adding it to the result...
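+        # Each match is appended as {'virtualMachine': {'resourceGroup': ..., 'name': ..., 'network': {...}}}.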
+        same_resource_group_name = (resource_group_name is None or
+                                    resource_group_name.lower() == nic_resource_group.lower())
+        same_vm_name = (vm_name is None or
+                        vm_name.lower() == nic_vm_name.lower())
+        if same_resource_group_name and same_vm_name:
+            network_info = {
+                'privateIpAddresses': [],
+                'publicIpAddresses': []
+            }
+            for ip_configuration in nic['ipConfigurations']:
+                network_info['privateIpAddresses'].append(ip_configuration['privateIPAddress'])
+                if 'publicIPAddress' in ip_configuration and ip_configuration['publicIPAddress'] and \
+                        ip_configuration['publicIPAddress']['id'] in ip_address_lookup:
+                    public_ip_address = ip_address_lookup[ip_configuration['publicIPAddress']['id']]
+
+                    public_ip_addr_info = {
+                        'id': public_ip_address['id'],
+                        'name': public_ip_address['name'],
+                        'ipAddress': public_ip_address.get('ipAddress', None),
+                        'ipAllocationMethod': public_ip_address.get('publicIPAllocationMethod', None)
+                    }
+
+                    try:
+                        public_ip_addr_info['zone'] = public_ip_address['zones'][0] \
+                            if 'zones' in public_ip_address else None
+                    except (KeyError, IndexError, TypeError):
+                        pass
+
+                    network_info['publicIpAddresses'].append(public_ip_addr_info)
+
+            result.append({
+                'virtualMachine': {
+                    'resourceGroup': nic_resource_group,
+                    'name': nic_vm_name,
+                    'network': network_info
+                }
+            })
+
+    return result
+
+
+def open_vm_port(cmd, resource_group_name, vm_name, port, priority=900, network_security_group_name=None,
+                 apply_to_subnet=False):
+    from azure.mgmt.core.tools import parse_resource_id
+    _nic = import_aaz_by_profile(cmd.cli_ctx.cloud.profile, "network.nic")
+    NicShow, NicUpdate = _nic.Show, _nic.Update
+    _subnet = import_aaz_by_profile(cmd.cli_ctx.cloud.profile, "network.vnet.subnet")
+    SubnetShow, SubnetUpdate = _subnet.Show, _subnet.Update
+    _nsg = import_aaz_by_profile(cmd.cli_ctx.cloud.profile, "network.nsg")
+    NSGShow, NSGCreate = _nsg.Show, _nsg.Create
+    NSGRuleCreate = import_aaz_by_profile(cmd.cli_ctx.cloud.profile, "network.nsg.rule").Create
+
+    vm = get_vm(cmd, resource_group_name, vm_name)
+    location = vm.location
+    if not vm.network_profile:
+        raise CLIError("Network profile not found for VM '{}'".format(vm_name))
+
+    nic_ids = list(vm.network_profile.network_interfaces)
+    if len(nic_ids) > 1:
+        raise CLIError('Multiple NICs are not supported for this command. 
Create rules on the NSG ' + 'directly.') + if not nic_ids: + raise CLIError("No NIC associated with VM '{}'".format(vm_name)) + + # get existing NSG or create a new one + created_nsg = False + nic = NicShow(cli_ctx=cmd.cli_ctx)(command_args={ + 'name': os.path.split(nic_ids[0].id)[1], + 'resource_group': resource_group_name + }) + if not apply_to_subnet: + nsg = nic['networkSecurityGroup'] + else: + subnet_id = parse_resource_id(nic['ipConfigurations'][0]['subnet']['id']) + subnet = SubnetShow(cli_ctx=cmd.cli_ctx)(command_args={ + 'name': subnet_id['child_name_1'], + 'vnet_name': subnet_id['name'], + 'resource_group': resource_group_name + }) + nsg = subnet['networkSecurityGroup'] if 'networkSecurityGroup' in subnet else None + + if not nsg: + nsg = LongRunningOperation(cmd.cli_ctx, 'Creating network security group')( + NSGCreate(cli_ctx=cmd.cli_ctx)(command_args={ + 'name': network_security_group_name, + 'resource_group': resource_group_name, + 'location': location + })) + created_nsg = True + + # update the NSG with the new rule to allow inbound traffic + + rule_name = 'open-port-all' if port == '*' else 'open-port-{}'.format((port.replace(',', '_'))) + + # use portranges if multiple ports are entered + if "," not in port: + port_arg = { + 'destination_port_range': port + } + else: + port_arg = { + 'destination_port_ranges': port.split(',') + } + + nsg_name = nsg['name'] if 'name' in nsg else os.path.split(nsg['id'])[1] + LongRunningOperation(cmd.cli_ctx, 'Adding security rule')( + NSGRuleCreate(cli_ctx=cmd.cli_ctx)(command_args={ + 'name': rule_name, + 'nsg_name': nsg_name, + 'resource_group': resource_group_name, + 'protocol': '*', + 'access': 'allow', + 'direction': 'inbound', + 'source_port_range': '*', + **port_arg, + 'priority': priority, + 'source_address_prefix': '*', + 'destination_address_prefix': '*' + }) + ) + + # update the NIC or subnet if a new NSG was created + if created_nsg and not apply_to_subnet: + nic['networkSecurityGroup'] = nsg + LongRunningOperation(cmd.cli_ctx, 'Updating NIC')( + NicUpdate(cli_ctx=cmd.cli_ctx)(command_args={ + 'name': nic['name'], + 'resource_group': resource_group_name, + 'security_rules': nic + })) + elif created_nsg and apply_to_subnet: + subnet['networkSecurityGroup'] = nsg + LongRunningOperation(cmd.cli_ctx, 'Updating subnet')( + SubnetUpdate(cli_ctx=cmd.cli_ctx)(command_args={ + 'name': subnet_id['child_name_1'], + 'resource_group': resource_group_name, + 'vnet_name': subnet_id['name'], + 'subnet': subnet + }) + ) + + return NSGShow(cli_ctx=cmd.cli_ctx)(command_args={ + 'name': nsg_name, + 'resource_group': resource_group_name + }) + + +def resize_vm(cmd, resource_group_name, vm_name, size, no_wait=False): + vm = get_vm_to_update(cmd, resource_group_name, vm_name) + if vm.hardware_profile.vm_size == size: + logger.warning("VM is already %s", size) + return None + + vm.hardware_profile.vm_size = size # pylint: disable=no-member + return set_vm(cmd, vm, no_wait=no_wait) + + +def restart_vm(cmd, resource_group_name, vm_name, no_wait=False, force=False): + client = _compute_client_factory(cmd.cli_ctx) + if force: + return sdk_no_wait(no_wait, client.virtual_machines.begin_redeploy, resource_group_name, vm_name) + return sdk_no_wait(no_wait, client.virtual_machines.begin_restart, resource_group_name, vm_name) + + +def set_vm(cmd, instance, lro_operation=None, no_wait=False): + instance.resources = None # Issue: https://github.com/Azure/autorest/issues/934 + client = _compute_client_factory(cmd.cli_ctx) + parsed_id = _parse_rg_name(instance.id) 
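+    # _parse_rg_name returns a (resource_group, name) tuple parsed from the resource ID.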
+ poller = sdk_no_wait(no_wait, client.virtual_machines.begin_create_or_update, + resource_group_name=parsed_id[0], + vm_name=parsed_id[1], + parameters=instance) + if lro_operation: + return lro_operation(poller) + + return LongRunningOperation(cmd.cli_ctx)(poller) + + +def patch_vm(cmd, resource_group_name, vm_name, vm): + client = _compute_client_factory(cmd.cli_ctx) + poller = client.virtual_machines.begin_update(resource_group_name, vm_name, vm) + return LongRunningOperation(cmd.cli_ctx)(poller) + + +def patch_disk_encryption_set(cmd, resource_group_name, disk_encryption_set_name, disk_encryption_set_update): + client = _compute_client_factory(cmd.cli_ctx) + poller = client.disk_encryption_sets.begin_update(resource_group_name, disk_encryption_set_name, + disk_encryption_set_update) + return LongRunningOperation(cmd.cli_ctx)(poller) + + +def show_vm(cmd, resource_group_name, vm_name, show_details=False, include_user_data=False): + if show_details: + return get_vm_details(cmd, resource_group_name, vm_name, include_user_data) + + expand = None + if include_user_data: + expand = "userData" + return get_vm(cmd, resource_group_name, vm_name, expand) + + +def update_vm(cmd, resource_group_name, vm_name, os_disk=None, disk_caching=None, + write_accelerator=None, license_type=None, no_wait=False, ultra_ssd_enabled=None, + priority=None, max_price=None, proximity_placement_group=None, workspace=None, enable_secure_boot=None, + enable_vtpm=None, user_data=None, capacity_reservation_group=None, + dedicated_host=None, dedicated_host_group=None, size=None, ephemeral_os_disk_placement=None, + enable_hibernation=None, v_cpus_available=None, v_cpus_per_core=None, disk_controller_type=None, + security_type=None, enable_proxy_agent=None, proxy_agent_mode=None, **kwargs): + from azure.mgmt.core.tools import parse_resource_id, resource_id, is_valid_resource_id + from ._vm_utils import update_write_accelerator_settings, update_disk_caching + SecurityProfile, UefiSettings = cmd.get_models('SecurityProfile', 'UefiSettings') + vm = kwargs['parameters'] + + disk_name = None + if os_disk is not None: + if is_valid_resource_id(os_disk): + disk_id = os_disk + os_disk_id_parsed = parse_resource_id(os_disk) + disk_name = os_disk_id_parsed['name'] + else: + vm_id_parsed = parse_resource_id(vm.id) + disk_id = resource_id(subscription=vm_id_parsed['subscription'], + resource_group=vm_id_parsed['resource_group'], + namespace='Microsoft.Compute', type='disks', name=os_disk) + disk_name = os_disk + vm.storage_profile.os_disk.managed_disk.id = disk_id + vm.storage_profile.os_disk.name = disk_name + + if security_type == "TrustedLaunch": + from azure.cli.core.azclierror import InvalidArgumentValueError + if vm.security_profile is not None and vm.security_profile.security_type == "ConfidentialVM": + raise InvalidArgumentValueError("{} is already configured with ConfidentialVM. Security Configuration " + "cannot be updated from ConfidentialVM to TrustedLaunch.".format(vm.name)) + + if disk_name is None and vm.storage_profile.os_disk.managed_disk is not None: + os_disk_id_parsed = parse_resource_id(vm.storage_profile.os_disk.managed_disk.id) + disk_name = os_disk_id_parsed['name'] + + if disk_name is not None: + # Set --enable-secure-boot True and --enable-vtpm True if not specified by end user. 
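+            # Trusted Launch VMs are expected to run with both Secure Boot and vTPM enabled, hence the True defaults.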
+ enable_secure_boot = enable_secure_boot if enable_secure_boot is not None else True + enable_vtpm = enable_vtpm if enable_vtpm is not None else True + + if vm.security_profile is None: + vm.security_profile = SecurityProfile() + vm.security_profile.security_type = security_type + + if write_accelerator is not None: + update_write_accelerator_settings(vm.storage_profile, write_accelerator) + + if disk_caching is not None: + update_disk_caching(vm.storage_profile, disk_caching) + + if license_type is not None: + vm.license_type = license_type + + if user_data is not None: + from azure.cli.core.util import b64encode + vm.user_data = b64encode(user_data) + + if capacity_reservation_group is not None: + CapacityReservationProfile = cmd.get_models('CapacityReservationProfile') + SubResource = cmd.get_models('SubResource') + if capacity_reservation_group == 'None': + capacity_reservation_group = None + sub_resource = SubResource(id=capacity_reservation_group) + capacity_reservation = CapacityReservationProfile(capacity_reservation_group=sub_resource) + vm.capacity_reservation = capacity_reservation + + if dedicated_host is not None: + if vm.host is None: + DedicatedHost = cmd.get_models('SubResource') + vm.host = DedicatedHost(additional_properties={}, id=dedicated_host) + else: + vm.host.id = dedicated_host + if vm.host_group is not None: + vm.host_group = None + + if dedicated_host_group is not None: + if vm.host_group is None: + DedicatedHostGroup = cmd.get_models('SubResource') + vm.host_group = DedicatedHostGroup(additional_properties={}, id=dedicated_host_group) + else: + vm.host_group.id = dedicated_host_group + if vm.host is not None: + vm.host = None + + if ultra_ssd_enabled is not None: + if vm.additional_capabilities is None: + AdditionalCapabilities = cmd.get_models('AdditionalCapabilities') + vm.additional_capabilities = AdditionalCapabilities(ultra_ssd_enabled=ultra_ssd_enabled) + else: + vm.additional_capabilities.ultra_ssd_enabled = ultra_ssd_enabled + + if enable_hibernation is not None: + if vm.additional_capabilities is None: + AdditionalCapabilities = cmd.get_models('AdditionalCapabilities') + vm.additional_capabilities = AdditionalCapabilities(hibernation_enabled=enable_hibernation) + else: + vm.additional_capabilities.hibernation_enabled = enable_hibernation + + if priority is not None: + vm.priority = priority + + if max_price is not None: + if vm.billing_profile is None: + BillingProfile = cmd.get_models('BillingProfile') + vm.billing_profile = BillingProfile(max_price=max_price) + else: + vm.billing_profile.max_price = max_price + + if proximity_placement_group is not None: + vm.proximity_placement_group = {'id': proximity_placement_group} + + if enable_secure_boot is not None or enable_vtpm is not None: + if vm.security_profile is None: + vm.security_profile = SecurityProfile() + + vm.security_profile.uefi_settings = UefiSettings(secure_boot_enabled=enable_secure_boot, + v_tpm_enabled=enable_vtpm) + + if enable_proxy_agent is not None or proxy_agent_mode is not None: + ProxyAgentSettings = cmd.get_models('ProxyAgentSettings') + if vm.security_profile is None: + vm.security_profile = SecurityProfile() + vm.security_profile.proxy_agent_settings = ProxyAgentSettings(enabled=enable_proxy_agent, + mode=proxy_agent_mode) + elif vm.security_profile.proxy_agent_settings is None: + vm.security_profile.proxy_agent_settings = ProxyAgentSettings(enabled=enable_proxy_agent, + mode=proxy_agent_mode) + else: + vm.security_profile.proxy_agent_settings.enabled = enable_proxy_agent + 
vm.security_profile.proxy_agent_settings.mode = proxy_agent_mode
+
+    if workspace is not None:
+        workspace_id = _prepare_workspace(cmd, resource_group_name, workspace)
+        workspace_name = parse_resource_id(workspace_id)['name']
+        _set_log_analytics_workspace_extension(cmd=cmd,
+                                               resource_group_name=resource_group_name,
+                                               vm=vm,
+                                               vm_name=vm_name,
+                                               workspace_name=workspace_name)
+        os_type = vm.storage_profile.os_disk.os_type if vm.storage_profile.os_disk.os_type else None
+        _set_data_source_for_workspace(cmd, os_type, resource_group_name, workspace_name)
+
+    aux_subscriptions = None
+    if vm and vm.storage_profile and vm.storage_profile.image_reference and 'id' in vm.storage_profile.image_reference:
+        aux_subscriptions = _parse_aux_subscriptions(vm.storage_profile.image_reference['id'])
+
+    if size is not None:
+        if vm.hardware_profile.vm_size == size:
+            logger.warning("VM size is already %s", size)
+        else:
+            vm.hardware_profile.vm_size = size
+
+    if v_cpus_available is not None:
+        vm.hardware_profile.vm_size_properties.v_cpus_available = v_cpus_available
+
+    if v_cpus_per_core is not None:
+        vm.hardware_profile.vm_size_properties.v_cpus_per_core = v_cpus_per_core
+
+    if ephemeral_os_disk_placement is not None:
+        if vm.storage_profile.os_disk.diff_disk_settings is not None:
+            vm.storage_profile.os_disk.diff_disk_settings.placement = ephemeral_os_disk_placement
+        else:
+            raise ValidationError("The '--ephemeral-os-disk-placement' argument can only be used on a VM "
+                                  "that was created with the option '--ephemeral-os-disk true'")
+    if disk_controller_type is not None:
+        vm.storage_profile.disk_controller_type = disk_controller_type
+
+    client = _compute_client_factory(cmd.cli_ctx, aux_subscriptions=aux_subscriptions)
+    return sdk_no_wait(no_wait, client.virtual_machines.begin_create_or_update, resource_group_name, vm_name, **kwargs)
+
+
+# endregion
+
+
+# region VirtualMachines AvailabilitySets
+def _get_availset(cmd, resource_group_name, name):
+    return _compute_client_factory(cmd.cli_ctx).availability_sets.get(resource_group_name, name)
+
+
+def _set_availset(cmd, resource_group_name, name, **kwargs):
+    return _compute_client_factory(cmd.cli_ctx).availability_sets.create_or_update(resource_group_name, name, **kwargs)
+
+
+# pylint: disable=inconsistent-return-statements
+def convert_av_set_to_managed_disk(cmd, resource_group_name, availability_set_name):
+    av_set = _get_availset(cmd, resource_group_name, availability_set_name)
+    if av_set.sku.name != 'Aligned':
+        av_set.sku.name = 'Aligned'
+
+        # let us double-check whether the existing FD number is supported
+        skus = list_skus(cmd, av_set.location)
+        av_sku = next((s for s in skus if s.resource_type == 'availabilitySets' and s.name == 'Aligned'), None)
+        if av_sku and av_sku.capabilities:
+            max_fd = int(next((c.value for c in av_sku.capabilities if c.name == 'MaximumPlatformFaultDomainCount'),
+                              '0'))
+            if max_fd and max_fd < av_set.platform_fault_domain_count:
+                logger.warning("The fault domain count will be adjusted from %s to %s so as to stay within the "
+                               "region's limitation", av_set.platform_fault_domain_count, max_fd)
+                av_set.platform_fault_domain_count = max_fd
+
+        return _set_availset(cmd, resource_group_name=resource_group_name, name=availability_set_name,
+                             parameters=av_set)
+    logger.warning('Availability set %s is already configured for managed disks.', availability_set_name)
+
+
+def create_av_set(cmd, availability_set_name, resource_group_name, platform_fault_domain_count=2,
+                  platform_update_domain_count=None, location=None, 
proximity_placement_group=None, unmanaged=False, + no_wait=False, tags=None, validate=False): + from azure.cli.core.util import random_string + from azure.cli.core.commands.arm import ArmTemplateBuilder + from azure.cli.command_modules.vm.azure_stack._template_builder import build_av_set_resource + + tags = tags or {} + + # Build up the ARM template + master_template = ArmTemplateBuilder() + + av_set_resource = build_av_set_resource(cmd, availability_set_name, location, tags, + platform_update_domain_count, + platform_fault_domain_count, unmanaged, + proximity_placement_group=proximity_placement_group) + master_template.add_resource(av_set_resource) + + template = master_template.build() + + # deploy ARM template + deployment_name = 'av_set_deploy_' + random_string(32) + client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES).deployments + DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES) + properties = DeploymentProperties(template=template, parameters={}, mode='incremental') + Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES) + deployment = Deployment(properties=properties) + + if validate: + if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES): + validation_poller = client.begin_validate(resource_group_name, deployment_name, deployment) + return LongRunningOperation(cmd.cli_ctx)(validation_poller) + + return client.validate(resource_group_name, deployment_name, deployment) + + if no_wait: + return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, deployment_name, deployment) + LongRunningOperation(cmd.cli_ctx)(sdk_no_wait(no_wait, client.begin_create_or_update, + resource_group_name, deployment_name, deployment)) + + compute_client = _compute_client_factory(cmd.cli_ctx) + return compute_client.availability_sets.get(resource_group_name, availability_set_name) + + +def update_av_set(instance, resource_group_name, proximity_placement_group=None): + if proximity_placement_group is not None: + instance.proximity_placement_group = {'id': proximity_placement_group} + return instance + + +def list_av_sets(cmd, resource_group_name=None): + op_group = _compute_client_factory(cmd.cli_ctx).availability_sets + if resource_group_name: + return op_group.list(resource_group_name) + return op_group.list_by_subscription(expand='virtualMachines/$ref') + + +# endregion + + +# region VirtualMachines BootDiagnostics +def disable_boot_diagnostics(cmd, resource_group_name, vm_name): + vm = get_vm_to_update(cmd, resource_group_name, vm_name) + diag_profile = vm.diagnostics_profile + if not (diag_profile and diag_profile.boot_diagnostics and diag_profile.boot_diagnostics.enabled): + return + + diag_profile.boot_diagnostics.enabled = False + diag_profile.boot_diagnostics.storage_uri = None + set_vm(cmd, vm, ExtensionUpdateLongRunningOperation(cmd.cli_ctx, 'disabling boot diagnostics', 'done')) + + +def enable_boot_diagnostics(cmd, resource_group_name, vm_name, storage=None): + from azure.cli.command_modules.vm.azure_stack._vm_utils import get_storage_blob_uri + vm = get_vm_to_update(cmd, resource_group_name, vm_name) + storage_uri = None + if storage: + storage_uri = get_storage_blob_uri(cmd.cli_ctx, storage) + + DiagnosticsProfile, BootDiagnostics = cmd.get_models('DiagnosticsProfile', 'BootDiagnostics') + + boot_diag = BootDiagnostics(enabled=True, storage_uri=storage_uri) + if vm.diagnostics_profile is 
None:
+        vm.diagnostics_profile = DiagnosticsProfile(boot_diagnostics=boot_diag)
+    else:
+        vm.diagnostics_profile.boot_diagnostics = boot_diag
+
+    set_vm(cmd, vm, ExtensionUpdateLongRunningOperation(cmd.cli_ctx, 'enabling boot diagnostics', 'done'))
+
+
+class BootLogStreamWriter:  # pylint: disable=too-few-public-methods
+
+    def __init__(self, out):
+        self.out = out
+
+    def write(self, str_or_bytes):
+        content = str_or_bytes
+        if isinstance(str_or_bytes, bytes):
+            try:
+                content = str_or_bytes.decode('utf8')
+            except UnicodeDecodeError:
+                logger.warning("A few characters have been ignored because they were not valid unicode.")
+                content = str_or_bytes.decode('ascii', 'ignore')
+        try:
+            self.out.write(content)
+        except UnicodeEncodeError:
+            # e.g. 'charmap' codec can't encode characters in position 258829-258830: character maps to <undefined>
+            import unicodedata
+            ascii_content = unicodedata.normalize('NFKD', content).encode('ascii', 'ignore')
+            self.out.write(ascii_content.decode())
+            logger.warning("A few unicode characters have been ignored because the shell is not able to display. "
+                           "To see the full log, use a shell with unicode capability")
+
+
+def get_boot_log(cmd, resource_group_name, vm_name):
+    import re
+    import sys
+    from azure.cli.core.profiles import get_sdk
+    from azure.core.exceptions import HttpResponseError
+    BlockBlobService = get_sdk(cmd.cli_ctx, ResourceType.DATA_STORAGE, 'blob.blockblobservice#BlockBlobService')
+
+    client = _compute_client_factory(cmd.cli_ctx)
+
+    virtual_machine = client.virtual_machines.get(resource_group_name, vm_name, expand='instanceView')
+    # pylint: disable=no-member
+
+    blob_uri = None
+    if virtual_machine.instance_view and virtual_machine.instance_view.boot_diagnostics:
+        blob_uri = virtual_machine.instance_view.boot_diagnostics.serial_console_log_blob_uri
+
+    # Managed storage
+    if blob_uri is None:
+        try:
+            boot_diagnostics_data = client.virtual_machines.retrieve_boot_diagnostics_data(resource_group_name, vm_name)
+            blob_uri = boot_diagnostics_data.serial_console_log_blob_uri
+        except HttpResponseError:
+            pass
+        if blob_uri is None:
+            raise CLIError('Please enable boot diagnostics.')
+        return requests.get(blob_uri).content
+
+    # Find storage account for diagnostics
+    storage_mgmt_client = _get_storage_management_client(cmd.cli_ctx)
+    if not blob_uri:
+        raise CLIError('No console log available')
+    try:
+        storage_accounts = storage_mgmt_client.storage_accounts.list()
+        matching_storage_account = (a for a in list(storage_accounts)
+                                    if a.primary_endpoints.blob and blob_uri.startswith(a.primary_endpoints.blob))
+        storage_account = next(matching_storage_account)
+    except StopIteration:
+        raise CLIError('Failed to find storage account for console log file')
+
+    regex = r'/subscriptions/[^/]+/resourceGroups/(?P<rg>[^/]+)/.+'
+    match = re.search(regex, storage_account.id, re.I)
+    rg = match.group('rg')
+    # Get account key
+    keys = storage_mgmt_client.storage_accounts.list_keys(rg, storage_account.name)
+
+    # Extract container and blob name from url...
+    container, blob = urlparse(blob_uri).path.split('/')[-2:]
+
+    storage_client = get_data_service_client(
+        cmd.cli_ctx,
+        BlockBlobService,
+        storage_account.name,
+        keys.keys[0].value,
+        endpoint_suffix=cmd.cli_ctx.cloud.suffixes.storage_endpoint)  # pylint: disable=no-member
+
+    # our stream writer is not seekable, so no parallel download.
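+    # BootLogStreamWriter tolerates non-unicode bytes, so the raw blob can be streamed straight to stdout.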
+ storage_client.get_blob_to_stream(container, blob, BootLogStreamWriter(sys.stdout), max_connections=1) + + +def get_boot_log_uris(cmd, resource_group_name, vm_name, expire=None): + client = _compute_client_factory(cmd.cli_ctx) + return client.virtual_machines.retrieve_boot_diagnostics_data( + resource_group_name, vm_name, sas_uri_expiration_time_in_minutes=expire) + + +# endregion + + +# region VirtualMachines Diagnostics +def set_diagnostics_extension( + cmd, resource_group_name, vm_name, settings, protected_settings=None, version=None, + no_auto_upgrade=False): + client = _compute_client_factory(cmd.cli_ctx) + vm = client.virtual_machines.get(resource_group_name, vm_name, expand='instanceView') + # pylint: disable=no-member + is_linux_os = _is_linux_os(vm) + vm_extension_name = _LINUX_DIAG_EXT if is_linux_os else _WINDOWS_DIAG_EXT + if is_linux_os: # check incompatible version + exts = vm.instance_view.extensions or [] + major_ver = extension_mappings[_LINUX_DIAG_EXT]['version'].split('.')[0] + if next((e for e in exts if e.name == vm_extension_name and + not e.type_handler_version.startswith(major_ver + '.')), None): + logger.warning('There is an incompatible version of diagnostics extension installed. ' + 'We will update it with a new version') + poller = client.virtual_machine_extensions.begin_delete(resource_group_name, vm_name, vm_extension_name) + LongRunningOperation(cmd.cli_ctx)(poller) + + return set_extension(cmd, resource_group_name, vm_name, vm_extension_name, + extension_mappings[vm_extension_name]['publisher'], + version or extension_mappings[vm_extension_name]['version'], + settings, + protected_settings, + no_auto_upgrade) + + +def show_default_diagnostics_configuration(is_windows_os=False): + public_settings = get_default_diag_config(is_windows_os) + # pylint: disable=line-too-long + protected_settings_info = json.dumps({ + 'storageAccountName': "__STORAGE_ACCOUNT_NAME__", + # LAD and WAD are not consistent on sas token format. Call it out here + "storageAccountSasToken": "__SAS_TOKEN_{}__".format( + "WITH_LEADING_QUESTION_MARK" if is_windows_os else "WITHOUT_LEADING_QUESTION_MARK") + }, indent=2) + logger.warning( + 'Protected settings with storage account info is required to work with the default configurations, e.g. 
\n%s', + protected_settings_info) + return public_settings + + +# endregion + + +# region VirtualMachines Disks (Managed) +def attach_managed_data_disk(cmd, resource_group_name, vm_name, disk=None, ids=None, disks=None, new=False, sku=None, + size_gb=None, lun=None, caching=None, enable_write_accelerator=False, disk_ids=None): + # attach multiple managed disks using disk attach API + vm = get_vm_to_update(cmd, resource_group_name, vm_name) + if not new and not sku and not size_gb and disk_ids is not None: + if lun: + disk_lun = lun + else: + disk_lun = _get_disk_lun(vm.storage_profile.data_disks) + + data_disks = [] + for disk_item in disk_ids: + disk = { + 'diskId': disk_item, + 'caching': caching, + 'lun': disk_lun, + 'writeAcceleratorEnabled': enable_write_accelerator + } + data_disks.append(disk) + disk_lun += 1 + result = AttachDetachDataDisk(cli_ctx=cmd.cli_ctx)(command_args={ + 'vm_name': vm_name, + 'resource_group': resource_group_name, + 'data_disks_to_attach': data_disks + }) + return result + else: + # attach multiple managed disks using vm PUT API + from azure.mgmt.core.tools import parse_resource_id + DataDisk, ManagedDiskParameters, DiskCreateOption = cmd.get_models( + 'DataDisk', 'ManagedDiskParameters', 'DiskCreateOptionTypes') + if size_gb is None: + size_gb = 1023 + + if disk_ids is not None: + disks = disk_ids + + for disk_item in disks: + if lun: + disk_lun = lun + else: + disk_lun = _get_disk_lun(vm.storage_profile.data_disks) + + if new: + data_disk = DataDisk(lun=disk_lun, create_option=DiskCreateOption.empty, + name=parse_resource_id(disk_item)['name'], + disk_size_gb=size_gb, caching=caching, + managed_disk=ManagedDiskParameters(storage_account_type=sku)) + else: + params = ManagedDiskParameters(id=disk_item, storage_account_type=sku) + data_disk = DataDisk(lun=disk_lun, create_option=DiskCreateOption.attach, managed_disk=params, + caching=caching) + + if enable_write_accelerator: + data_disk.write_accelerator_enabled = enable_write_accelerator + + vm.storage_profile.data_disks.append(data_disk) + + set_vm(cmd, vm) + + +def detach_unmanaged_data_disk(cmd, resource_group_name, vm_name, disk_name): + # here we handle unmanaged disk + vm = get_vm_to_update(cmd, resource_group_name, vm_name) + # pylint: disable=no-member + leftovers = [d for d in vm.storage_profile.data_disks if d.name.lower() != disk_name.lower()] + if len(vm.storage_profile.data_disks) == len(leftovers): + raise CLIError("No disk with the name '{}' was found".format(disk_name)) + vm.storage_profile.data_disks = leftovers + set_vm(cmd, vm) + + +# endregion + + +def detach_managed_data_disk(cmd, resource_group_name, vm_name, disk_name=None, force_detach=None, disk_ids=None): + if disk_ids is not None: + data_disks = [] + for disk_item in disk_ids: + disk = {'diskId': disk_item, 'detachOption': 'ForceDetach' if force_detach else None} + data_disks.append(disk) + result = AttachDetachDataDisk(cli_ctx=cmd.cli_ctx)(command_args={ + 'vm_name': vm_name, + 'resource_group': resource_group_name, + 'data_disks_to_detach': data_disks + }) + return result + else: + # here we handle managed disk + vm = get_vm_to_update(cmd, resource_group_name, vm_name) + if not force_detach: + # pylint: disable=no-member + leftovers = [d for d in vm.storage_profile.data_disks if d.name.lower() != disk_name.lower()] + if len(vm.storage_profile.data_disks) == len(leftovers): + raise ResourceNotFoundError("No disk with the name '{}' was found".format(disk_name)) + else: + DiskDetachOptionTypes = 
cmd.get_models('DiskDetachOptionTypes', resource_type=ResourceType.MGMT_COMPUTE, + operation_group='virtual_machines') + leftovers = vm.storage_profile.data_disks + is_contains = False + for d in leftovers: + if d.name.lower() == disk_name.lower(): + d.to_be_detached = True + d.detach_option = DiskDetachOptionTypes.FORCE_DETACH + is_contains = True + break + if not is_contains: + raise ResourceNotFoundError("No disk with the name '{}' was found".format(disk_name)) + vm.storage_profile.data_disks = leftovers + set_vm(cmd, vm) + + +# endregion + + +# region VirtualMachines Extensions +def list_extensions(cmd, resource_group_name, vm_name): + vm = get_vm(cmd, resource_group_name, vm_name) + extension_type = 'Microsoft.Compute/virtualMachines/extensions' + result = [r for r in (vm.resources or []) if r.type == extension_type] + return result + + +def show_extensions(cmd, resource_group_name, vm_name, vm_extension_name, instance_view=False, expand=None): + if instance_view: + expand = 'instanceView' + client = _compute_client_factory(cmd.cli_ctx).virtual_machine_extensions + return client.get(resource_group_name=resource_group_name, + vm_name=vm_name, + vm_extension_name=vm_extension_name, + expand=expand) + + +def set_extension(cmd, resource_group_name, vm_name, vm_extension_name, publisher, version=None, settings=None, + protected_settings=None, no_auto_upgrade=False, force_update=False, no_wait=False, + extension_instance_name=None, enable_auto_upgrade=None): + vm = get_vm(cmd, resource_group_name, vm_name, 'instanceView') + client = _compute_client_factory(cmd.cli_ctx) + + if not extension_instance_name: + extension_instance_name = vm_extension_name + + VirtualMachineExtension = cmd.get_models('VirtualMachineExtension', + resource_type=ResourceType.MGMT_COMPUTE, + operation_group='virtual_machines') + instance_name = _get_extension_instance_name(vm.instance_view, publisher, vm_extension_name, + suggested_name=extension_instance_name) + if instance_name != extension_instance_name: + msg = "A %s extension with name %s already exists. Updating it with your settings..." 
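+        # Reusing the existing instance name below turns this into an update of the installed extension rather than a second install.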
+ logger.warning(msg, vm_extension_name, instance_name) + if vm_extension_name == 'AHBForRHEL': + logger.warning('Please ensure that you are provisioning AHBForRHEL extension ' + 'on a Red Hat based operating system.') + if vm_extension_name == 'AHBForSLES': + logger.warning('Please ensure that you are provisioning AHBForSLES extension on a SLES based operating system.') + + auto_upgrade_extensions = ['GuestAttestation', 'CodeIntegrityAgent'] + if vm_extension_name in auto_upgrade_extensions and enable_auto_upgrade is None: + enable_auto_upgrade = True + + version = _normalize_extension_version(cmd.cli_ctx, publisher, vm_extension_name, version, vm.location) + ext = VirtualMachineExtension(location=vm.location, + publisher=publisher, + type_properties_type=vm_extension_name, + protected_settings=protected_settings, + type_handler_version=version, + settings=settings, + auto_upgrade_minor_version=(not no_auto_upgrade), + enable_automatic_upgrade=enable_auto_upgrade) + if force_update: + ext.force_update_tag = str(_gen_guid()) + return sdk_no_wait(no_wait, client.virtual_machine_extensions.begin_create_or_update, + resource_group_name, vm_name, instance_name, ext) + + +# endregion + + +# region VirtualMachines Extension Images +def list_vm_extension_images( + cmd, image_location=None, publisher_name=None, name=None, version=None, latest=False): + return load_extension_images_thru_services( + cmd.cli_ctx, publisher_name, name, version, image_location, latest) + + +# endregion + + +# region VirtualMachines Identity +def _remove_identities(cmd, resource_group_name, name, identities, getter, setter): + from ._vm_utils import MSI_LOCAL_ID + ResourceIdentityType = cmd.get_models('ResourceIdentityType', operation_group='virtual_machines') + remove_system_assigned_identity = False + if MSI_LOCAL_ID in identities: + remove_system_assigned_identity = True + identities.remove(MSI_LOCAL_ID) + resource = getter(cmd, resource_group_name, name) + if resource.identity is None: + return None + emsis_to_remove = [] + if identities: + existing_emsis = {x.lower() for x in list((resource.identity.user_assigned_identities or {}).keys())} + emsis_to_remove = {x.lower() for x in identities} + non_existing = emsis_to_remove.difference(existing_emsis) + if non_existing: + raise CLIError("'{}' are not associated with '{}'".format(','.join(non_existing), name)) + if not list(existing_emsis - emsis_to_remove): # if all emsis are gone, we need to update the type + if resource.identity.type == ResourceIdentityType.user_assigned: + resource.identity.type = ResourceIdentityType.none + elif resource.identity.type == ResourceIdentityType.system_assigned_user_assigned: + resource.identity.type = ResourceIdentityType.system_assigned + + resource.identity.user_assigned_identities = None + if remove_system_assigned_identity: + resource.identity.type = (ResourceIdentityType.none + if resource.identity.type == ResourceIdentityType.system_assigned + else ResourceIdentityType.user_assigned) + + if emsis_to_remove: + if resource.identity.type not in [ResourceIdentityType.none, ResourceIdentityType.system_assigned]: + resource.identity.user_assigned_identities = {} + for identity in emsis_to_remove: + resource.identity.user_assigned_identities[identity] = None + + result = LongRunningOperation(cmd.cli_ctx)(setter(resource_group_name, name, resource)) + return result.identity + + +def remove_vm_identity(cmd, resource_group_name, vm_name, identities=None): + def setter(resource_group_name, vm_name, vm): + client = 
_compute_client_factory(cmd.cli_ctx)
+        VirtualMachineUpdate = cmd.get_models('VirtualMachineUpdate', operation_group='virtual_machines')
+        vm_update = VirtualMachineUpdate(identity=vm.identity)
+        return client.virtual_machines.begin_update(resource_group_name, vm_name, vm_update)
+
+    if identities is None:
+        from ._vm_utils import MSI_LOCAL_ID
+        identities = [MSI_LOCAL_ID]
+
+    return _remove_identities(cmd, resource_group_name, vm_name, identities, get_vm, setter)
+
+
+# region DiskEncryptionSets Identity
+def _remove_disk_encryption_set_identities(cmd, resource_group_name, name,
+                                           mi_system_assigned, mi_user_assigned, getter, setter):
+    IdentityType = cmd.get_models('DiskEncryptionSetIdentityType', operation_group='disk_encryption_sets')
+    remove_system_assigned_identity = mi_system_assigned is not None
+
+    resource = getter(cmd, resource_group_name, name)
+    if resource is None or resource.identity is None:
+        return None
+
+    user_identities_to_remove = []
+    if mi_user_assigned is not None:
+        existing_user_identities = {x.lower() for x in list((resource.identity.user_assigned_identities or {}).keys())}
+        # all user assigned identities will be removed if the length of mi_user_assigned is 0,
+        # otherwise only the specified identities
+        user_identities_to_remove = {x.lower() for x in mi_user_assigned} \
+            if len(mi_user_assigned) > 0 else existing_user_identities
+        non_existing = user_identities_to_remove.difference(existing_user_identities)
+        if non_existing:
+            from azure.cli.core.azclierror import InvalidArgumentValueError
+            raise InvalidArgumentValueError("'{}' are not associated with '{}', please provide existing user managed "
+                                            "identities".format(','.join(non_existing), name))
+        if not list(existing_user_identities - user_identities_to_remove):
+            if resource.identity.type == IdentityType.USER_ASSIGNED:
+                resource.identity.type = IdentityType.NONE
+            elif resource.identity.type == IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED:
+                resource.identity.type = IdentityType.SYSTEM_ASSIGNED
+
+    resource.identity.user_assigned_identities = None
+    if remove_system_assigned_identity:
+        resource.identity.type = (IdentityType.NONE
+                                  if resource.identity.type == IdentityType.SYSTEM_ASSIGNED
+                                  else IdentityType.USER_ASSIGNED)
+
+    if user_identities_to_remove:
+        if resource.identity.type not in [IdentityType.NONE, IdentityType.SYSTEM_ASSIGNED]:
+            resource.identity.user_assigned_identities = {}
+            for identity in user_identities_to_remove:
+                resource.identity.user_assigned_identities[identity] = None
+
+    result = LongRunningOperation(cmd.cli_ctx)(setter(resource_group_name, name, resource))
+    return result.identity
+
+
+# endregion
+
+
+# region VirtualMachines Images
+def list_vm_images(cmd, image_location=None, publisher_name=None, offer=None, sku=None, all=False,
+                   # pylint: disable=redefined-builtin
+                   edge_zone=None, architecture=None):
+    load_thru_services = all or edge_zone is not None
+
+    if load_thru_services:
+        if not publisher_name and not offer and not sku and not edge_zone:
+            logger.warning("You are retrieving all the images from the server, which could take more than a minute. "
+                           "To shorten the wait, provide '--publisher', '--offer', '--sku' or '--edge-zone'."
+ " Partial name search is supported.") + all_images = load_images_thru_services(cmd.cli_ctx, publisher_name, offer, sku, image_location, edge_zone, + architecture) + else: + all_images = load_images_from_aliases_doc(cmd.cli_ctx, publisher_name, offer, sku, architecture) + logger.warning('You are viewing an offline list of images, use --all to retrieve an up-to-date list') + + if edge_zone is not None: + for i in all_images: + i['urn'] = ':'.join([i['publisher'], i['offer'], i['sku'], i['edge_zone'], i['version']]) + else: + for i in all_images: + i['urn'] = ':'.join([i['publisher'], i['offer'], i['sku'], i['version']]) + return all_images + + +def list_offers(cmd, publisher_name, location, edge_zone=None): + if edge_zone is not None: + edge_zone_client = get_mgmt_service_client(cmd.cli_ctx, + ResourceType.MGMT_COMPUTE).virtual_machine_images_edge_zone + return edge_zone_client.list_offers(location=location, edge_zone=edge_zone, publisher_name=publisher_name) + else: + client = _compute_client_factory(cmd.cli_ctx).virtual_machine_images + return client.list_offers(location=location, publisher_name=publisher_name) + + +def list_publishers(cmd, location, edge_zone=None): + if edge_zone is not None: + edge_zone_client = get_mgmt_service_client(cmd.cli_ctx, + ResourceType.MGMT_COMPUTE).virtual_machine_images_edge_zone + return edge_zone_client.list_publishers(location=location, edge_zone=edge_zone) + else: + client = _compute_client_factory(cmd.cli_ctx).virtual_machine_images + return client.list_publishers(location=location) + + +def list_sku(cmd, location, publisher_name, offer, edge_zone=None, ): + if edge_zone is not None: + edge_zone_client = get_mgmt_service_client(cmd.cli_ctx, + ResourceType.MGMT_COMPUTE).virtual_machine_images_edge_zone + return edge_zone_client.list_skus(location=location, edge_zone=edge_zone, + publisher_name=publisher_name, offer=offer) + else: + client = _compute_client_factory(cmd.cli_ctx).virtual_machine_images + return client.list_skus(location=location, publisher_name=publisher_name, offer=offer) + + +def show_vm_image(cmd, urn=None, publisher=None, offer=None, sku=None, version=None, location=None, edge_zone=None): + from azure.cli.core.commands.parameters import get_one_of_subscription_locations + from azure.cli.core.azclierror import (MutuallyExclusiveArgumentError, + InvalidArgumentValueError) + + location = location or get_one_of_subscription_locations(cmd.cli_ctx) + error_msg = 'Please specify all of (--publisher, --offer, --sku, --version), or --urn' + if urn: + if any([publisher, offer, sku, edge_zone, version]): + recommendation = 'Try to use --urn publisher:offer:sku:version or' \ + ' --urn publisher:offer:sku:edge_zone:version' + raise MutuallyExclusiveArgumentError(error_msg, recommendation) + items = urn.split(":") + if len(items) != 4 and len(items) != 5: + raise InvalidArgumentValueError( + '--urn should be in the format of publisher:offer:sku:version or publisher:offer:sku:edge_zone:version') + if len(items) == 5: + publisher, offer, sku, edge_zone, version = urn.split(":") + elif len(items) == 4: + publisher, offer, sku, version = urn.split(":") + if version.lower() == 'latest': + version = _get_latest_image_version(cmd.cli_ctx, location, publisher, offer, sku) + elif not publisher or not offer or not sku or not version: + raise RequiredArgumentMissingError(error_msg) + if edge_zone is not None: + edge_zone_client = get_mgmt_service_client(cmd.cli_ctx, + ResourceType.MGMT_COMPUTE).virtual_machine_images_edge_zone + return 
edge_zone_client.get(location=location, edge_zone=edge_zone, publisher_name=publisher, offer=offer, + skus=sku, version=version) + else: + client = _compute_client_factory(cmd.cli_ctx) + return client.virtual_machine_images.get(location=location, publisher_name=publisher, + offer=offer, skus=sku, version=version) + + +def accept_market_ordering_terms(cmd, urn=None, publisher=None, offer=None, plan=None): + from azure.mgmt.marketplaceordering import MarketplaceOrderingAgreements + from azure.mgmt.marketplaceordering.models import OfferType + from azure.cli.core.azclierror import (MutuallyExclusiveArgumentError, + InvalidArgumentValueError) + + error_msg = 'Please specify all of (--plan, --offer, --publish), or --urn' + if urn: + if any([publisher, offer, plan]): + recommendation = 'Try to use --urn publisher:offer:sku:version only' + raise MutuallyExclusiveArgumentError(error_msg, recommendation) + items = urn.split(':') + if len(items) != 4: + raise InvalidArgumentValueError('--urn should be in the format of publisher:offer:sku:version') + publisher, offer, _, _ = items + image = show_vm_image(cmd, urn) + if not image.plan: + logger.warning("Image '%s' has no terms to accept.", urn) + return + plan = image.plan.name + else: + if not publisher or not offer or not plan: + raise RequiredArgumentMissingError(error_msg) + + market_place_client = get_mgmt_service_client(cmd.cli_ctx, MarketplaceOrderingAgreements) + + term = market_place_client.marketplace_agreements.get(offer_type=OfferType.VIRTUALMACHINE, + publisher_id=publisher, + offer_id=offer, + plan_id=plan) + term.accepted = True + return market_place_client.marketplace_agreements.create(offer_type=OfferType.VIRTUALMACHINE, + publisher_id=publisher, + offer_id=offer, + plan_id=plan, + parameters=term) + + +# endregion + + +def _terms_prepare(cmd, urn, publisher, offer, plan): + if urn: + if any([publisher, offer, plan]): + raise CLIError('usage error: If using --urn, do not use any of --plan, --offer, --publisher.') + terms = urn.split(':') + if len(terms) != 4: + raise CLIError('usage error: urn should be in the format of publisher:offer:sku:version.') + publisher, offer = terms[0], terms[1] + image = show_vm_image(cmd, urn) + if not image.plan: + raise CLIError("Image '%s' has no terms to accept." % urn) + plan = image.plan.name + else: + if not all([publisher, offer, plan]): + raise CLIError( + 'usage error: If not using --urn, all of --plan, --offer and --publisher should be provided.') + return publisher, offer, plan + + +def _accept_cancel_terms(cmd, urn, publisher, offer, plan, accept): + from azure.mgmt.marketplaceordering.models import OfferType + publisher, offer, plan = _terms_prepare(cmd, urn, publisher, offer, plan) + op = cf_vm_image_term(cmd.cli_ctx, '') + terms = op.get(offer_type=OfferType.VIRTUALMACHINE, + publisher_id=publisher, + offer_id=offer, + plan_id=plan) + terms.accepted = accept + return op.create(offer_type=OfferType.VIRTUALMACHINE, + publisher_id=publisher, + offer_id=offer, + plan_id=plan, + parameters=terms) + + +def accept_terms(cmd, urn=None, publisher=None, offer=None, plan=None): + """ + Accept Azure Marketplace image terms so that the image can be used to create VMs. + :param cmd:cmd + :param urn:URN, in the format of 'publisher:offer:sku:version'. 
If specified, other argument values can be omitted + :param publisher:Image publisher + :param offer:Image offer + :param plan:Image billing plan + :return: + """ + return _accept_cancel_terms(cmd, urn, publisher, offer, plan, True) + + +def cancel_terms(cmd, urn=None, publisher=None, offer=None, plan=None): + """ + Cancel Azure Marketplace image terms. + :param cmd:cmd + :param urn:URN, in the format of 'publisher:offer:sku:version'. If specified, other argument values can be omitted + :param publisher:Image publisher + :param offer:Image offer + :param plan:Image billing plan + :return: + """ + return _accept_cancel_terms(cmd, urn, publisher, offer, plan, False) + + +def get_terms(cmd, urn=None, publisher=None, offer=None, plan=None): + """ + Get the details of Azure Marketplace image terms. + :param cmd:cmd + :param urn:URN, in the format of 'publisher:offer:sku:version'. If specified, other argument values can be omitted + :param publisher:Image publisher + :param offer:Image offer + :param plan:Image billing plan + :return: + """ + from azure.mgmt.marketplaceordering.models import OfferType + publisher, offer, plan = _terms_prepare(cmd, urn, publisher, offer, plan) + op = cf_vm_image_term(cmd.cli_ctx, '') + terms = op.get(offer_type=OfferType.VIRTUALMACHINE, + publisher_id=publisher, + offer_id=offer, + plan_id=plan) + return terms + + +# region VirtualMachines NetworkInterfaces (NICs) +def show_vm_nic(cmd, resource_group_name, vm_name, nic): + from azure.mgmt.core.tools import parse_resource_id + + NicShow = import_aaz_by_profile(cmd.cli_ctx.cloud.profile, "network.nic").Show + + vm = get_vm(cmd, resource_group_name, vm_name) + found = next( + (n for n in vm.network_profile.network_interfaces if nic.lower() == n.id.lower()), None + # pylint: disable=no-member + ) + if found: + nic_name = parse_resource_id(found.id)['name'] + return NicShow(cli_ctx=cmd.cli_ctx)(command_args={ + 'name': nic_name, + 'resource_group': resource_group_name + }) + raise CLIError("NIC '{}' not found on VM '{}'".format(nic, vm_name)) + + +def list_vm_nics(cmd, resource_group_name, vm_name): + vm = get_vm(cmd, resource_group_name, vm_name) + return vm.network_profile.network_interfaces # pylint: disable=no-member + + +def add_vm_nic(cmd, resource_group_name, vm_name, nics, primary_nic=None): + vm = get_vm_to_update(cmd, resource_group_name, vm_name) + new_nics = _build_nic_list(cmd, nics) + existing_nics = _get_existing_nics(vm) + return _update_vm_nics(cmd, vm, existing_nics + new_nics, primary_nic) + + +def remove_vm_nic(cmd, resource_group_name, vm_name, nics, primary_nic=None): + def to_delete(nic_id): + return [n for n in nics_to_delete if n.id.lower() == nic_id.lower()] + + vm = get_vm_to_update(cmd, resource_group_name, vm_name) + nics_to_delete = _build_nic_list(cmd, nics) + existing_nics = _get_existing_nics(vm) + survived = [x for x in existing_nics if not to_delete(x.id)] + return _update_vm_nics(cmd, vm, survived, primary_nic) + + +def set_vm_nic(cmd, resource_group_name, vm_name, nics, primary_nic=None): + vm = get_vm_to_update(cmd, resource_group_name, vm_name) + nics = _build_nic_list(cmd, nics) + return _update_vm_nics(cmd, vm, nics, primary_nic) + + +def _build_nic_list(cmd, nic_ids): + NicShow = import_aaz_by_profile(cmd.cli_ctx.cloud.profile, "network.nic").Show + + NetworkInterfaceReference = cmd.get_models('NetworkInterfaceReference') + nic_list = [] + if nic_ids: + # pylint: disable=no-member + for nic_id in nic_ids: + rg, name = _parse_rg_name(nic_id) + nic = 
NicShow(cli_ctx=cmd.cli_ctx)(command_args={
+                'name': name,
+                'resource_group': rg
+            })
+            nic_list.append(NetworkInterfaceReference(id=nic['id'], primary=False))
+    return nic_list
+
+
+def _get_existing_nics(vm):
+    network_profile = getattr(vm, 'network_profile', None)
+    nics = []
+    if network_profile is not None:
+        nics = network_profile.network_interfaces or []
+    return nics
+
+
+def _update_vm_nics(cmd, vm, nics, primary_nic):
+    NetworkProfile = cmd.get_models('NetworkProfile')
+
+    if primary_nic:
+        try:
+            _, primary_nic_name = _parse_rg_name(primary_nic)
+        except IndexError:
+            primary_nic_name = primary_nic
+
+        matched = [n for n in nics if _parse_rg_name(n.id)[1].lower() == primary_nic_name.lower()]
+        if not matched:
+            raise CLIError('Primary NIC {} is not found'.format(primary_nic))
+        if len(matched) > 1:
+            raise CLIError('Duplicate NIC entries with name {}'.format(primary_nic))
+        for n in nics:
+            n.primary = False
+        matched[0].primary = True
+    elif nics:
+        if not [n for n in nics if n.primary]:
+            nics[0].primary = True
+
+    network_profile = getattr(vm, 'network_profile', None)
+    if network_profile is None:
+        vm.network_profile = NetworkProfile(network_interfaces=nics)
+    else:
+        network_profile.network_interfaces = nics
+
+    return set_vm(cmd, vm).network_profile.network_interfaces
+
+
+# endregion
+
+
+# region VirtualMachines RunCommand
+def run_command_invoke(cmd, resource_group_name, vm_vmss_name, command_id, scripts=None, parameters=None,
+                       instance_id=None):  # pylint: disable=line-too-long
+    RunCommandInput, RunCommandInputParameter = cmd.get_models('RunCommandInput', 'RunCommandInputParameter')
+
+    parameters = parameters or []
+    run_command_input_parameters = []
+    auto_arg_name_num = 0
+    for p in parameters:
+        if '=' in p:
+            n, v = p.split('=', 1)
+        else:
+            # The RunCommand API requires named arguments, which doesn't make much sense for bash
+            # scripts that use positional arguments, so we generate names here just to keep the API happy.
+            # Note: we don't handle mixed styles, but will consolidate by GA once the API is settled.
+            auto_arg_name_num += 1
+            n = 'arg{}'.format(auto_arg_name_num)
+            v = p
+        run_command_input_parameters.append(RunCommandInputParameter(name=n, value=v))
+
+    client = _compute_client_factory(cmd.cli_ctx)
+
+    # if instance_id, this is a vmss instance
+    if instance_id:
+        return client.virtual_machine_scale_set_vms.begin_run_command(
+            resource_group_name, vm_vmss_name, instance_id,
+            RunCommandInput(command_id=command_id, script=scripts,
+                            parameters=run_command_input_parameters))  # pylint: disable=line-too-long
+    # otherwise this is a regular vm instance
+    return client.virtual_machines.begin_run_command(
+        resource_group_name, vm_vmss_name,
+        RunCommandInput(command_id=command_id, script=scripts, parameters=run_command_input_parameters))
+
+
+def vm_run_command_invoke(cmd, resource_group_name, vm_name, command_id, scripts=None, parameters=None):
+    return run_command_invoke(cmd, resource_group_name, vm_name, command_id, scripts, parameters)
+
+
+def vm_run_command_create(client,
+                          resource_group_name,
+                          vm_name,
+                          run_command_name,
+                          location,
+                          tags=None,
+                          script=None,
+                          script_uri=None,
+                          command_id=None,
+                          parameters=None,
+                          protected_parameters=None,
+                          async_execution=None,
+                          run_as_user=None,
+                          run_as_password=None,
+                          timeout_in_seconds=None,
+                          output_blob_uri=None,
+                          error_blob_uri=None,
+                          no_wait=False):
+    run_command = {}
+    run_command['location'] = location
+    if tags is not None:
+        run_command['tags'] = tags
+    source = {}
+    if script is not None:
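+        # script, script_uri and command_id are alternative sources for the run command
+        # payload; inline script content is handled in this branch (a reading of the CLI
+        # parameter design, not a statement from the source).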
source['script'] = script + if script_uri is not None: + source['script_uri'] = script_uri + if command_id is not None: + source['command_id'] = command_id + run_command['source'] = source + if parameters is not None: + auto_arg_name_num = 0 + run_command['parameters'] = [] + for p in parameters: + if '=' in p: + n, v = p.split('=', 1) + else: + auto_arg_name_num += 1 + n = 'arg{}'.format(auto_arg_name_num) + v = p + run_command['parameters'].append({'name': n, 'value': v}) + if protected_parameters is not None: + auto_arg_name_num = 0 + run_command['protected_parameters'] = [] + for p in protected_parameters: + if '=' in p: + n, v = p.split('=', 1) + else: + auto_arg_name_num += 1 + n = 'arg{}'.format(auto_arg_name_num) + v = p + run_command['protected_parameters'].append({'name': n, 'value': v}) + if async_execution is not None: + run_command['async_execution'] = async_execution + else: + run_command['async_execution'] = False + if run_as_user is not None: + run_command['run_as_user'] = run_as_user + if run_as_password is not None: + run_command['run_as_password'] = run_as_password + if timeout_in_seconds is not None: + run_command['timeout_in_seconds'] = timeout_in_seconds + if output_blob_uri is not None: + run_command['output_blob_uri'] = output_blob_uri + if error_blob_uri is not None: + run_command['error_blob_uri'] = error_blob_uri + return sdk_no_wait(no_wait, + client.begin_create_or_update, + resource_group_name=resource_group_name, + vm_name=vm_name, + run_command_name=run_command_name, + run_command=run_command) + + +def vm_run_command_update(client, + resource_group_name, + vm_name, + run_command_name, + location, + tags=None, + script=None, + script_uri=None, + command_id=None, + parameters=None, + protected_parameters=None, + async_execution=None, + run_as_user=None, + run_as_password=None, + timeout_in_seconds=None, + output_blob_uri=None, + error_blob_uri=None, + no_wait=False): + run_command = {} + run_command['location'] = location + if tags is not None: + run_command['tags'] = tags + source = {} + if script is not None: + source['script'] = script + if script_uri is not None: + source['script_uri'] = script_uri + if command_id is not None: + source['command_id'] = command_id + run_command['source'] = source + if parameters is not None: + auto_arg_name_num = 0 + run_command['parameters'] = [] + for p in parameters: + if '=' in p: + n, v = p.split('=', 1) + else: + auto_arg_name_num += 1 + n = 'arg{}'.format(auto_arg_name_num) + v = p + run_command['parameters'].append({'name': n, 'value': v}) + if protected_parameters is not None: + auto_arg_name_num = 0 + run_command['protected_parameters'] = [] + for p in protected_parameters: + if '=' in p: + n, v = p.split('=', 1) + else: + auto_arg_name_num += 1 + n = 'arg{}'.format(auto_arg_name_num) + v = p + run_command['protected_parameters'].append({'name': n, 'value': v}) + if async_execution is not None: + run_command['async_execution'] = async_execution + else: + run_command['async_execution'] = False + if run_as_user is not None: + run_command['run_as_user'] = run_as_user + if run_as_password is not None: + run_command['run_as_password'] = run_as_password + if timeout_in_seconds is not None: + run_command['timeout_in_seconds'] = timeout_in_seconds + if output_blob_uri is not None: + run_command['output_blob_uri'] = output_blob_uri + if error_blob_uri is not None: + run_command['error_blob_uri'] = error_blob_uri + return sdk_no_wait(no_wait, + client.begin_update, + resource_group_name=resource_group_name, + 
vm_name=vm_name, + run_command_name=run_command_name, + run_command=run_command) + + +def vm_run_command_delete(client, + resource_group_name, + vm_name, + run_command_name, + no_wait=False): + return sdk_no_wait(no_wait, + client.begin_delete, + resource_group_name=resource_group_name, + vm_name=vm_name, + run_command_name=run_command_name) + + +def vm_run_command_list(client, + resource_group_name=None, + vm_name=None, + expand=None, + location=None): + if not location and not (resource_group_name and vm_name): + raise RequiredArgumentMissingError("Please specify --location or specify --vm-name and --resource-group") + + if vm_name: + return client.list_by_virtual_machine(resource_group_name=resource_group_name, vm_name=vm_name, expand=expand) + + return client.list(location=location) + + +def vm_run_command_show(client, + resource_group_name=None, + vm_name=None, + run_command_name=None, + expand=None, + instance_view=False, + location=None, + command_id=None): + if not (resource_group_name and vm_name and run_command_name) and not (location and command_id): + raise RequiredArgumentMissingError( + "Please specify --location and --command-id or specify --vm-name, --resource-group and --run-command-name") + + if vm_name: + if instance_view: + expand = 'instanceView' + return client.get_by_virtual_machine(resource_group_name=resource_group_name, vm_name=vm_name, + run_command_name=run_command_name, expand=expand) + + return client.get(location=location, command_id=command_id) + + +# endregion + + +# region VirtualMachines Secrets +def _get_vault_id_from_name(cli_ctx, client, vault_name): + group_name = _get_resource_group_from_vault_name(cli_ctx, vault_name) + if not group_name: + raise CLIError("unable to find vault '{}' in current subscription.".format(vault_name)) + vault = client.get(group_name, vault_name) + return vault.id + + +def get_vm_format_secret(cmd, secrets, certificate_store=None, keyvault=None, resource_group_name=None): + from azure.keyvault.secrets._shared import parse_key_vault_id + import re + client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_KEYVAULT).vaults + grouped_secrets = {} + + merged_secrets = [] + for s in secrets: + merged_secrets += s.splitlines() + + # group secrets by source vault + for secret in merged_secrets: + parsed = parse_key_vault_id(secret) + match = re.search('://(.+?)\\.', parsed.vault_url) + vault_name = match.group(1) + if vault_name not in grouped_secrets: + grouped_secrets[vault_name] = { + 'vaultCertificates': [], + 'id': keyvault or _get_vault_id_from_name(cmd.cli_ctx, client, vault_name) + } + + vault_cert = {'certificateUrl': secret} + if certificate_store: + vault_cert['certificateStore'] = certificate_store + + grouped_secrets[vault_name]['vaultCertificates'].append(vault_cert) + + # transform the reduced map to vm format + formatted = [{'sourceVault': {'id': value['id']}, + 'vaultCertificates': value['vaultCertificates']} + for _, value in list(grouped_secrets.items())] + + return formatted + + +def add_vm_secret(cmd, resource_group_name, vm_name, keyvault, certificate, certificate_store=None): + from azure.mgmt.core.tools import parse_resource_id + from ._vm_utils import create_data_plane_keyvault_certificate_client, get_key_vault_base_url + VaultSecretGroup, SubResource, VaultCertificate = cmd.get_models( + 'VaultSecretGroup', 'SubResource', 'VaultCertificate') + vm = get_vm_to_update(cmd, resource_group_name, vm_name) + + if '://' not in certificate: # has a cert name rather a full url? 
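+        # A bare certificate name (e.g. 'mycert', a hypothetical value) is resolved below
+        # to its full Key Vault secret URL, typically of the form
+        # https://<vault>.vault.azure.net/secrets/mycert/<version>, via the data-plane client.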
+ keyvault_client = create_data_plane_keyvault_certificate_client( + cmd.cli_ctx, get_key_vault_base_url(cmd.cli_ctx, parse_resource_id(keyvault)['name'])) + cert_info = keyvault_client.get_certificate(certificate) + certificate = cert_info.secret_id + + if not _is_linux_os(vm): + certificate_store = certificate_store or 'My' + elif certificate_store: + raise CLIError('Usage error: --certificate-store is only applicable on Windows VM') + vault_cert = VaultCertificate(certificate_url=certificate, certificate_store=certificate_store) + vault_secret_group = next((x for x in vm.os_profile.secrets + if x.source_vault and x.source_vault.id.lower() == keyvault.lower()), None) + if vault_secret_group: + vault_secret_group.vault_certificates.append(vault_cert) + else: + vault_secret_group = VaultSecretGroup(source_vault=SubResource(id=keyvault), vault_certificates=[vault_cert]) + vm.os_profile.secrets.append(vault_secret_group) + vm = set_vm(cmd, vm) + return vm.os_profile.secrets + + +def list_vm_secrets(cmd, resource_group_name, vm_name): + vm = get_vm(cmd, resource_group_name, vm_name) + if vm.os_profile: + return vm.os_profile.secrets + return [] + + +def remove_vm_secret(cmd, resource_group_name, vm_name, keyvault, certificate=None): + vm = get_vm_to_update(cmd, resource_group_name, vm_name) + + # support 2 kinds of filter: + # a. if only keyvault is supplied, we delete its whole vault group. + # b. if both keyvault and certificate are supplied, we only delete the specific cert entry. + + to_keep = vm.os_profile.secrets + keyvault_matched = [] + if keyvault: + keyvault = keyvault.lower() + keyvault_matched = [x for x in to_keep if x.source_vault and x.source_vault.id.lower() == keyvault] + + if keyvault and not certificate: + to_keep = [x for x in to_keep if x not in keyvault_matched] + elif certificate: + temp = keyvault_matched if keyvault else to_keep + cert_url_pattern = certificate.lower() + if '://' not in cert_url_pattern: # just a cert name? 
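+            # Wrapping the bare name in slashes ('mycert' -> '/mycert/', hypothetical value)
+            # restricts the substring match below to the name segment of the secret URL,
+            # so a certificate whose name merely contains the pattern is not removed.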
+ cert_url_pattern = '/' + cert_url_pattern + '/' + for x in temp: + x.vault_certificates = ([v for v in x.vault_certificates + if not (v.certificate_url and cert_url_pattern in v.certificate_url.lower())]) + to_keep = [x for x in to_keep if x.vault_certificates] # purge all groups w/o any cert entries + + vm.os_profile.secrets = to_keep + vm = set_vm(cmd, vm) + return vm.os_profile.secrets + + +# endregion + + +# region VirtualMachines UnmanagedDisks +def attach_unmanaged_data_disk(cmd, resource_group_name, vm_name, new=False, vhd_uri=None, lun=None, + disk_name=None, size_gb=1023, caching=None): + DataDisk, DiskCreateOptionTypes, VirtualHardDisk = cmd.get_models( + 'DataDisk', 'DiskCreateOptionTypes', 'VirtualHardDisk') + if not new and not disk_name: + raise CLIError('Please provide the name of the existing disk to attach') + create_option = DiskCreateOptionTypes.empty if new else DiskCreateOptionTypes.attach + + vm = get_vm_to_update(cmd, resource_group_name, vm_name) + if disk_name is None: + import datetime + disk_name = vm_name + '-' + datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S") + # pylint: disable=no-member + if vhd_uri is None: + if not hasattr(vm.storage_profile.os_disk, 'vhd') or not vm.storage_profile.os_disk.vhd: + raise CLIError('Adding unmanaged disks to a VM with managed disks is not supported') + blob_uri = vm.storage_profile.os_disk.vhd.uri + vhd_uri = blob_uri[0:blob_uri.rindex('/') + 1] + disk_name + '.vhd' + + if lun is None: + lun = _get_disk_lun(vm.storage_profile.data_disks) + disk = DataDisk(lun=lun, vhd=VirtualHardDisk(uri=vhd_uri), name=disk_name, + create_option=create_option, + caching=caching, disk_size_gb=size_gb if new else None) + if vm.storage_profile.data_disks is None: + vm.storage_profile.data_disks = [] + vm.storage_profile.data_disks.append(disk) + return set_vm(cmd, vm) + + +def list_unmanaged_disks(cmd, resource_group_name, vm_name): + vm = get_vm(cmd, resource_group_name, vm_name) + return vm.storage_profile.data_disks # pylint: disable=no-member + + +# endregion + + +# region VirtualMachines Users +def _update_linux_access_extension(cmd, vm_instance, resource_group_name, protected_settings, + no_wait=False): + client = _compute_client_factory(cmd.cli_ctx) + + VirtualMachineExtension = cmd.get_models('VirtualMachineExtension') + + # pylint: disable=no-member + instance_name = _get_extension_instance_name(vm_instance.instance_view, + extension_mappings[_LINUX_ACCESS_EXT]['publisher'], + _LINUX_ACCESS_EXT, + _ACCESS_EXT_HANDLER_NAME) + + publisher, version, auto_upgrade = _get_access_extension_upgrade_info( + vm_instance.resources, _LINUX_ACCESS_EXT) + + ext = VirtualMachineExtension(location=vm_instance.location, # pylint: disable=no-member + publisher=publisher, + type_properties_type=_LINUX_ACCESS_EXT, + protected_settings=protected_settings, + type_handler_version=version, + settings={}, + auto_upgrade_minor_version=auto_upgrade) + return sdk_no_wait(no_wait, client.virtual_machine_extensions.begin_create_or_update, + resource_group_name, vm_instance.name, instance_name, ext) + + +def _set_linux_user(cmd, vm_instance, resource_group_name, username, + password=None, ssh_key_value=None, no_wait=False): + protected_settings = {} + protected_settings['username'] = username + if password: + protected_settings['password'] = password + elif not ssh_key_value and not password: # default to ssh + ssh_key_value = os.path.join(os.path.expanduser('~'), '.ssh', 'id_rsa.pub') + + if ssh_key_value: + protected_settings['ssh_key'] = 
read_content_if_is_file(ssh_key_value)
+
+    if no_wait:
+        return _update_linux_access_extension(cmd, vm_instance, resource_group_name,
+                                              protected_settings, no_wait)
+    poller = _update_linux_access_extension(cmd, vm_instance, resource_group_name,
+                                            protected_settings)
+    return ExtensionUpdateLongRunningOperation(cmd.cli_ctx, 'setting user', 'done')(poller)
+
+
+def _reset_windows_admin(cmd, vm_instance, resource_group_name, username, password, no_wait=False):
+    '''Update the password. You can only change the password. Adding a new user is not supported. '''
+    client = _compute_client_factory(cmd.cli_ctx)
+    VirtualMachineExtension = cmd.get_models('VirtualMachineExtension')
+
+    publisher, version, auto_upgrade = _get_access_extension_upgrade_info(
+        vm_instance.resources, _WINDOWS_ACCESS_EXT)
+    # pylint: disable=no-member
+    instance_name = _get_extension_instance_name(vm_instance.instance_view,
+                                                 publisher,
+                                                 _WINDOWS_ACCESS_EXT,
+                                                 _ACCESS_EXT_HANDLER_NAME)
+
+    ext = VirtualMachineExtension(location=vm_instance.location,  # pylint: disable=no-member
+                                  publisher=publisher,
+                                  type_properties_type=_WINDOWS_ACCESS_EXT,
+                                  protected_settings={'Password': password},
+                                  type_handler_version=version,
+                                  settings={'UserName': username},
+                                  auto_upgrade_minor_version=auto_upgrade)
+
+    if no_wait:
+        return sdk_no_wait(no_wait, client.virtual_machine_extensions.begin_create_or_update,
+                           resource_group_name, vm_instance.name, instance_name, ext)
+    poller = client.virtual_machine_extensions.begin_create_or_update(
+        resource_group_name, vm_instance.name, instance_name, ext)
+    return ExtensionUpdateLongRunningOperation(cmd.cli_ctx, 'resetting admin', 'done')(poller)
+
+
+def set_user(cmd, resource_group_name, vm_name, username, password=None, ssh_key_value=None,
+             no_wait=False):
+    vm = get_vm(cmd, resource_group_name, vm_name, 'instanceView')
+    if _is_linux_os(vm):
+        return _set_linux_user(cmd, vm, resource_group_name, username, password, ssh_key_value, no_wait)
+    if ssh_key_value:
+        raise CLIError('SSH key is not applicable on a Windows VM')
+    return _reset_windows_admin(cmd, vm, resource_group_name, username, password, no_wait)
+
+
+def delete_user(cmd, resource_group_name, vm_name, username, no_wait=False):
+    vm = get_vm(cmd, resource_group_name, vm_name, 'instanceView')
+    if not _is_linux_os(vm):
+        raise CLIError('Deleting a user is not supported on a Windows VM')
+    if no_wait:
+        return _update_linux_access_extension(cmd, vm, resource_group_name,
+                                              {'remove_user': username}, no_wait)
+    poller = _update_linux_access_extension(cmd, vm, resource_group_name,
+                                            {'remove_user': username})
+    return ExtensionUpdateLongRunningOperation(cmd.cli_ctx, 'deleting user', 'done')(poller)
+
+
+def reset_linux_ssh(cmd, resource_group_name, vm_name, no_wait=False):
+    vm = get_vm(cmd, resource_group_name, vm_name, 'instanceView')
+    if not _is_linux_os(vm):
+        raise CLIError('Resetting SSH is not supported on a Windows VM')
+    if no_wait:
+        return _update_linux_access_extension(cmd, vm, resource_group_name,
+                                              {'reset_ssh': True}, no_wait)
+    poller = _update_linux_access_extension(cmd, vm, resource_group_name,
+                                            {'reset_ssh': True})
+    return ExtensionUpdateLongRunningOperation(cmd.cli_ctx, 'resetting SSH', 'done')(poller)
+
+
+# endregion
+
+
+# region VirtualMachineScaleSets
+def assign_vmss_identity(cmd, resource_group_name, vmss_name, assign_identity=None, identity_role=None,
+                         identity_role_id=None, identity_scope=None):
+    VirtualMachineScaleSetIdentity, UpgradeMode, ResourceIdentityType, VirtualMachineScaleSetUpdate = cmd.get_models(
'VirtualMachineScaleSetIdentity', 'UpgradeMode', 'ResourceIdentityType', 'VirtualMachineScaleSetUpdate') + IdentityUserAssignedIdentitiesValue = cmd.get_models( + 'VirtualMachineScaleSetIdentityUserAssignedIdentitiesValue') or cmd.get_models('UserAssignedIdentitiesValue') + from azure.cli.core.commands.arm import assign_identity as assign_identity_helper + client = _compute_client_factory(cmd.cli_ctx) + _, _, external_identities, enable_local_identity = _build_identities_info(assign_identity) + + def getter(): + return client.virtual_machine_scale_sets.get(resource_group_name, vmss_name) + + def setter(vmss, external_identities=external_identities): + + if vmss.identity and vmss.identity.type == ResourceIdentityType.system_assigned_user_assigned: + identity_types = ResourceIdentityType.system_assigned_user_assigned + elif vmss.identity and vmss.identity.type == ResourceIdentityType.system_assigned and external_identities: + identity_types = ResourceIdentityType.system_assigned_user_assigned + elif vmss.identity and vmss.identity.type == ResourceIdentityType.user_assigned and enable_local_identity: + identity_types = ResourceIdentityType.system_assigned_user_assigned + elif external_identities and enable_local_identity: + identity_types = ResourceIdentityType.system_assigned_user_assigned + elif external_identities: + identity_types = ResourceIdentityType.user_assigned + else: + identity_types = ResourceIdentityType.system_assigned + vmss.identity = VirtualMachineScaleSetIdentity(type=identity_types) + if external_identities: + vmss.identity.user_assigned_identities = {} + for identity in external_identities: + vmss.identity.user_assigned_identities[identity] = IdentityUserAssignedIdentitiesValue() + vmss_patch = VirtualMachineScaleSetUpdate() + vmss_patch.identity = vmss.identity + poller = client.virtual_machine_scale_sets.begin_update(resource_group_name, vmss_name, vmss_patch) + return LongRunningOperation(cmd.cli_ctx)(poller) + + assign_identity_helper(cmd.cli_ctx, getter, setter, identity_role=identity_role_id, identity_scope=identity_scope) + vmss = client.virtual_machine_scale_sets.get(resource_group_name, vmss_name) + if vmss.upgrade_policy.mode == UpgradeMode.manual: + logger.warning("With manual upgrade mode, you will need to run 'az vmss update-instances -g %s -n %s " + "--instance-ids *' to propagate the change", resource_group_name, vmss_name) + + return _construct_identity_info(identity_scope, identity_role, vmss.identity.principal_id, + vmss.identity.user_assigned_identities) + + +# pylint: disable=too-many-locals, too-many-statements +def create_vmss(cmd, vmss_name, resource_group_name, image=None, + disable_overprovision=None, instance_count=2, + location=None, tags=None, upgrade_policy_mode='manual', validate=False, + admin_username=None, admin_password=None, authentication_type=None, + vm_sku=None, no_wait=False, + ssh_dest_key_path=None, ssh_key_value=None, generate_ssh_keys=False, ssh_key_type=None, + load_balancer=None, load_balancer_sku=None, application_gateway=None, + app_gateway_subnet_address_prefix=None, + app_gateway_sku='Standard_Large', app_gateway_capacity=10, + backend_pool_name=None, nat_pool_name=None, backend_port=None, health_probe=None, + public_ip_address=None, public_ip_address_allocation=None, + public_ip_address_dns_name=None, accelerated_networking=None, + public_ip_per_vm=False, vm_domain_name=None, dns_servers=None, nsg=None, + os_caching=None, data_caching=None, + storage_container_name='vhds', storage_sku=None, + os_type=None, 
os_disk_name=None, + use_unmanaged_disk=False, data_disk_sizes_gb=None, disk_info=None, + vnet_name=None, vnet_address_prefix='10.0.0.0/16', + subnet=None, subnet_address_prefix=None, + os_offer=None, os_publisher=None, os_sku=None, os_version=None, + load_balancer_type=None, app_gateway_type=None, vnet_type=None, + public_ip_address_type=None, storage_profile=None, + single_placement_group=None, custom_data=None, secrets=None, platform_fault_domain_count=None, + plan_name=None, plan_product=None, plan_publisher=None, plan_promotion_code=None, license_type=None, + assign_identity=None, identity_scope=None, identity_role=None, + identity_role_id=None, zones=None, priority=None, eviction_policy=None, + application_security_groups=None, ultra_ssd_enabled=None, + ephemeral_os_disk=None, ephemeral_os_disk_placement=None, + proximity_placement_group=None, aux_subscriptions=None, terminate_notification_time=None, + max_price=None, computer_name_prefix=None, orchestration_mode=None, scale_in_policy=None, + os_disk_encryption_set=None, data_disk_encryption_sets=None, data_disk_iops=None, data_disk_mbps=None, + automatic_repairs_grace_period=None, specialized=None, os_disk_size_gb=None, encryption_at_host=None, + host_group=None, max_batch_instance_percent=None, max_unhealthy_instance_percent=None, + max_unhealthy_upgraded_instance_percent=None, pause_time_between_batches=None, + enable_cross_zone_upgrade=None, prioritize_unhealthy_instances=None, edge_zone=None, + user_data=None, network_api_version=None, enable_spot_restore=None, spot_restore_timeout=None, + capacity_reservation_group=None, enable_auto_update=None, patch_mode=None, enable_agent=None, + security_type=None, enable_secure_boot=None, enable_vtpm=None, automatic_repairs_action=None, + v_cpus_available=None, v_cpus_per_core=None, accept_term=None, + disable_integrity_monitoring=None, # Unused + enable_integrity_monitoring=False, enable_auto_os_upgrade=None, + os_disk_security_encryption_type=None, os_disk_secure_vm_disk_encryption_set=None, + os_disk_delete_option=None, data_disk_delete_option=None, regular_priority_count=None, + regular_priority_percentage=None, disk_controller_type=None, nat_rule_name=None, + enable_osimage_notification=None, max_surge=None, disable_integrity_monitoring_autoupgrade=False, + enable_hibernation=None, enable_proxy_agent=None, proxy_agent_mode=None, + security_posture_reference_id=None, security_posture_reference_exclude_extensions=None, + enable_resilient_creation=None, enable_resilient_deletion=None, + additional_scheduled_events=None, enable_user_reboot_scheduled_events=None, + enable_user_redeploy_scheduled_events=None, + skuprofile_vmsizes=None, skuprofile_allostrat=None): + from azure.cli.core.commands.client_factory import get_subscription_id + from azure.cli.core.util import random_string, hash_string + from azure.cli.core.commands.arm import ArmTemplateBuilder + from azure.cli.command_modules.vm.azure_stack._template_builder import (StorageProfile, build_vmss_resource, + build_vnet_resource, + build_public_ip_resource, + build_load_balancer_resource, + build_vmss_storage_account_pool_resource, + build_application_gateway_resource, + build_msi_role_assignment, + build_nsg_resource, + build_nat_rule_v2) + + # The default load balancer will be expected to be changed from Basic to Standard, and Basic will be removed. + # In order to avoid breaking change which has a big impact to users, + # we use the hint to guide users to use Standard load balancer to create VMSS in the first stage. 
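+    # Illustrative example (assumed invocation, not from the source): passing
+    # '--lb-sku Basic' to 'az vmss create' still proceeds here, but surfaces
+    # remove_basic_option_msg recommending '--lb-sku Standard' instead.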
+ if load_balancer_sku == 'Basic': + logger.warning(remove_basic_option_msg, "--lb-sku Standard") + + # Build up the ARM template + master_template = ArmTemplateBuilder() + + uniform_str = 'Uniform' + flexible_str = 'Flexible' + if orchestration_mode: + from azure.mgmt.core.tools import resource_id, is_valid_resource_id + + if disk_info: + storage_sku = disk_info['os'].get('storageAccountType') + + subscription_id = get_subscription_id(cmd.cli_ctx) + + if os_disk_encryption_set is not None and not is_valid_resource_id(os_disk_encryption_set): + os_disk_encryption_set = resource_id( + subscription=subscription_id, resource_group=resource_group_name, + namespace='Microsoft.Compute', type='diskEncryptionSets', name=os_disk_encryption_set) + if os_disk_secure_vm_disk_encryption_set is not None and \ + not is_valid_resource_id(os_disk_secure_vm_disk_encryption_set): + os_disk_secure_vm_disk_encryption_set = resource_id( + subscription=subscription_id, resource_group=resource_group_name, + namespace='Microsoft.Compute', type='diskEncryptionSets', name=os_disk_secure_vm_disk_encryption_set) + + if data_disk_encryption_sets is None: + data_disk_encryption_sets = [] + for i, des in enumerate(data_disk_encryption_sets): + if des is not None and not is_valid_resource_id(des): + data_disk_encryption_sets[i] = resource_id( + subscription=subscription_id, resource_group=resource_group_name, + namespace='Microsoft.Compute', type='diskEncryptionSets', name=des) + + network_id_template = resource_id( + subscription=subscription_id, resource_group=resource_group_name, + namespace='Microsoft.Network') + + vmss_id = resource_id( + subscription=subscription_id, resource_group=resource_group_name, + namespace='Microsoft.Compute', type='virtualMachineScaleSets', name=vmss_name) + + scrubbed_name = vmss_name.replace('-', '').lower()[:5] + naming_prefix = '{}{}'.format(scrubbed_name, + hash_string(vmss_id, + length=(9 - len(scrubbed_name)), + force_lower=True)) + + # determine final defaults and calculated values + tags = tags or {} + os_disk_name = os_disk_name or ('osdisk_{}'.format(hash_string(vmss_id, length=10)) + if use_unmanaged_disk else None) + load_balancer = load_balancer or '{}LB'.format(vmss_name) + app_gateway = application_gateway or '{}AG'.format(vmss_name) + backend_pool_name = backend_pool_name or '{}BEPool'.format(load_balancer or application_gateway) + + vmss_dependencies = [] + + # VNET will always be a dependency + if vnet_type == 'new': + vnet_name = vnet_name or '{}VNET'.format(vmss_name) + subnet = subnet or '{}Subnet'.format(vmss_name) + vmss_dependencies.append('Microsoft.Network/virtualNetworks/{}'.format(vnet_name)) + vnet = build_vnet_resource( + cmd, vnet_name, location, tags, vnet_address_prefix, subnet, subnet_address_prefix, edge_zone=edge_zone) + if app_gateway_type: + vnet['properties']['subnets'].append({ + 'name': 'appGwSubnet', + 'properties': { + 'addressPrefix': app_gateway_subnet_address_prefix + } + }) + master_template.add_resource(vnet) + if subnet: + subnet_id = subnet if is_valid_resource_id(subnet) else \ + '{}/virtualNetworks/{}/subnets/{}'.format(network_id_template, vnet_name, subnet) + else: + subnet_id = None + + if vnet_name: + gateway_subnet_id = ('{}/virtualNetworks/{}/subnets/appGwSubnet'.format(network_id_template, vnet_name) + if app_gateway_type == 'new' else None) + else: + gateway_subnet_id = None + + # public IP is used by either load balancer/application gateway + public_ip_address_id = None + if public_ip_address: + public_ip_address_id = 
(public_ip_address if is_valid_resource_id(public_ip_address)
+                                 else '{}/publicIPAddresses/{}'.format(network_id_template,
+                                                                       public_ip_address))
+
+        def _get_public_ip_address_allocation(value, sku):
+            if not value:
+                value = 'Static' if (sku and sku.lower() == 'standard') else 'Dynamic'
+            return value
+
+        # Handle load balancer creation
+        if load_balancer_type == 'new':
+            vmss_dependencies.append('Microsoft.Network/loadBalancers/{}'.format(load_balancer))
+
+            lb_dependencies = []
+            if vnet_type == 'new':
+                lb_dependencies.append('Microsoft.Network/virtualNetworks/{}'.format(vnet_name))
+            if public_ip_address_type == 'new':
+                public_ip_address = public_ip_address or '{}PublicIP'.format(load_balancer)
+                lb_dependencies.append(
+                    'Microsoft.Network/publicIpAddresses/{}'.format(public_ip_address))
+                master_template.add_resource(build_public_ip_resource(
+                    cmd, public_ip_address, location, tags,
+                    _get_public_ip_address_allocation(public_ip_address_allocation, load_balancer_sku),
+                    public_ip_address_dns_name, load_balancer_sku, zones, edge_zone=edge_zone))
+                public_ip_address_id = '{}/publicIPAddresses/{}'.format(network_id_template,
+                                                                        public_ip_address)
+
+            if nat_rule_name and nat_pool_name:
+                from azure.cli.core.azclierror import MutuallyExclusiveArgumentError
+                raise MutuallyExclusiveArgumentError(
+                    'Please do not pass in both "--nat-pool-name" and "--nat-rule-name" parameters at the same time. '
+                    'The "--nat-rule-name" parameter is recommended.')
+
+            is_basic_lb_sku = not load_balancer_sku or load_balancer_sku.lower() != 'standard'
+            # calculate default names if not provided
+            if orchestration_mode.lower() == flexible_str.lower():
+                # inbound nat pools are not supported on VMSS Flex
+                nat_pool_name = None
+            elif nat_pool_name or (not nat_rule_name and is_basic_lb_sku):
+                nat_pool_name = nat_pool_name or '{}NatPool'.format(load_balancer)
+
+            if not backend_port:
+                backend_port = 3389 if os_type == 'windows' else 22
+
+            frontend_ip_name = 'loadBalancerFrontEnd'
+            lb_resource = build_load_balancer_resource(
+                cmd, load_balancer, location, tags, backend_pool_name, nat_pool_name, backend_port,
+                frontend_ip_name, public_ip_address_id, subnet_id, private_ip_address='',
+                private_ip_allocation='Dynamic', sku=load_balancer_sku, instance_count=instance_count,
+                disable_overprovision=disable_overprovision, edge_zone=edge_zone)
+            lb_resource['dependsOn'] = lb_dependencies
+            master_template.add_resource(lb_resource)
+
+            # Per https://docs.microsoft.com/azure/load-balancer/load-balancer-standard-overview#nsg
+            if load_balancer_sku and load_balancer_sku.lower() == 'standard' and nsg is None and os_type:
+                nsg_name = '{}NSG'.format(vmss_name)
+                master_template.add_resource(build_nsg_resource(
+                    None, nsg_name, location, tags, 'rdp' if os_type.lower() == 'windows' else 'ssh'))
+                nsg = "[resourceId('Microsoft.Network/networkSecurityGroups', '{}')]".format(nsg_name)
+                vmss_dependencies.append('Microsoft.Network/networkSecurityGroups/{}'.format(nsg_name))
+
+            # NAT rule V2 works for both Uniform and Flex VMSS, but the basic LB SKU cannot fully
+            # support it, so the CLI defaults to NAT rule V2 only when the Standard LB SKU is used.
+            if not nat_pool_name:
+
+                if nat_rule_name and is_basic_lb_sku:
+                    logger.warning(
+                        'Since the basic SKU of load balancer cannot fully support NAT rule V2, '
+                        'it is recommended to specify "--lb-sku Standard" to use the standard SKU instead.')
+
+                nat_rule_name = nat_rule_name or 'NatRule'
+                # The nested resource must follow the pattern parent_resource_name/nested_res_name
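+                # e.g. load_balancer='myLB' and nat_rule_name='NatRule' (hypothetical values)
+                # would yield 'myLB/NatRule' as the nested ARM resource name.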
nat_rule_name = '{}/{}'.format(load_balancer, nat_rule_name) + nat_rule = build_nat_rule_v2(cmd, nat_rule_name, location, load_balancer, frontend_ip_name, + backend_pool_name, backend_port, instance_count, disable_overprovision) + master_template.add_resource(nat_rule) + + # Or handle application gateway creation + if app_gateway_type == 'new': + vmss_dependencies.append('Microsoft.Network/applicationGateways/{}'.format(app_gateway)) + + ag_dependencies = [] + if vnet_type == 'new': + ag_dependencies.append('Microsoft.Network/virtualNetworks/{}'.format(vnet_name)) + if public_ip_address_type == 'new': + public_ip_address = public_ip_address or '{}PublicIP'.format(app_gateway) + ag_dependencies.append( + 'Microsoft.Network/publicIpAddresses/{}'.format(public_ip_address)) + master_template.add_resource(build_public_ip_resource( + cmd, public_ip_address, location, tags, + _get_public_ip_address_allocation(public_ip_address_allocation, None), public_ip_address_dns_name, + None, zones)) + public_ip_address_id = '{}/publicIPAddresses/{}'.format(network_id_template, + public_ip_address) + + # calculate default names if not provided + backend_port = backend_port or 80 + + ag_resource = build_application_gateway_resource( + cmd, app_gateway, location, tags, backend_pool_name, backend_port, 'appGwFrontendIP', + public_ip_address_id, subnet_id, gateway_subnet_id, private_ip_address='', + private_ip_allocation='Dynamic', sku=app_gateway_sku, capacity=app_gateway_capacity) + ag_resource['dependsOn'] = ag_dependencies + master_template.add_variable( + 'appGwID', + "[resourceId('Microsoft.Network/applicationGateways', '{}')]".format(app_gateway)) + master_template.add_resource(ag_resource) + + # create storage accounts if needed for unmanaged disk storage + if storage_profile == StorageProfile.SAPirImage: + master_template.add_resource(build_vmss_storage_account_pool_resource( + cmd, 'storageLoop', location, tags, storage_sku, edge_zone)) + master_template.add_variable('storageAccountNames', [ + '{}{}'.format(naming_prefix, x) for x in range(5) + ]) + master_template.add_variable('vhdContainers', [ + "[concat('https://', variables('storageAccountNames')[{}], '.blob.{}/{}')]".format( + x, cmd.cli_ctx.cloud.suffixes.storage_endpoint, storage_container_name) for x in range(5) + ]) + vmss_dependencies.append('storageLoop') + + backend_address_pool_id = None + inbound_nat_pool_id = None + if load_balancer_type or app_gateway_type: + network_balancer = load_balancer if load_balancer_type else app_gateway + balancer_type = 'loadBalancers' if load_balancer_type else 'applicationGateways' + + if is_valid_resource_id(network_balancer): + # backend address pool needed by load balancer or app gateway + backend_address_pool_id = '{}/backendAddressPools/{}'.format(network_balancer, backend_pool_name) + if nat_pool_name: + inbound_nat_pool_id = '{}/inboundNatPools/{}'.format(network_balancer, nat_pool_name) + else: + # backend address pool needed by load balancer or app gateway + backend_address_pool_id = '{}/{}/{}/backendAddressPools/{}'.format( + network_id_template, balancer_type, network_balancer, backend_pool_name) + if nat_pool_name: + inbound_nat_pool_id = '{}/{}/{}/inboundNatPools/{}'.format( + network_id_template, balancer_type, network_balancer, nat_pool_name) + + if health_probe and not is_valid_resource_id(health_probe): + health_probe = '{}/loadBalancers/{}/probes/{}'.format(network_id_template, load_balancer, health_probe) + + ip_config_name = '{}IPConfig'.format(naming_prefix) + nic_name = 
'{}Nic'.format(naming_prefix) + + if custom_data: + custom_data = read_content_if_is_file(custom_data) + + if user_data: + user_data = read_content_if_is_file(user_data) + + if secrets: + secrets = _merge_secrets([validate_file_or_dict(secret) for secret in secrets]) + + if computer_name_prefix is not None and isinstance(computer_name_prefix, str): + naming_prefix = computer_name_prefix + + if orchestration_mode.lower() == uniform_str.lower(): + computer_name_prefix = naming_prefix + + if os_version and os_version != 'latest': + logger.warning('You are deploying VMSS pinned to a specific image version from Azure Marketplace. ' + 'Consider using "latest" as the image version.') + + vmss_resource = build_vmss_resource( + cmd=cmd, name=vmss_name, computer_name_prefix=computer_name_prefix, location=location, tags=tags, + overprovision=not disable_overprovision if orchestration_mode.lower() == uniform_str.lower() else None, + upgrade_policy_mode=upgrade_policy_mode, vm_sku=vm_sku, + instance_count=instance_count, ip_config_name=ip_config_name, nic_name=nic_name, subnet_id=subnet_id, + public_ip_per_vm=public_ip_per_vm, vm_domain_name=vm_domain_name, dns_servers=dns_servers, nsg=nsg, + accelerated_networking=accelerated_networking, admin_username=admin_username, + authentication_type=authentication_type, storage_profile=storage_profile, os_disk_name=os_disk_name, + disk_info=disk_info, os_type=os_type, image=image, admin_password=admin_password, + ssh_key_values=ssh_key_value, ssh_key_path=ssh_dest_key_path, os_publisher=os_publisher, os_offer=os_offer, + os_sku=os_sku, os_version=os_version, backend_address_pool_id=backend_address_pool_id, + inbound_nat_pool_id=inbound_nat_pool_id, health_probe=health_probe, + single_placement_group=single_placement_group, platform_fault_domain_count=platform_fault_domain_count, + custom_data=custom_data, secrets=secrets, license_type=license_type, zones=zones, priority=priority, + eviction_policy=eviction_policy, application_security_groups=application_security_groups, + ultra_ssd_enabled=ultra_ssd_enabled, proximity_placement_group=proximity_placement_group, + terminate_notification_time=terminate_notification_time, max_price=max_price, + scale_in_policy=scale_in_policy, os_disk_encryption_set=os_disk_encryption_set, + data_disk_encryption_sets=data_disk_encryption_sets, data_disk_iops=data_disk_iops, + data_disk_mbps=data_disk_mbps, automatic_repairs_grace_period=automatic_repairs_grace_period, + specialized=specialized, os_disk_size_gb=os_disk_size_gb, encryption_at_host=encryption_at_host, + host_group=host_group, max_batch_instance_percent=max_batch_instance_percent, + max_unhealthy_instance_percent=max_unhealthy_instance_percent, + max_unhealthy_upgraded_instance_percent=max_unhealthy_upgraded_instance_percent, + pause_time_between_batches=pause_time_between_batches, enable_cross_zone_upgrade=enable_cross_zone_upgrade, + prioritize_unhealthy_instances=prioritize_unhealthy_instances, edge_zone=edge_zone, user_data=user_data, + orchestration_mode=orchestration_mode, network_api_version=network_api_version, + enable_spot_restore=enable_spot_restore, spot_restore_timeout=spot_restore_timeout, + capacity_reservation_group=capacity_reservation_group, enable_auto_update=enable_auto_update, + patch_mode=patch_mode, enable_agent=enable_agent, security_type=security_type, + enable_secure_boot=enable_secure_boot, enable_vtpm=enable_vtpm, + automatic_repairs_action=automatic_repairs_action, v_cpus_available=v_cpus_available, + v_cpus_per_core=v_cpus_per_core, 
os_disk_security_encryption_type=os_disk_security_encryption_type, + os_disk_secure_vm_disk_encryption_set=os_disk_secure_vm_disk_encryption_set, + os_disk_delete_option=os_disk_delete_option, regular_priority_count=regular_priority_count, + regular_priority_percentage=regular_priority_percentage, disk_controller_type=disk_controller_type, + enable_osimage_notification=enable_osimage_notification, max_surge=max_surge, + enable_hibernation=enable_hibernation, enable_auto_os_upgrade=enable_auto_os_upgrade, + enable_proxy_agent=enable_proxy_agent, proxy_agent_mode=proxy_agent_mode, + security_posture_reference_id=security_posture_reference_id, + security_posture_reference_exclude_extensions=security_posture_reference_exclude_extensions, + enable_resilient_vm_creation=enable_resilient_creation, + enable_resilient_vm_deletion=enable_resilient_deletion, + additional_scheduled_events=additional_scheduled_events, + enable_user_reboot_scheduled_events=enable_user_reboot_scheduled_events, + enable_user_redeploy_scheduled_events=enable_user_redeploy_scheduled_events, + skuprofile_vmsizes=skuprofile_vmsizes, + skuprofile_allostrat=skuprofile_allostrat) + + vmss_resource['dependsOn'] = vmss_dependencies + + if plan_name: + vmss_resource['plan'] = { + 'name': plan_name, + 'publisher': plan_publisher, + 'product': plan_product, + 'promotionCode': plan_promotion_code + } + + enable_local_identity = None + if assign_identity is not None: + vmss_resource['identity'], _, _, enable_local_identity = _build_identities_info( + assign_identity) + if identity_scope: + role_assignment_guid = str(_gen_guid()) + master_template.add_resource(build_msi_role_assignment(vmss_name, vmss_id, identity_role_id, + role_assignment_guid, identity_scope, False)) + else: + raise CLIError('usage error: --orchestration-mode (Uniform | Flexible)') + + master_template.add_resource(vmss_resource) + master_template.add_output('VMSS', vmss_name, 'Microsoft.Compute', 'virtualMachineScaleSets', + output_type='object') + + if admin_password: + master_template.add_secure_parameter('adminPassword', admin_password) + + template = master_template.build() + parameters = master_template.build_parameters() + + # deploy ARM template + deployment_name = 'vmss_deploy_' + random_string(32) + client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES, + aux_subscriptions=aux_subscriptions).deployments + + DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES) + properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental') + + if validate: + from azure.cli.command_modules.vm.azure_stack._vm_utils import log_pprint_template + log_pprint_template(template) + log_pprint_template(parameters) + + Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES) + deployment = Deployment(properties=properties) + if validate: + if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES): + validation_poller = client.begin_validate(resource_group_name, deployment_name, deployment) + return LongRunningOperation(cmd.cli_ctx)(validation_poller) + + return client.validate(resource_group_name, deployment_name, deployment) + + # creates the VMSS deployment + deployment_result = DeploymentOutputLongRunningOperation(cmd.cli_ctx)( + sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, deployment_name, deployment)) + + if orchestration_mode.lower() == 
uniform_str.lower() and assign_identity is not None:
+        vmss_info = get_vmss(cmd, resource_group_name, vmss_name)
+        if enable_local_identity and not identity_scope:
+            _show_missing_access_warning(resource_group_name, vmss_name, 'vmss')
+        deployment_result['vmss']['identity'] = _construct_identity_info(identity_scope, identity_role,
+                                                                         vmss_info.identity.principal_id,
+                                                                         vmss_info.identity.user_assigned_identities)
+    # Install the Guest Attestation Extension and enable system-assigned MSI by default
+    is_trusted_launch = security_type and security_type.lower() == 'trustedlaunch' and \
+        enable_vtpm and enable_secure_boot
+    if is_trusted_launch and enable_integrity_monitoring:
+        client = _compute_client_factory(cmd.cli_ctx)
+        vmss = client.virtual_machine_scale_sets.get(resource_group_name, vmss_name)
+        vmss.virtual_machine_profile.storage_profile.image_reference = None
+        VirtualMachineScaleSetExtension, VirtualMachineScaleSetExtensionProfile = cmd.get_models(
+            'VirtualMachineScaleSetExtension', 'VirtualMachineScaleSetExtensionProfile')
+        if vmss.virtual_machine_profile.storage_profile.os_disk.os_type == 'Linux':
+            publisher = 'Microsoft.Azure.Security.LinuxAttestation'
+        if vmss.virtual_machine_profile.storage_profile.os_disk.os_type == 'Windows':
+            publisher = 'Microsoft.Azure.Security.WindowsAttestation'
+        version = _normalize_extension_version(cmd.cli_ctx, publisher, 'GuestAttestation', None, vmss.location)
+        ext = VirtualMachineScaleSetExtension(name='GuestAttestation',
+                                              publisher=publisher,
+                                              type_properties_type='GuestAttestation',
+                                              protected_settings=None,
+                                              type_handler_version=version,
+                                              settings=None,
+                                              auto_upgrade_minor_version=True,
+                                              provision_after_extensions=None,
+                                              enable_automatic_upgrade=not disable_integrity_monitoring_autoupgrade)
+        if not vmss.virtual_machine_profile.extension_profile:
+            vmss.virtual_machine_profile.extension_profile = VirtualMachineScaleSetExtensionProfile(extensions=[])
+        vmss.virtual_machine_profile.extension_profile.extensions.append(ext)
+        try:
+            LongRunningOperation(cmd.cli_ctx)(client.virtual_machine_scale_sets.begin_create_or_update(
+                resource_group_name, vmss_name, vmss))
+            logger.info('Guest Attestation Extension has been successfully installed by default '
+                        'when Trusted Launch configuration is met')
+            VirtualMachineScaleSetVMInstanceRequiredIDs = cmd.get_models('VirtualMachineScaleSetVMInstanceRequiredIDs')
+            instance_ids = VirtualMachineScaleSetVMInstanceRequiredIDs(instance_ids=['*'])
+            LongRunningOperation(cmd.cli_ctx)(client.virtual_machine_scale_sets.begin_update_instances(
+                resource_group_name, vmss_name, instance_ids))
+        except Exception as e:
+            logger.error('Failed to install Guest Attestation Extension for Trusted Launch. %s', e)
+
+    return deployment_result
+
+
+def _build_identities_info(identities):
+    from ._vm_utils import MSI_LOCAL_ID
+    identities = identities or []
+    identity_types = []
+    if not identities or MSI_LOCAL_ID in identities:
+        identity_types.append('SystemAssigned')
+    external_identities = [x for x in identities if x != MSI_LOCAL_ID]
+    if external_identities:
+        identity_types.append('UserAssigned')
+    identity_types = ','.join(identity_types)
+    info = {'type': identity_types}
+    if external_identities:
+        info['userAssignedIdentities'] = {e: {} for e in external_identities}
+    return (info, identity_types, external_identities, 'SystemAssigned' in identity_types)
+
+
+def _build_identities_info_from_system_user_assigned(cmd, mi_system_assigned, mi_user_assigned):
+    IdentityType, UserAssignedIdentitiesValue = cmd.get_models('DiskEncryptionSetIdentityType',
+                                                               'UserAssignedIdentitiesValue',
+                                                               operation_group='disk_encryption_sets')
+
+    identity_types = IdentityType.SYSTEM_ASSIGNED
+    user_assigned_identities = None
+    if mi_user_assigned:
+        if mi_system_assigned:
+            identity_types = IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED
+        else:
+            identity_types = IdentityType.USER_ASSIGNED
+
+        default_user_identity = UserAssignedIdentitiesValue()
+        user_assigned_identities = dict.fromkeys(mi_user_assigned, default_user_identity)
+
+    return identity_types, user_assigned_identities
+
+
+def deallocate_vmss(cmd, resource_group_name, vm_scale_set_name, instance_ids=None, no_wait=False, hibernate=None):
+    client = _compute_client_factory(cmd.cli_ctx)
+    # This is a workaround because the REST service of `VirtualMachineScaleSetVMs#begin_deallocate`
+    # does not accept `hibernate` at present
+    if instance_ids and len(instance_ids) == 1 and hibernate is None:
+        return sdk_no_wait(no_wait, client.virtual_machine_scale_set_vms.begin_deallocate,
+                           resource_group_name, vm_scale_set_name, instance_ids[0])
+
+    VirtualMachineScaleSetVMInstanceIDs = cmd.get_models('VirtualMachineScaleSetVMInstanceIDs')
+    vm_instance_i_ds = VirtualMachineScaleSetVMInstanceIDs(instance_ids=instance_ids)
+    if hibernate is not None:
+        return sdk_no_wait(no_wait, client.virtual_machine_scale_sets.begin_deallocate,
+                           resource_group_name, vm_scale_set_name, vm_instance_i_ds, hibernate=hibernate)
+    else:
+        return sdk_no_wait(no_wait, client.virtual_machine_scale_sets.begin_deallocate,
+                           resource_group_name, vm_scale_set_name, vm_instance_i_ds)
+
+
+def delete_vmss_instances(cmd, resource_group_name, vm_scale_set_name, instance_ids, no_wait=False):
+    client = _compute_client_factory(cmd.cli_ctx)
+    VirtualMachineScaleSetVMInstanceRequiredIDs = cmd.get_models('VirtualMachineScaleSetVMInstanceRequiredIDs')
+    instance_ids = VirtualMachineScaleSetVMInstanceRequiredIDs(instance_ids=instance_ids)
+    return sdk_no_wait(no_wait, client.virtual_machine_scale_sets.begin_delete_instances,
+                       resource_group_name, vm_scale_set_name, instance_ids)
+
+
+def get_vmss(cmd, resource_group_name, name, instance_id=None, include_user_data=False):
+    client = _compute_client_factory(cmd.cli_ctx)
+
+    expand = None
+    if include_user_data:
+        expand = 'userData'
+
+    if instance_id is not None:
+        if cmd.supported_api_version(min_api='2020-12-01', operation_group='virtual_machine_scale_sets'):
+            return client.virtual_machine_scale_set_vms.get(resource_group_name=resource_group_name,
+                                                            vm_scale_set_name=name, instance_id=instance_id,
+                                                            expand=expand)
+        return client.virtual_machine_scale_set_vms.get(resource_group_name=resource_group_name,
+                                                        vm_scale_set_name=name,
instance_id=instance_id) + + if cmd.supported_api_version(min_api='2021-03-01', operation_group='virtual_machine_scale_sets'): + return client.virtual_machine_scale_sets.get(resource_group_name, name, expand=expand) + return client.virtual_machine_scale_sets.get(resource_group_name, name) + + +def _check_vmss_hyper_v_generation(cli_ctx, vmss): + hyper_v_generation = get_hyper_v_generation_from_vmss( + cli_ctx, vmss.virtual_machine_profile.storage_profile.image_reference, vmss.location) + security_profile = vmss.virtual_machine_profile.security_profile + security_type = security_profile.security_type if security_profile else None + + if hyper_v_generation == "V1" or (hyper_v_generation == "V2" and security_type is None): + logger.warning("Trusted Launch security type is supported on Hyper-V Generation 2 OS Images. " + "To learn more, please visit " + "https://learn.microsoft.com/en-us/azure/virtual-machines/trusted-launch") + elif hyper_v_generation == "V2" and security_type == "ConfidentialVM": + from azure.cli.core.azclierror import InvalidArgumentValueError + raise InvalidArgumentValueError("{} is already configured with {}. " + "Security Configuration cannot be updated from ConfidentialVM to " + "TrustedLaunch.".format(vmss.name, security_type)) + + +def get_vmss_modified(cmd, resource_group_name, name, instance_id=None, security_type=None): + client = _compute_client_factory(cmd.cli_ctx) + if instance_id is not None: + vms = client.virtual_machine_scale_set_vms.get(resource_group_name=resource_group_name, + vm_scale_set_name=name, instance_id=instance_id) + # To avoid unnecessary permission check of image + if hasattr(vms, "storage_profile") and vms.storage_profile: + vms.storage_profile.image_reference = None + return vms + + vmss = client.virtual_machine_scale_sets.get(resource_group_name, name) + if security_type == 'TrustedLaunch': + _check_vmss_hyper_v_generation(cmd.cli_ctx, vmss) + # To avoid unnecessary permission check of image + if hasattr(vmss, "virtual_machine_profile") and vmss.virtual_machine_profile \ + and vmss.virtual_machine_profile.storage_profile: + vmss.virtual_machine_profile.storage_profile.image_reference = None + return vmss + + +def get_vmss_instance_view(cmd, resource_group_name, vm_scale_set_name, instance_id=None): + client = _compute_client_factory(cmd.cli_ctx) + if instance_id: + if instance_id == '*': + return [x.instance_view for x in (client.virtual_machine_scale_set_vms.list( + resource_group_name=resource_group_name, virtual_machine_scale_set_name=vm_scale_set_name, + select='instanceView', expand='instanceView'))] + + return client.virtual_machine_scale_set_vms.get_instance_view(resource_group_name=resource_group_name, + vm_scale_set_name=vm_scale_set_name, + instance_id=instance_id) + + return client.virtual_machine_scale_sets.get_instance_view(resource_group_name, vm_scale_set_name) + + +def list_vmss_instance_connection_info(cmd, resource_group_name, vm_scale_set_name): + from azure.mgmt.core.tools import parse_resource_id + + LBShow = import_aaz_by_profile(cmd.cli_ctx.cloud.profile, "network.lb").Show + PublicIPAddress = import_aaz_by_profile(cmd.cli_ctx.cloud.profile, "network.public_ip").Show + + client = _compute_client_factory(cmd.cli_ctx) + vmss = client.virtual_machine_scale_sets.get(resource_group_name, vm_scale_set_name) + + from ._vm_utils import raise_unsupported_error_for_flex_vmss + raise_unsupported_error_for_flex_vmss( + vmss, 'This command is not available for VMSS in Flex mode. ' + 'Please use the "az network public-ip list/show" to retrieve networking information.') + + # find the load balancer + nic_configs = vmss.virtual_machine_profile.network_profile.network_interface_configurations + primary_nic_config = next((n for n in nic_configs if n.primary), None) + if primary_nic_config is None: + raise CLIError('Could not find a primary NIC, which is needed to look up the load balancer') + + res_id = None + for ip in primary_nic_config.ip_configurations: + if ip.load_balancer_inbound_nat_pools: + res_id = ip.load_balancer_inbound_nat_pools[0].id + break + if ip.load_balancer_backend_address_pools: + res_id = ip.load_balancer_backend_address_pools[0].id + break + + if not res_id: + raise ResourceNotFoundError('No load balancer exists to retrieve public IP address') + + lb_info = parse_resource_id(res_id) + lb_name = lb_info['name'] + lb_rg = lb_info['resource_group'] + + # get public ip + lb = LBShow(cli_ctx=cmd.cli_ctx)(command_args={ + 'name': lb_name, + 'resource_group': lb_rg + }) + if 'publicIPAddress' in lb['frontendIPConfigurations'][0]: + res_id = lb['frontendIPConfigurations'][0]['publicIPAddress']['id'] + public_ip_info = parse_resource_id(res_id) + public_ip_name = public_ip_info['name'] + public_ip_rg = public_ip_info['resource_group'] + public_ip = PublicIPAddress(cli_ctx=cmd.cli_ctx)(command_args={ + 'name': public_ip_name, + 'resource_group': public_ip_rg + }) + public_ip_address = public_ip['ipAddress'] if 'ipAddress' in public_ip else None + # For NAT pool, get the frontend port and VMSS instance from inboundNatRules + is_nat_pool = True + instance_addresses = {} + for rule in lb['inboundNatRules']: + # If backend_ip_configuration does not exist, it means that NAT rule V2 is used + if 'backendIPConfiguration' not in rule or not rule['backendIPConfiguration']: + is_nat_pool = False + break + instance_id = parse_resource_id(rule['backendIPConfiguration']['id'])['child_name_1'] + instance_addresses['instance ' + instance_id] = '{}:{}'.format(public_ip_address, + rule['frontendPort']) + if is_nat_pool: + return instance_addresses + + # For NAT rule V2, get the frontend port and VMSS instance from loadBalancerBackendAddresses + for backend_address_pool in lb['backendAddressPools']: + if 'loadBalancerBackendAddresses' not in backend_address_pool or \ + not backend_address_pool['loadBalancerBackendAddresses']: + raise CLIError('There is no connection information. 
' + 'If you are using NAT rule V2, please confirm whether the load balancer SKU is Standard') + + for load_balancer_backend_address in backend_address_pool['loadBalancerBackendAddresses']: + + network_interface_ip_configuration = load_balancer_backend_address['networkInterfaceIPConfiguration'] + if not network_interface_ip_configuration or 'id' not in network_interface_ip_configuration: + continue + instance_id = parse_resource_id(network_interface_ip_configuration['id'])['child_name_1'] + + if not load_balancer_backend_address['inboundNatRulesPortMapping']: + continue + frontend_port = load_balancer_backend_address['inboundNatRulesPortMapping'][0]['frontendPort'] + instance_addresses['instance ' + instance_id] = '{}:{}'.format(public_ip_address, frontend_port) + + return instance_addresses + raise CLIError('The VM scale-set uses an internal load balancer, hence no connection information') + + +def list_vmss_instance_public_ips(cmd, resource_group_name, vm_scale_set_name): + ListInstancePublicIps = import_aaz_by_profile(cmd.cli_ctx.cloud.profile, "vmss").ListInstancePublicIps + + compute_client = _compute_client_factory(cmd.cli_ctx) + vmss = compute_client.virtual_machine_scale_sets.get(resource_group_name, vm_scale_set_name) + from ._vm_utils import raise_unsupported_error_for_flex_vmss + raise_unsupported_error_for_flex_vmss( + vmss, 'This command is not available for VMSS in Flex mode. ' + 'Please use the "az network public-ip list/show" to retrieve networking information.') + + result = ListInstancePublicIps(cli_ctx=cmd.cli_ctx)(command_args={ + 'vmss_name': vm_scale_set_name, + 'resource_group': resource_group_name + }) + # filter out over-provisioned instances, which are deleted after 'create/update' returns + return [r for r in result if 'ipAddress' in r and r['ipAddress']] + + +def reimage_vmss(cmd, resource_group_name, vm_scale_set_name, instance_ids=None, + force_update_os_disk_for_ephemeral=None, no_wait=False): + client = _compute_client_factory(cmd.cli_ctx) + if instance_ids: + VirtualMachineScaleSetVMInstanceIDs = cmd.get_models('VirtualMachineScaleSetVMInstanceIDs') + instance_ids = VirtualMachineScaleSetVMInstanceIDs(instance_ids=instance_ids) + return sdk_no_wait(no_wait, client.virtual_machine_scale_sets.begin_reimage_all, resource_group_name, + vm_scale_set_name, instance_ids) + if force_update_os_disk_for_ephemeral is not None: + VirtualMachineScaleSetReimageParameters = cmd.get_models('VirtualMachineScaleSetReimageParameters') + vm_scale_set_reimage_input = VirtualMachineScaleSetReimageParameters( + force_update_os_disk_for_ephemeral=force_update_os_disk_for_ephemeral) + return sdk_no_wait(no_wait, client.virtual_machine_scale_sets.begin_reimage, + resource_group_name, vm_scale_set_name, vm_scale_set_reimage_input) + return sdk_no_wait(no_wait, client.virtual_machine_scale_sets.begin_reimage, + resource_group_name, vm_scale_set_name) + + +def restart_vmss(cmd, resource_group_name, vm_scale_set_name, instance_ids=None, no_wait=False): + client = _compute_client_factory(cmd.cli_ctx) + VirtualMachineScaleSetVMInstanceRequiredIDs = cmd.get_models('VirtualMachineScaleSetVMInstanceRequiredIDs') + if instance_ids is None: + instance_ids = ['*'] + instance_ids = VirtualMachineScaleSetVMInstanceRequiredIDs(instance_ids=instance_ids) + return sdk_no_wait(no_wait, client.virtual_machine_scale_sets.begin_restart, resource_group_name, vm_scale_set_name, + vm_instance_i_ds=instance_ids) + + +# pylint: disable=inconsistent-return-statements +def scale_vmss(cmd, 
resource_group_name, vm_scale_set_name, new_capacity, no_wait=False): + VirtualMachineScaleSet = cmd.get_models('VirtualMachineScaleSet') + client = _compute_client_factory(cmd.cli_ctx) + vmss = client.virtual_machine_scale_sets.get(resource_group_name, vm_scale_set_name) + # pylint: disable=no-member + if vmss.sku.capacity == new_capacity: + return + + vmss.sku.capacity = new_capacity + vmss_new = VirtualMachineScaleSet(location=vmss.location, sku=vmss.sku) + return sdk_no_wait(no_wait, client.virtual_machine_scale_sets.begin_create_or_update, + resource_group_name, vm_scale_set_name, vmss_new) + + +def start_vmss(cmd, resource_group_name, vm_scale_set_name, instance_ids=None, no_wait=False): + client = _compute_client_factory(cmd.cli_ctx) + VirtualMachineScaleSetVMInstanceRequiredIDs = cmd.get_models('VirtualMachineScaleSetVMInstanceRequiredIDs') + if instance_ids is None: + instance_ids = ['*'] + instance_ids = VirtualMachineScaleSetVMInstanceRequiredIDs(instance_ids=instance_ids) + return sdk_no_wait(no_wait, client.virtual_machine_scale_sets.begin_start, + resource_group_name, vm_scale_set_name, vm_instance_i_ds=instance_ids) + + +def stop_vmss(cmd, resource_group_name, vm_scale_set_name, instance_ids=None, no_wait=False, skip_shutdown=False): + client = _compute_client_factory(cmd.cli_ctx) + VirtualMachineScaleSetVMInstanceRequiredIDs = cmd.get_models('VirtualMachineScaleSetVMInstanceRequiredIDs') + if instance_ids is None: + instance_ids = ['*'] + instance_ids = VirtualMachineScaleSetVMInstanceRequiredIDs(instance_ids=instance_ids) + if cmd.supported_api_version(min_api='2020-06-01', operation_group='virtual_machine_scale_sets'): + return sdk_no_wait( + no_wait, client.virtual_machine_scale_sets.begin_power_off, resource_group_name, vm_scale_set_name, + vm_instance_i_ds=instance_ids, skip_shutdown=skip_shutdown) + return sdk_no_wait(no_wait, client.virtual_machine_scale_sets.begin_power_off, resource_group_name, + vm_scale_set_name, vm_instance_i_ds=instance_ids) + + +def update_vmss_instances(cmd, resource_group_name, vm_scale_set_name, instance_ids, no_wait=False): + client = _compute_client_factory(cmd.cli_ctx) + VirtualMachineScaleSetVMInstanceRequiredIDs = cmd.get_models('VirtualMachineScaleSetVMInstanceRequiredIDs') + instance_ids = VirtualMachineScaleSetVMInstanceRequiredIDs(instance_ids=instance_ids) + return sdk_no_wait(no_wait, client.virtual_machine_scale_sets.begin_update_instances, + resource_group_name, vm_scale_set_name, instance_ids) + + +def update_vmss(cmd, resource_group_name, name, license_type=None, no_wait=False, instance_id=None, + protect_from_scale_in=None, protect_from_scale_set_actions=None, + enable_terminate_notification=None, terminate_notification_time=None, ultra_ssd_enabled=None, + scale_in_policy=None, priority=None, max_price=None, proximity_placement_group=None, + enable_automatic_repairs=None, automatic_repairs_grace_period=None, max_batch_instance_percent=None, + max_unhealthy_instance_percent=None, max_unhealthy_upgraded_instance_percent=None, + pause_time_between_batches=None, enable_cross_zone_upgrade=None, prioritize_unhealthy_instances=None, + user_data=None, enable_spot_restore=None, spot_restore_timeout=None, capacity_reservation_group=None, + vm_sku=None, ephemeral_os_disk_placement=None, force_deletion=None, enable_secure_boot=None, + enable_vtpm=None, automatic_repairs_action=None, v_cpus_available=None, v_cpus_per_core=None, + regular_priority_count=None, regular_priority_percentage=None, disk_controller_type=None, + 
enable_osimage_notification=None, custom_data=None, enable_hibernation=None, + security_type=None, enable_proxy_agent=None, proxy_agent_mode=None, + security_posture_reference_id=None, security_posture_reference_exclude_extensions=None, + max_surge=None, enable_resilient_creation=None, enable_resilient_deletion=None, + ephemeral_os_disk=None, ephemeral_os_disk_option=None, zones=None, additional_scheduled_events=None, + enable_user_reboot_scheduled_events=None, enable_user_redeploy_scheduled_events=None, + upgrade_policy_mode=None, enable_auto_os_upgrade=None, + skuprofile_vmsizes=None, skuprofile_allostrat=None, **kwargs): + vmss = kwargs['parameters'] + aux_subscriptions = None + # pylint: disable=too-many-boolean-expressions + if vmss and hasattr(vmss, 'virtual_machine_profile') and vmss.virtual_machine_profile and \ + vmss.virtual_machine_profile.storage_profile and \ + vmss.virtual_machine_profile.storage_profile.image_reference and \ + 'id' in vmss.virtual_machine_profile.storage_profile.image_reference: + aux_subscriptions = _parse_aux_subscriptions(vmss.virtual_machine_profile.storage_profile.image_reference['id']) + client = _compute_client_factory(cmd.cli_ctx, aux_subscriptions=aux_subscriptions) + + VMProtectionPolicy = cmd.get_models('VirtualMachineScaleSetVMProtectionPolicy') + + # handle vmss instance update + from azure.cli.core.util import b64encode + + if instance_id is not None: + if license_type is not None: + vmss.license_type = license_type + + if user_data is not None: + vmss.user_data = b64encode(user_data) + + if not vmss.protection_policy: + vmss.protection_policy = VMProtectionPolicy() + + if protect_from_scale_in is not None: + vmss.protection_policy.protect_from_scale_in = protect_from_scale_in + + if protect_from_scale_set_actions is not None: + vmss.protection_policy.protect_from_scale_set_actions = protect_from_scale_set_actions + + return sdk_no_wait(no_wait, client.virtual_machine_scale_set_vms.begin_update, + resource_group_name, name, instance_id, **kwargs) + + # else handle vmss update + if license_type is not None: + vmss.virtual_machine_profile.license_type = license_type + + if user_data is not None: + vmss.virtual_machine_profile.user_data = b64encode(user_data) + + if v_cpus_available is not None or v_cpus_per_core is not None: + HardwareProfile = cmd.get_models('HardwareProfile') + VMSizeProperties = cmd.get_models('VMSizeProperties') + hardware_profile = HardwareProfile(vm_size_properties=VMSizeProperties(v_cpus_available=v_cpus_available, + v_cpus_per_core=v_cpus_per_core)) + vmss.virtual_machine_profile.hardware_profile = hardware_profile + + if capacity_reservation_group is not None: + CapacityReservationProfile = cmd.get_models('CapacityReservationProfile') + SubResource = cmd.get_models('SubResource') + if capacity_reservation_group == 'None': + capacity_reservation_group = None + sub_resource = SubResource(id=capacity_reservation_group) + capacity_reservation = CapacityReservationProfile(capacity_reservation_group=sub_resource) + vmss.virtual_machine_profile.capacity_reservation = capacity_reservation + + if enable_terminate_notification is not None or terminate_notification_time is not None: + if vmss.virtual_machine_profile.scheduled_events_profile is None: + ScheduledEventsProfile = cmd.get_models('ScheduledEventsProfile') + vmss.virtual_machine_profile.scheduled_events_profile = ScheduledEventsProfile() + TerminateNotificationProfile = cmd.get_models('TerminateNotificationProfile') + 
vmss.virtual_machine_profile.scheduled_events_profile.terminate_notification_profile = \ + TerminateNotificationProfile(not_before_timeout=terminate_notification_time, + enable=enable_terminate_notification) + + if additional_scheduled_events is not None or \ + enable_user_reboot_scheduled_events is not None or enable_user_redeploy_scheduled_events is not None: + if vmss.scheduled_events_policy is None: + ScheduledEventsPolicy = cmd.get_models('ScheduledEventsPolicy') + UserInitiatedRedeploy = cmd.get_models('UserInitiatedRedeploy') + UserInitiatedReboot = cmd.get_models('UserInitiatedReboot') + EventGridAndResourceGraph = cmd.get_models('EventGridAndResourceGraph') + ScheduledEventsAdditionalPublishingTargets = cmd.get_models('ScheduledEventsAdditionalPublishingTargets') + vmss.scheduled_events_policy = ScheduledEventsPolicy() + vmss.scheduled_events_policy.scheduled_events_additional_publishing_targets = \ + ScheduledEventsAdditionalPublishingTargets() + vmss.scheduled_events_policy.scheduled_events_additional_publishing_targets. \ + event_grid_and_resource_graph = EventGridAndResourceGraph() + vmss.scheduled_events_policy.user_initiated_reboot = UserInitiatedReboot() + vmss.scheduled_events_policy.user_initiated_redeploy = UserInitiatedRedeploy() + vmss.scheduled_events_policy.scheduled_events_additional_publishing_targets. \ + event_grid_and_resource_graph.enable = additional_scheduled_events + vmss.scheduled_events_policy.user_initiated_redeploy.automatically_approve = \ + enable_user_redeploy_scheduled_events + vmss.scheduled_events_policy.user_initiated_reboot.automatically_approve = enable_user_reboot_scheduled_events + + if enable_osimage_notification is not None: + if vmss.virtual_machine_profile.scheduled_events_profile is None: + vmss.virtual_machine_profile.scheduled_events_profile = cmd.get_models('ScheduledEventsProfile')() + OSImageNotificationProfile = cmd.get_models('OSImageNotificationProfile') + vmss.virtual_machine_profile.scheduled_events_profile.os_image_notification_profile = \ + OSImageNotificationProfile(enable=enable_osimage_notification) + if enable_automatic_repairs is not None or \ + automatic_repairs_grace_period is not None or automatic_repairs_action is not None: + AutomaticRepairsPolicy = cmd.get_models('AutomaticRepairsPolicy') + vmss.automatic_repairs_policy = \ + AutomaticRepairsPolicy(enabled=enable_automatic_repairs, + grace_period=automatic_repairs_grace_period, + repair_action=automatic_repairs_action) + + if ultra_ssd_enabled is not None: + if cmd.supported_api_version(min_api='2019-03-01', operation_group='virtual_machine_scale_sets'): + if vmss.additional_capabilities is None: + AdditionalCapabilities = cmd.get_models('AdditionalCapabilities') + vmss.additional_capabilities = AdditionalCapabilities(ultra_ssd_enabled=ultra_ssd_enabled) + else: + vmss.additional_capabilities.ultra_ssd_enabled = ultra_ssd_enabled + else: + if vmss.virtual_machine_profile.additional_capabilities is None: + AdditionalCapabilities = cmd.get_models('AdditionalCapabilities') + vmss.virtual_machine_profile.additional_capabilities = AdditionalCapabilities( + ultra_ssd_enabled=ultra_ssd_enabled) + else: + vmss.virtual_machine_profile.additional_capabilities.ultra_ssd_enabled = ultra_ssd_enabled + + if scale_in_policy is not None or force_deletion is not None: + ScaleInPolicy = cmd.get_models('ScaleInPolicy') + vmss.scale_in_policy = ScaleInPolicy(rules=scale_in_policy, force_deletion=force_deletion) + + if enable_spot_restore is not None: + vmss.spot_restore_policy.enabled 
= enable_spot_restore + + if spot_restore_timeout is not None: + vmss.spot_restore_policy.restore_timeout = spot_restore_timeout + + if priority is not None: + vmss.virtual_machine_profile.priority = priority + + if max_price is not None: + if vmss.virtual_machine_profile.billing_profile is None: + BillingProfile = cmd.get_models('BillingProfile') + vmss.virtual_machine_profile.billing_profile = BillingProfile(max_price=max_price) + else: + vmss.virtual_machine_profile.billing_profile.max_price = max_price + + if security_type is not None or enable_secure_boot is not None or enable_vtpm is not None: + security_profile = vmss.virtual_machine_profile.security_profile + prev_security_type = security_profile.security_type if security_profile else None + # At present, `SecurityType` has options `TrustedLaunch` and `Standard` + if security_type == 'TrustedLaunch' and prev_security_type != security_type: + vmss.virtual_machine_profile.security_profile = { + 'securityType': security_type, + 'uefiSettings': { + 'secureBootEnabled': enable_secure_boot if enable_secure_boot is not None else False, + 'vTpmEnabled': enable_vtpm if enable_vtpm is not None else True + } + } + elif security_type == 'Standard': + if prev_security_type == 'TrustedLaunch': + logger.warning('Turning off Trusted launch disables foundational security for your VMs. ' + 'For more information, visit https://aka.ms/TrustedLaunch') + vmss.virtual_machine_profile.security_profile = { + 'securityType': security_type, + 'uefiSettings': None + } + else: + vmss.virtual_machine_profile.security_profile = {'uefiSettings': { + 'secureBootEnabled': enable_secure_boot, + 'vTpmEnabled': enable_vtpm + }} + + if enable_proxy_agent is not None or proxy_agent_mode is not None: + SecurityProfile = cmd.get_models('SecurityProfile') + ProxyAgentSettings = cmd.get_models('ProxyAgentSettings') + if vmss.virtual_machine_profile.security_profile is None: + vmss.virtual_machine_profile.security_profile = SecurityProfile() + vmss.virtual_machine_profile.security_profile.proxy_agent_settings = ProxyAgentSettings( + enabled=enable_proxy_agent, mode=proxy_agent_mode) + elif vmss.virtual_machine_profile.security_profile.proxy_agent_settings is None: + vmss.virtual_machine_profile.security_profile.proxy_agent_settings = ProxyAgentSettings( + enabled=enable_proxy_agent, mode=proxy_agent_mode) + else: + vmss.virtual_machine_profile.security_profile.proxy_agent_settings.enabled = enable_proxy_agent + vmss.virtual_machine_profile.security_profile.proxy_agent_settings.mode = proxy_agent_mode + + if regular_priority_count is not None or regular_priority_percentage is not None: + if vmss.orchestration_mode != 'Flexible': + raise ValidationError("--regular-priority-count/--regular-priority-percentage is only available for" + " VMSS with flexible orchestration mode") + if vmss.priority_mix_policy is None: + vmss.priority_mix_policy = { + 'baseRegularPriorityCount': regular_priority_count, + 'regularPriorityPercentageAboveBase': regular_priority_percentage + } + else: + if regular_priority_count is not None: + vmss.priority_mix_policy.base_regular_priority_count = regular_priority_count + if regular_priority_percentage is not None: + vmss.priority_mix_policy.regular_priority_percentage_above_base = regular_priority_percentage + + if proximity_placement_group is not None: + vmss.proximity_placement_group = {'id': proximity_placement_group} + + if max_batch_instance_percent is not None or max_unhealthy_instance_percent is not None \ + or 
max_unhealthy_upgraded_instance_percent is not None or pause_time_between_batches is not None \ + or enable_cross_zone_upgrade is not None or prioritize_unhealthy_instances is not None \ + or max_surge is not None: + if vmss.upgrade_policy is None: + vmss.upgrade_policy = {'rolling_upgrade_policy': None} + if vmss.upgrade_policy.rolling_upgrade_policy is None: + vmss.upgrade_policy.rolling_upgrade_policy = { + 'maxBatchInstancePercent': max_batch_instance_percent, + 'maxUnhealthyInstancePercent': max_unhealthy_instance_percent, + 'maxUnhealthyUpgradedInstancePercent': max_unhealthy_upgraded_instance_percent, + 'pauseTimeBetweenBatches': pause_time_between_batches, + 'enableCrossZoneUpgrade': enable_cross_zone_upgrade, + 'prioritizeUnhealthyInstances': prioritize_unhealthy_instances, + 'maxSurge': max_surge + } + else: + vmss.upgrade_policy.rolling_upgrade_policy.max_batch_instance_percent = max_batch_instance_percent + vmss.upgrade_policy.rolling_upgrade_policy.max_unhealthy_instance_percent = max_unhealthy_instance_percent + vmss.upgrade_policy.rolling_upgrade_policy.max_unhealthy_upgraded_instance_percent = \ + max_unhealthy_upgraded_instance_percent + vmss.upgrade_policy.rolling_upgrade_policy.pause_time_between_batches = pause_time_between_batches + vmss.upgrade_policy.rolling_upgrade_policy.enable_cross_zone_upgrade = enable_cross_zone_upgrade + vmss.upgrade_policy.rolling_upgrade_policy.prioritize_unhealthy_instances = prioritize_unhealthy_instances + vmss.upgrade_policy.rolling_upgrade_policy.max_surge = max_surge + + if upgrade_policy_mode is not None: + vmss.upgrade_policy.mode = upgrade_policy_mode + + if enable_auto_os_upgrade is not None: + if vmss.upgrade_policy.automatic_os_upgrade_policy is None: + vmss.upgrade_policy.automatic_os_upgrade_policy = {'enableAutomaticOSUpgrade': enable_auto_os_upgrade} + else: + vmss.upgrade_policy.automatic_os_upgrade_policy.enable_automatic_os_upgrade = enable_auto_os_upgrade + + if vm_sku is not None: + if vmss.sku.name == vm_sku: + logger.warning("VMSS sku is already %s", vm_sku) + else: + vmss.sku.name = vm_sku + + sku_profile = dict() + if skuprofile_vmsizes is not None or skuprofile_allostrat is not None: + if skuprofile_vmsizes is not None: + sku_profile_vmsizes_list = [] + for vm_size in skuprofile_vmsizes: + vmsize_obj = { + 'name': vm_size + } + sku_profile_vmsizes_list.append(vmsize_obj) + sku_profile['vmSizes'] = sku_profile_vmsizes_list + if skuprofile_allostrat is not None: + sku_profile['allocationStrategy'] = skuprofile_allostrat + vmss.sku_profile = sku_profile + + if ephemeral_os_disk_placement is not None or ephemeral_os_disk_option is not None: + if vmss.virtual_machine_profile.storage_profile.os_disk.diff_disk_settings is not None: + vmss.virtual_machine_profile.storage_profile.os_disk.diff_disk_settings.placement = \ + ephemeral_os_disk_placement + vmss.virtual_machine_profile.storage_profile.os_disk.diff_disk_settings.option = \ + ephemeral_os_disk_option + else: + DiffDiskSettings = cmd.get_models('DiffDiskSettings') + vmss.virtual_machine_profile.storage_profile.os_disk.diff_disk_settings = DiffDiskSettings( + option=ephemeral_os_disk_option, placement=ephemeral_os_disk_placement) + + if ephemeral_os_disk is False: + vmss.virtual_machine_profile.storage_profile.os_disk.diff_disk_settings = {} + + if disk_controller_type is not None: + vmss.virtual_machine_profile.storage_profile.disk_controller_type = disk_controller_type + + if custom_data is not None: + custom_data = read_content_if_is_file(custom_data) + 
vmss.virtual_machine_profile.os_profile.custom_data = b64encode(custom_data) + + if enable_hibernation is not None: + if vmss.additional_capabilities is None: + AdditionalCapabilities = cmd.get_models('AdditionalCapabilities') + vmss.additional_capabilities = AdditionalCapabilities(hibernation_enabled=enable_hibernation) + else: + vmss.additional_capabilities.hibernation_enabled = enable_hibernation + + if security_posture_reference_id is not None or security_posture_reference_exclude_extensions is not None: + security_posture_reference = vmss.virtual_machine_profile.security_posture_reference + if security_posture_reference is None: + SecurityPostureReference = cmd.get_models('SecurityPostureReference') + security_posture_reference = SecurityPostureReference() + + if security_posture_reference_id is not None: + security_posture_reference.id = security_posture_reference_id + if security_posture_reference_exclude_extensions is not None: + security_posture_reference.exclude_extensions = security_posture_reference_exclude_extensions + + vmss.virtual_machine_profile.security_posture_reference = security_posture_reference + + if enable_resilient_creation is not None or enable_resilient_deletion is not None: + resiliency_policy = vmss.resiliency_policy + if enable_resilient_creation is not None: + resiliency_policy.resilient_vm_creation_policy = {'enabled': enable_resilient_creation} + if enable_resilient_deletion is not None: + resiliency_policy.resilient_vm_deletion_policy = {'enabled': enable_resilient_deletion} + + if zones is not None: + vmss.zones = zones + + return sdk_no_wait(no_wait, client.virtual_machine_scale_sets.begin_create_or_update, + resource_group_name, name, **kwargs) + + +# endregion + + +# region VirtualMachineScaleSets Diagnostics +def set_vmss_diagnostics_extension( + cmd, resource_group_name, vmss_name, settings, protected_settings=None, version=None, + no_auto_upgrade=False): + client = _compute_client_factory(cmd.cli_ctx) + vmss = client.virtual_machine_scale_sets.get(resource_group_name, vmss_name) + # pylint: disable=no-member + is_linux_os = _is_linux_os(vmss.virtual_machine_profile) + vm_extension_name = _LINUX_DIAG_EXT if is_linux_os else _WINDOWS_DIAG_EXT + if is_linux_os and vmss.virtual_machine_profile.extension_profile: # check for incompatible versions + exts = vmss.virtual_machine_profile.extension_profile.extensions or [] + major_ver = extension_mappings[_LINUX_DIAG_EXT]['version'].split('.')[0] + # For VMSS, we don't do auto-removal like VM because there is no reliable API to wait for + # the removal to complete before we can install the newer one + if next((e for e in exts if e.name == _LINUX_DIAG_EXT and + not e.type_handler_version.startswith(major_ver + '.')), None): + delete_cmd = 'az vmss extension delete -g {} --vmss-name {} -n {}'.format( + resource_group_name, vmss_name, vm_extension_name) + raise CLIError("There is an incompatible version of diagnostics extension installed. " + "Please remove it by running '{}', and retry. 'az vmss update-instances'" + " may be needed if the scale set uses a manual upgrade policy".format(delete_cmd)) + + poller = set_vmss_extension(cmd, resource_group_name, vmss_name, vm_extension_name, + extension_mappings[vm_extension_name]['publisher'], + version or extension_mappings[vm_extension_name]['version'], + settings, + protected_settings, + no_auto_upgrade) + + result = LongRunningOperation(cmd.cli_ctx)(poller) + UpgradeMode = cmd.get_models('UpgradeMode') + if vmss.upgrade_policy.mode == UpgradeMode.manual: + poller2 = update_vmss_instances(cmd, resource_group_name, vmss_name, ['*']) + LongRunningOperation(cmd.cli_ctx)(poller2) + return result + + +# endregion + + +# region VirtualMachineScaleSets Disks (Managed) +def attach_managed_data_disk_to_vmss(cmd, resource_group_name, vmss_name, size_gb=None, instance_id=None, lun=None, + caching=None, disk=None, sku=None): + def _init_data_disk(storage_profile, lun, existing_disk=None): + data_disks = storage_profile.data_disks or [] + if lun is None: + lun = _get_disk_lun(data_disks) + if existing_disk is None: + data_disk = DataDisk(lun=lun, create_option=DiskCreateOptionTypes.empty, disk_size_gb=size_gb, + caching=caching, managed_disk=ManagedDiskParameters(storage_account_type=sku)) + else: + data_disk = DataDisk(lun=lun, create_option=DiskCreateOptionTypes.attach, caching=caching, + managed_disk=ManagedDiskParameters(id=existing_disk, storage_account_type=sku)) + + data_disks.append(data_disk) + storage_profile.data_disks = data_disks + + DiskCreateOptionTypes, ManagedDiskParameters = cmd.get_models( + 'DiskCreateOptionTypes', 'ManagedDiskParameters') + if disk is None: + DataDisk = cmd.get_models('VirtualMachineScaleSetDataDisk') + else: + DataDisk = cmd.get_models('DataDisk') + + client = _compute_client_factory(cmd.cli_ctx) + if instance_id is None: + vmss = client.virtual_machine_scale_sets.get(resource_group_name, vmss_name) + # Avoid unnecessary permission error + vmss.virtual_machine_profile.storage_profile.image_reference = None + # pylint: disable=no-member + _init_data_disk(vmss.virtual_machine_profile.storage_profile, lun) + return client.virtual_machine_scale_sets.begin_create_or_update(resource_group_name, vmss_name, vmss) + + vmss_vm = client.virtual_machine_scale_set_vms.get(resource_group_name, vmss_name, instance_id) + # Avoid unnecessary permission error + vmss_vm.storage_profile.image_reference = None + _init_data_disk(vmss_vm.storage_profile, lun, disk) + return client.virtual_machine_scale_set_vms.begin_update(resource_group_name, vmss_name, instance_id, vmss_vm) + + +def detach_disk_from_vmss(cmd, resource_group_name, vmss_name, lun, instance_id=None): + client = _compute_client_factory(cmd.cli_ctx) + if instance_id is None: + vmss = client.virtual_machine_scale_sets.get(resource_group_name, vmss_name) + # Avoid unnecessary permission error + vmss.virtual_machine_profile.storage_profile.image_reference = None + # pylint: disable=no-member + data_disks = vmss.virtual_machine_profile.storage_profile.data_disks + else: + vmss_vm = client.virtual_machine_scale_set_vms.get(resource_group_name, vmss_name, instance_id) + # Avoid unnecessary permission error + vmss_vm.storage_profile.image_reference = None + data_disks = vmss_vm.storage_profile.data_disks + + if not data_disks: + raise CLIError("Data disk doesn't exist") + + leftovers = [d for d in data_disks if d.lun != lun] + if len(data_disks) == len(leftovers): + raise CLIError("Could not find the data disk with lun '{}'".format(lun)) + + if instance_id is None: + 
vmss.virtual_machine_profile.storage_profile.data_disks = leftovers + return client.virtual_machine_scale_sets.begin_create_or_update(resource_group_name, vmss_name, vmss) + vmss_vm.storage_profile.data_disks = leftovers + return client.virtual_machine_scale_set_vms.begin_update(resource_group_name, vmss_name, instance_id, vmss_vm) + + +# endregion + + +# region VirtualMachineScaleSets Extensions +def delete_vmss_extension(cmd, resource_group_name, vmss_name, extension_name): + client = _compute_client_factory(cmd.cli_ctx) + vmss = client.virtual_machine_scale_sets.get(resource_group_name=resource_group_name, vm_scale_set_name=vmss_name) + # Avoid unnecessary permission error + vmss.virtual_machine_profile.storage_profile.image_reference = None + # pylint: disable=no-member + if not vmss.virtual_machine_profile.extension_profile: + raise CLIError('Scale set has no extensions to delete') + + keep_list = [e for e in vmss.virtual_machine_profile.extension_profile.extensions + if e.name != extension_name] + if len(keep_list) == len(vmss.virtual_machine_profile.extension_profile.extensions): + raise CLIError('Extension {} not found'.format(extension_name)) + + vmss.virtual_machine_profile.extension_profile.extensions = keep_list + + return client.virtual_machine_scale_sets.begin_create_or_update(resource_group_name=resource_group_name, + vm_scale_set_name=vmss_name, parameters=vmss) + + +# pylint: disable=inconsistent-return-statements +def get_vmss_extension(cmd, resource_group_name, vmss_name, extension_name): + client = _compute_client_factory(cmd.cli_ctx) + vmss = client.virtual_machine_scale_sets.get(resource_group_name=resource_group_name, vm_scale_set_name=vmss_name) + # pylint: disable=no-member + if not vmss.virtual_machine_profile.extension_profile: + return + return next((e for e in vmss.virtual_machine_profile.extension_profile.extensions + if e.name == extension_name), None) + + +def list_vmss_extensions(cmd, resource_group_name, vmss_name): + client = _compute_client_factory(cmd.cli_ctx) + vmss = client.virtual_machine_scale_sets.get(resource_group_name=resource_group_name, vm_scale_set_name=vmss_name) + # pylint: disable=no-member + if vmss.virtual_machine_profile and vmss.virtual_machine_profile.extension_profile: + return vmss.virtual_machine_profile.extension_profile.extensions + return None + + +def set_vmss_extension(cmd, resource_group_name, vmss_name, extension_name, publisher, version=None, + settings=None, protected_settings=None, no_auto_upgrade=False, force_update=False, + no_wait=False, extension_instance_name=None, provision_after_extensions=None, + enable_auto_upgrade=None): + if not extension_instance_name: + extension_instance_name = extension_name + + auto_upgrade_extensions = ['CodeIntegrityAgent'] + if extension_name in auto_upgrade_extensions and enable_auto_upgrade is None: + enable_auto_upgrade = True + + client = _compute_client_factory(cmd.cli_ctx) + vmss = client.virtual_machine_scale_sets.get(resource_group_name=resource_group_name, vm_scale_set_name=vmss_name) + # Avoid unnecessary permission error + vmss.virtual_machine_profile.storage_profile.image_reference = None + VirtualMachineScaleSetExtension, VirtualMachineScaleSetExtensionProfile = cmd.get_models( + 'VirtualMachineScaleSetExtension', 'VirtualMachineScaleSetExtensionProfile') + + # pylint: disable=no-member + version = _normalize_extension_version(cmd.cli_ctx, publisher, extension_name, version, vmss.location) + extension_profile = vmss.virtual_machine_profile.extension_profile + if 
extension_profile: + extensions = extension_profile.extensions + if extensions: + extension_profile.extensions = [x for x in extensions if + x.type_properties_type.lower() != extension_name.lower() or x.publisher.lower() != publisher.lower()] # pylint: disable=line-too-long + + if cmd.supported_api_version(min_api='2019-07-01', operation_group='virtual_machine_scale_sets'): + ext = VirtualMachineScaleSetExtension(name=extension_instance_name, + publisher=publisher, + type_properties_type=extension_name, + protected_settings=protected_settings, + type_handler_version=version, + settings=settings, + auto_upgrade_minor_version=(not no_auto_upgrade), + provision_after_extensions=provision_after_extensions, + enable_automatic_upgrade=enable_auto_upgrade) + else: + ext = VirtualMachineScaleSetExtension(name=extension_instance_name, + publisher=publisher, + type=extension_name, + protected_settings=protected_settings, + type_handler_version=version, + settings=settings, + auto_upgrade_minor_version=(not no_auto_upgrade), + provision_after_extensions=provision_after_extensions, + enable_automatic_upgrade=enable_auto_upgrade) + + if force_update: + ext.force_update_tag = str(_gen_guid()) + + if not vmss.virtual_machine_profile.extension_profile: + vmss.virtual_machine_profile.extension_profile = VirtualMachineScaleSetExtensionProfile(extensions=[]) + vmss.virtual_machine_profile.extension_profile.extensions.append(ext) + + return sdk_no_wait(no_wait, client.virtual_machine_scale_sets.begin_create_or_update, + resource_group_name, vmss_name, vmss) + + +def set_orchestration_service_state(cmd, resource_group_name, vm_scale_set_name, service_name, action, no_wait=False): + # Currently service_name has only one available value, "AutomaticRepairs", and the SDK does not accept it; + # instead, the SDK hard-codes "AutomaticRepairs" in its own logic. As more service names may be supported later, + # we define service_name as a required parameter here to avoid introducing a breaking change in the future. 
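+    # A minimal sketch (hypothetical resource names) of what this resolves to when suspending
+    # automatic repairs on a scale set:
+    #   state_input = OrchestrationServiceStateInput(service_name='AutomaticRepairs', action='Suspend')
+    #   client.virtual_machine_scale_sets.begin_set_orchestration_service_state(
+    #       'MyResourceGroup', 'MyScaleSet', state_input)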
+ client = _compute_client_factory(cmd.cli_ctx) + OrchestrationServiceStateInput = cmd.get_models('OrchestrationServiceStateInput') + state_input = OrchestrationServiceStateInput(service_name=service_name, action=action) + return sdk_no_wait(no_wait, client.virtual_machine_scale_sets.begin_set_orchestration_service_state, + resource_group_name, vm_scale_set_name, state_input) + + +def upgrade_vmss_extension(cmd, resource_group_name, vm_scale_set_name, no_wait=False): + client = _compute_client_factory(cmd.cli_ctx) + return sdk_no_wait(no_wait, client.virtual_machine_scale_set_rolling_upgrades.begin_start_extension_upgrade, + resource_group_name, vm_scale_set_name) + + +# endregion + + +# region VirtualMachineScaleSets RunCommand +def vmss_run_command_invoke(cmd, resource_group_name, vmss_name, command_id, instance_id, scripts=None, + parameters=None): # pylint: disable=line-too-long + return run_command_invoke(cmd, resource_group_name, vmss_name, command_id, scripts, parameters, instance_id) + + +def vmss_run_command_create(client, + resource_group_name, + vmss_name, + instance_id, + run_command_name, + location, + tags=None, + script=None, + script_uri=None, + command_id=None, + parameters=None, + protected_parameters=None, + async_execution=None, + run_as_user=None, + run_as_password=None, + timeout_in_seconds=None, + output_blob_uri=None, + error_blob_uri=None, + no_wait=False): + run_command = {} + run_command['location'] = location + if tags is not None: + run_command['tags'] = tags + source = {} + if script is not None: + source['script'] = script + if script_uri is not None: + source['script_uri'] = script_uri + if command_id is not None: + source['command_id'] = command_id + run_command['source'] = source + if parameters is not None: + auto_arg_name_num = 0 + run_command['parameters'] = [] + for p in parameters: + if '=' in p: + n, v = p.split('=', 1) + else: + auto_arg_name_num += 1 + n = 'arg{}'.format(auto_arg_name_num) + v = p + run_command['parameters'].append({'name': n, 'value': v}) + if protected_parameters is not None: + auto_arg_name_num = 0 + run_command['protected_parameters'] = [] + for p in protected_parameters: + if '=' in p: + n, v = p.split('=', 1) + else: + auto_arg_name_num += 1 + n = 'arg{}'.format(auto_arg_name_num) + v = p + run_command['protected_parameters'].append({'name': n, 'value': v}) + if async_execution is not None: + run_command['async_execution'] = async_execution + else: + run_command['async_execution'] = False + if run_as_user is not None: + run_command['run_as_user'] = run_as_user + if run_as_password is not None: + run_command['run_as_password'] = run_as_password + if timeout_in_seconds is not None: + run_command['timeout_in_seconds'] = timeout_in_seconds + if output_blob_uri is not None: + run_command['output_blob_uri'] = output_blob_uri + if error_blob_uri is not None: + run_command['error_blob_uri'] = error_blob_uri + return sdk_no_wait(no_wait, + client.begin_create_or_update, + resource_group_name=resource_group_name, + vm_scale_set_name=vmss_name, + instance_id=instance_id, + run_command_name=run_command_name, + run_command=run_command) + + +def vmss_run_command_update(client, + resource_group_name, + vmss_name, + instance_id, + run_command_name, + location, + tags=None, + script=None, + script_uri=None, + command_id=None, + parameters=None, + protected_parameters=None, + async_execution=None, + run_as_user=None, + run_as_password=None, + timeout_in_seconds=None, + output_blob_uri=None, + error_blob_uri=None, + no_wait=False): + 
run_command = {} + run_command['location'] = location + if tags is not None: + run_command['tags'] = tags + source = {} + if script is not None: + source['script'] = script + if script_uri is not None: + source['script_uri'] = script_uri + if command_id is not None: + source['command_id'] = command_id + run_command['source'] = source + if parameters is not None: + auto_arg_name_num = 0 + run_command['parameters'] = [] + for p in parameters: + if '=' in p: + n, v = p.split('=', 1) + else: + auto_arg_name_num += 1 + n = 'arg{}'.format(auto_arg_name_num) + v = p + run_command['parameters'].append({'name': n, 'value': v}) + if protected_parameters is not None: + auto_arg_name_num = 0 + run_command['protected_parameters'] = [] + for p in protected_parameters: + if '=' in p: + n, v = p.split('=', 1) + else: + auto_arg_name_num += 1 + n = 'arg{}'.format(auto_arg_name_num) + v = p + run_command['protected_parameters'].append({'name': n, 'value': v}) + if async_execution is not None: + run_command['async_execution'] = async_execution + else: + run_command['async_execution'] = False + if run_as_user is not None: + run_command['run_as_user'] = run_as_user + if run_as_password is not None: + run_command['run_as_password'] = run_as_password + if timeout_in_seconds is not None: + run_command['timeout_in_seconds'] = timeout_in_seconds + if output_blob_uri is not None: + run_command['output_blob_uri'] = output_blob_uri + if error_blob_uri is not None: + run_command['error_blob_uri'] = error_blob_uri + return sdk_no_wait(no_wait, + client.begin_update, + resource_group_name=resource_group_name, + vm_scale_set_name=vmss_name, + instance_id=instance_id, + run_command_name=run_command_name, + run_command=run_command) + + +def vmss_run_command_delete(client, + resource_group_name, + vmss_name, + instance_id, + run_command_name, + no_wait=False): + return sdk_no_wait(no_wait, + client.begin_delete, + resource_group_name=resource_group_name, + vm_scale_set_name=vmss_name, + instance_id=instance_id, + run_command_name=run_command_name) + + +def vmss_run_command_list(client, + resource_group_name, + vmss_name, + instance_id, + expand=None): + return client.list(resource_group_name=resource_group_name, + vm_scale_set_name=vmss_name, + instance_id=instance_id, + expand=expand) + + +def vmss_run_command_show(client, + resource_group_name, + vmss_name, + instance_id, + run_command_name, + expand=None, + instance_view=False): + if instance_view: + expand = 'instanceView' + return client.get(resource_group_name=resource_group_name, + vm_scale_set_name=vmss_name, + instance_id=instance_id, + run_command_name=run_command_name, + expand=expand) + + +# endregion + + +# region VirtualMachineScaleSets Identity +def remove_vmss_identity(cmd, resource_group_name, vmss_name, identities=None): + client = _compute_client_factory(cmd.cli_ctx) + + def _get_vmss(_, resource_group_name, vmss_name): + return client.virtual_machine_scale_sets.get(resource_group_name, vmss_name) + + def _set_vmss(resource_group_name, name, vmss_instance): + VirtualMachineScaleSetUpdate = cmd.get_models('VirtualMachineScaleSetUpdate', + operation_group='virtual_machine_scale_sets') + vmss_update = VirtualMachineScaleSetUpdate(identity=vmss_instance.identity) + return client.virtual_machine_scale_sets.begin_update(resource_group_name, vmss_name, vmss_update) + + if identities is None: + from ._vm_utils import MSI_LOCAL_ID + identities = [MSI_LOCAL_ID] + + return _remove_identities(cmd, resource_group_name, vmss_name, identities, + _get_vmss, + _set_vmss) + 
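+# A short usage sketch (illustrative, hypothetical names): removing only the system-assigned identity:
+#   az vmss identity remove -g MyResourceGroup -n MyScaleSet
+# while a specific user-assigned identity can be removed by name or resource ID:
+#   az vmss identity remove -g MyResourceGroup -n MyScaleSet --identities MyUserIdentity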
+ +# endregion + + +# from azure.mgmt.compute.models import Gallery, SharingProfile +def update_image_galleries(cmd, resource_group_name, gallery_name, gallery, permissions=None, + soft_delete=None, publisher_uri=None, publisher_contact=None, eula=None, + public_name_prefix=None, **kwargs): + if permissions: + if gallery.sharing_profile is None: + SharingProfile = cmd.get_models('SharingProfile', operation_group='shared_galleries') + gallery.sharing_profile = SharingProfile(permissions=permissions) + else: + gallery.sharing_profile.permissions = permissions + community_gallery_info = None + if permissions == 'Community': + if publisher_uri is None or publisher_contact is None or eula is None or public_name_prefix is None: + raise RequiredArgumentMissingError('If you want to share to the community, ' + 'you need to fill in all the following parameters:' + ' --publisher-uri, --publisher-email, --eula, --public-name-prefix.') + + CommunityGalleryInfo = cmd.get_models('CommunityGalleryInfo', operation_group='shared_galleries') + community_gallery_info = CommunityGalleryInfo(publisher_uri=publisher_uri, + publisher_contact=publisher_contact, + eula=eula, + public_name_prefix=public_name_prefix) + gallery.sharing_profile.community_gallery_info = community_gallery_info + + if soft_delete is not None: + if gallery.soft_delete_policy: + gallery.soft_delete_policy.is_soft_delete_enabled = soft_delete + else: + gallery.soft_delete_policy = {'is_soft_delete_enabled': soft_delete} + else: + # This is a workaround for a historical legacy issue: + # sending None to the service lets the service leave this property unmodified. + # We can delete this logic when the service no longer checks AFEC in the future. + gallery.soft_delete_policy = None + + client = _compute_client_factory(cmd.cli_ctx) + + return client.galleries.begin_create_or_update(resource_group_name, gallery_name, gallery, **kwargs) + + +def show_image_gallery(cmd, resource_group_name, gallery_name, select=None, sharing_groups=None): + if sharing_groups: + sharing_groups = 'sharingProfile/Groups' + client = _compute_client_factory(cmd.cli_ctx) + return client.galleries.get(resource_group_name, gallery_name, select=select, expand=sharing_groups) + + +def create_image_gallery(cmd, resource_group_name, gallery_name, description=None, + location=None, no_wait=False, tags=None, permissions=None, soft_delete=None, + publisher_uri=None, publisher_contact=None, eula=None, public_name_prefix=None): + Gallery = cmd.get_models('Gallery') + location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name) + gallery = Gallery(description=description, location=location, tags=(tags or {})) + if soft_delete is not None: + gallery.soft_delete_policy = {'is_soft_delete_enabled': soft_delete} + client = _compute_client_factory(cmd.cli_ctx) + if permissions: + SharingProfile = cmd.get_models('SharingProfile', operation_group='shared_galleries') + gallery.sharing_profile = SharingProfile(permissions=permissions) + if permissions == 'Community': + if publisher_uri is None or publisher_contact is None or eula is None or public_name_prefix is None: + raise RequiredArgumentMissingError('If you want to share to the community, ' + 'you need to fill in all the following parameters:' + ' --publisher-uri, --publisher-email, --eula, --public-name-prefix.') + + CommunityGalleryInfo = cmd.get_models('CommunityGalleryInfo', operation_group='shared_galleries') + gallery.sharing_profile.community_gallery_info = CommunityGalleryInfo(publisher_uri=publisher_uri, + 
publisher_contact=publisher_contact, + eula=eula, + public_name_prefix=public_name_prefix) + + return sdk_no_wait(no_wait, client.galleries.begin_create_or_update, resource_group_name, gallery_name, gallery) + + +def create_gallery_image(cmd, resource_group_name, gallery_name, gallery_image_name, os_type, publisher, offer, sku, + os_state='Generalized', end_of_life_date=None, privacy_statement_uri=None, + release_note_uri=None, eula=None, description=None, location=None, + minimum_cpu_core=None, maximum_cpu_core=None, minimum_memory=None, maximum_memory=None, + disallowed_disk_types=None, plan_name=None, plan_publisher=None, plan_product=None, tags=None, + hyper_v_generation='V2', features=None, architecture=None): + logger.warning( + "Starting from Build (May) 2024, the \"az sig image-definition create\" command will use the new default values " + "Hyper-V Generation: V2 and SecurityType: TrustedLaunchSupported." + ) + + # pylint: disable=line-too-long + GalleryImage, GalleryImageIdentifier, RecommendedMachineConfiguration, ResourceRange, Disallowed, ImagePurchasePlan, GalleryImageFeature = cmd.get_models( + 'GalleryImage', 'GalleryImageIdentifier', 'RecommendedMachineConfiguration', 'ResourceRange', 'Disallowed', + 'ImagePurchasePlan', 'GalleryImageFeature') + client = _compute_client_factory(cmd.cli_ctx) + location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name) + + end_of_life_date = fix_gallery_image_date_info(end_of_life_date) + recommendation = None + if any([minimum_cpu_core, maximum_cpu_core, minimum_memory, maximum_memory]): + cpu_recommendation, memory_recommendation = None, None + if any([minimum_cpu_core, maximum_cpu_core]): + cpu_recommendation = ResourceRange(min=minimum_cpu_core, max=maximum_cpu_core) + if any([minimum_memory, maximum_memory]): + memory_recommendation = ResourceRange(min=minimum_memory, max=maximum_memory) + recommendation = RecommendedMachineConfiguration(v_cp_us=cpu_recommendation, memory=memory_recommendation) + purchase_plan = None + if any([plan_name, plan_publisher, plan_product]): + purchase_plan = ImagePurchasePlan(name=plan_name, publisher=plan_publisher, product=plan_product) + + feature_list = None + if features: + from ._constants import COMPATIBLE_SECURITY_TYPE_VALUE, UPGRADE_SECURITY_HINT + feature_list = [] + security_type = None + for item in features.split(): + try: + key, value = item.split('=', 1) + # Create a non-Trusted Launch VM image. + # The `Standard` value is kept for backward compatibility so that customers can keep their current behavior + # after the default values change to Trusted Launch VMs in the future. 
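+                # For example (illustrative input), "--features SecurityType=Standard IsAcceleratedNetworkSupported=true"
+                # splits into the pairs ('SecurityType', 'Standard') and ('IsAcceleratedNetworkSupported', 'true') here.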
+ if key == 'SecurityType': + security_type = True + if key == 'SecurityType' and value == COMPATIBLE_SECURITY_TYPE_VALUE: + logger.warning(UPGRADE_SECURITY_HINT) + continue + feature_list.append(GalleryImageFeature(name=key, value=value)) + except ValueError: + raise CLIError('usage error: --features KEY=VALUE [KEY=VALUE ...]') + if security_type is None and hyper_v_generation == 'V2': + feature_list.append(GalleryImageFeature(name='SecurityType', value='TrustedLaunchSupported')) + if features is None and cmd.cli_ctx.cloud.profile == 'latest' and hyper_v_generation == 'V2': + feature_list = [] + feature_list.append(GalleryImageFeature(name='SecurityType', value='TrustedLaunchSupported')) + + image = GalleryImage(identifier=GalleryImageIdentifier(publisher=publisher, offer=offer, sku=sku), + os_type=os_type, os_state=os_state, end_of_life_date=end_of_life_date, + recommended=recommendation, disallowed=Disallowed(disk_types=disallowed_disk_types), + purchase_plan=purchase_plan, location=location, eula=eula, tags=(tags or {}), + hyper_v_generation=hyper_v_generation, features=feature_list, architecture=architecture) + return client.gallery_images.begin_create_or_update(resource_group_name, gallery_name, gallery_image_name, image) + + +def _add_aux_subscription(aux_subscriptions, resource_id): + if resource_id: + aux_subs = _parse_aux_subscriptions(resource_id) + if aux_subs and aux_subs[0] not in aux_subscriptions: + aux_subscriptions.extend(aux_subs) + + +def _get_image_version_aux_subscription(managed_image, os_snapshot, data_snapshots): + aux_subscriptions = [] + _add_aux_subscription(aux_subscriptions, managed_image) + _add_aux_subscription(aux_subscriptions, os_snapshot) + if data_snapshots: + for data_snapshot in data_snapshots: + _add_aux_subscription(aux_subscriptions, data_snapshot) + return aux_subscriptions if aux_subscriptions else None + + +def create_image_version(cmd, resource_group_name, gallery_name, gallery_image_name, gallery_image_version, + location=None, target_regions=None, storage_account_type=None, + end_of_life_date=None, exclude_from_latest=None, replica_count=None, tags=None, + os_snapshot=None, data_snapshots=None, managed_image=None, data_snapshot_luns=None, + target_region_encryption=None, os_vhd_uri=None, os_vhd_storage_account=None, + data_vhds_uris=None, data_vhds_luns=None, data_vhds_storage_accounts=None, + replication_mode=None, target_region_cvm_encryption=None, virtual_machine=None, + image_version=None, target_zone_encryption=None, target_edge_zones=None, + allow_replicated_location_deletion=None): + from azure.mgmt.core.tools import resource_id, is_valid_resource_id + from azure.cli.core.commands.client_factory import get_subscription_id + + ImageVersionPublishingProfile, GalleryArtifactSource, ManagedArtifact, ImageVersion, TargetRegion = cmd.get_models( + 'GalleryImageVersionPublishingProfile', 'GalleryArtifactSource', 'ManagedArtifact', 'GalleryImageVersion', + 'TargetRegion') + aux_subscriptions = _get_image_version_aux_subscription(managed_image, os_snapshot, data_snapshots) + client = _compute_client_factory(cmd.cli_ctx, aux_subscriptions=aux_subscriptions) + + location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name) + end_of_life_date = fix_gallery_image_date_info(end_of_life_date) + if managed_image and not is_valid_resource_id(managed_image): + managed_image = resource_id(subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name, + namespace='Microsoft.Compute', type='images', 
name=managed_image) + if os_snapshot and not is_valid_resource_id(os_snapshot): + os_snapshot = resource_id(subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name, + namespace='Microsoft.Compute', type='snapshots', name=os_snapshot) + if data_snapshots: + for i, s in enumerate(data_snapshots): + if not is_valid_resource_id(data_snapshots[i]): + data_snapshots[i] = resource_id( + subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name, + namespace='Microsoft.Compute', type='snapshots', name=s) + + profile = ImageVersionPublishingProfile(exclude_from_latest=exclude_from_latest, + end_of_life_date=end_of_life_date, + target_regions=target_regions or [TargetRegion(name=location)], + replica_count=replica_count, storage_account_type=storage_account_type) + + if target_edge_zones: + profile.target_extended_locations = target_edge_zones + + if replication_mode is not None: + profile.replication_mode = replication_mode + if not cmd.supported_api_version(min_api='2022-03-03', operation_group='gallery_image_versions'): + source = GalleryArtifactSource(managed_image=ManagedArtifact(id=managed_image)) + profile.source = source + + if cmd.supported_api_version(min_api='2019-07-01', operation_group='gallery_image_versions'): + if managed_image is None and os_snapshot is None and os_vhd_uri is None: + raise RequiredArgumentMissingError('usage error: Please provide --managed-image or --os-snapshot or --vhd') + GalleryImageVersionStorageProfile = cmd.get_models('GalleryImageVersionStorageProfile') + GalleryOSDiskImage = cmd.get_models('GalleryOSDiskImage') + GalleryDataDiskImage = cmd.get_models('GalleryDataDiskImage') + if cmd.supported_api_version(min_api='2022-03-03', operation_group='gallery_image_versions'): + GalleryArtifactVersionFullSource = cmd.get_models('GalleryArtifactVersionFullSource') + GalleryDiskImageSource = cmd.get_models('GalleryDiskImageSource') + else: + GalleryArtifactVersionFullSource = cmd.get_models('GalleryArtifactVersionSource') + GalleryDiskImageSource = cmd.get_models('GalleryArtifactVersionSource') + + source = os_disk_image = data_disk_images = None + if virtual_machine is not None and cmd.supported_api_version(min_api='2023-07-03', + operation_group='gallery_image_versions'): + source = GalleryArtifactVersionFullSource(virtual_machine_id=virtual_machine) + elif managed_image is not None: + source = GalleryArtifactVersionFullSource(id=managed_image) + if os_snapshot is not None: + os_disk_image = GalleryOSDiskImage(source=GalleryDiskImageSource(id=os_snapshot)) + if data_snapshot_luns and not data_snapshots: + raise ArgumentUsageError('usage error: --data-snapshot-luns must be used together with --data-snapshots') + if data_snapshots: + if data_snapshot_luns and len(data_snapshots) != len(data_snapshot_luns): + raise ArgumentUsageError('usage error: Length of --data-snapshots and ' + '--data-snapshot-luns should be equal.') + if not data_snapshot_luns: + data_snapshot_luns = list(range(len(data_snapshots))) + data_disk_images = [] + for i, s in enumerate(data_snapshots): + data_disk_images.append(GalleryDataDiskImage(source=GalleryDiskImageSource(id=s), + lun=data_snapshot_luns[i])) + # from vhd, only support os image now + if cmd.supported_api_version(min_api='2020-09-30', operation_group='gallery_image_versions'): + # OS disk + if os_vhd_uri and os_vhd_storage_account is None or os_vhd_uri is None and os_vhd_storage_account: + raise ArgumentUsageError('--os-vhd-uri and --os-vhd-storage-account should be used together.') 
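+            # An illustrative pairing (hypothetical names): building the image version from a VHD, e.g.
+            #   az sig image-version create ... --os-vhd-storage-account MyStorageAccount \
+            #       --os-vhd-uri https://mystorageaccount.blob.core.windows.net/vhds/os.vhd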
+ if os_vhd_uri and os_vhd_storage_account: + if not is_valid_resource_id(os_vhd_storage_account): + os_vhd_storage_account = resource_id( + subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name, + namespace='Microsoft.Storage', type='storageAccounts', name=os_vhd_storage_account) + os_disk_image = GalleryOSDiskImage(source=GalleryDiskImageSource( + id=os_vhd_storage_account, uri=os_vhd_uri)) + + # Data disks + if data_vhds_uris and data_vhds_storage_accounts is None or \ + data_vhds_uris is None and data_vhds_storage_accounts: + raise ArgumentUsageError('--data-vhds-uris and --data-vhds-storage-accounts should be used together.') + if data_vhds_luns and data_vhds_uris is None: + raise ArgumentUsageError('--data-vhds-luns must be used together with --data-vhds-uris') + if data_vhds_uris: + # Generate LUNs + if data_vhds_luns is None: + # 0, 1, 2, ... + data_vhds_luns = list(range(len(data_vhds_uris))) + # Check length + len_data_vhds_uris = len(data_vhds_uris) + len_data_vhds_luns = len(data_vhds_luns) + len_data_vhds_storage_accounts = len(data_vhds_storage_accounts) + if len_data_vhds_uris != len_data_vhds_luns or len_data_vhds_uris != len_data_vhds_storage_accounts: + raise ArgumentUsageError( + 'Length of --data-vhds-uris, --data-vhds-luns, --data-vhds-storage-accounts must be same.') + # Generate full storage account ID + for i, storage_account in enumerate(data_vhds_storage_accounts): + if not is_valid_resource_id(storage_account): + data_vhds_storage_accounts[i] = resource_id( + subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name, + namespace='Microsoft.Storage', type='storageAccounts', name=storage_account) + if data_disk_images is None: + data_disk_images = [] + for uri, lun, account in zip(data_vhds_uris, data_vhds_luns, data_vhds_storage_accounts): + data_disk_images.append(GalleryDataDiskImage( + source=GalleryDiskImageSource(id=account, uri=uri), lun=lun)) + + storage_profile = GalleryImageVersionStorageProfile(source=source, os_disk_image=os_disk_image, + data_disk_images=data_disk_images) + image_version = ImageVersion(publishing_profile=profile, location=location, tags=(tags or {}), + storage_profile=storage_profile) + if allow_replicated_location_deletion is not None: + GalleryImageVersionSafetyProfile = cmd.get_models('GalleryImageVersionSafetyProfile', + operation_group='gallery_image_versions') + image_version.safety_profile = GalleryImageVersionSafetyProfile( + allow_deletion_of_replicated_locations=allow_replicated_location_deletion) + else: + if managed_image is None: + raise RequiredArgumentMissingError('usage error: Please provide --managed-image') + image_version = ImageVersion(publishing_profile=profile, location=location, tags=(tags or {})) + + return client.gallery_image_versions.begin_create_or_update( + resource_group_name=resource_group_name, + gallery_name=gallery_name, + gallery_image_name=gallery_image_name, + gallery_image_version_name=gallery_image_version, + gallery_image_version=image_version + ) + + +def undelete_image_version(cmd, resource_group_name, gallery_name, gallery_image_name, gallery_image_version, + location=None, tags=None, allow_replicated_location_deletion=None): + ImageVersion = cmd.get_models('GalleryImageVersion') + client = _compute_client_factory(cmd.cli_ctx) + + location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name) + + gallery = client.galleries.get(resource_group_name, gallery_name) + soft_delete = 
gallery.soft_delete_policy.is_soft_delete_enabled + if not soft_delete: + from azure.cli.core.azclierror import InvalidArgumentValueError + raise InvalidArgumentValueError('soft-deletion is not enabled in Gallery \'{}\''.format(gallery_name)) + + image_version = ImageVersion(publishing_profile=None, location=location, tags=(tags or {}), + storage_profile=None) + if allow_replicated_location_deletion is not None: + GalleryImageVersionSafetyProfile = cmd.get_models('GalleryImageVersionSafetyProfile', + operation_group='gallery_image_versions') + image_version.safety_profile = GalleryImageVersionSafetyProfile( + allow_deletion_of_replicated_locations=allow_replicated_location_deletion) + + return client.gallery_image_versions.begin_create_or_update( + resource_group_name=resource_group_name, + gallery_name=gallery_name, + gallery_image_name=gallery_image_name, + gallery_image_version_name=gallery_image_version, + gallery_image_version=image_version + ) + + +def fix_gallery_image_date_info(date_info): + # If only a date is provided, append a time so the value is accepted by the service end, + # e.g. '2024-01-01' becomes '2024-01-01T12:59:59Z' + if date_info and 't' not in date_info.lower(): + date_info += 'T12:59:59Z' + return date_info + + +# pylint: disable=line-too-long +def get_image_version_to_update(cmd, resource_group_name, gallery_name, gallery_image_name, gallery_image_version_name): + client = _compute_client_factory(cmd.cli_ctx) + version = client.gallery_image_versions.get(resource_group_name, gallery_name, gallery_image_name, + gallery_image_version_name) + # To avoid unnecessary permission check of image + version.storage_profile.source = None + if version.storage_profile.os_disk_image and version.storage_profile.os_disk_image.source: + version.storage_profile.os_disk_image.source = None + if version.storage_profile.data_disk_images: + for i in range(len(version.storage_profile.data_disk_images)): + if version.storage_profile.data_disk_images[i].source: + version.storage_profile.data_disk_images[i].source = None + return version + + +def update_image_version(cmd, resource_group_name, gallery_name, gallery_image_name, gallery_image_version_name, + target_regions=None, replica_count=None, allow_replicated_location_deletion=None, + target_edge_zones=None, no_wait=False, **kwargs): + image_version = kwargs['gallery_image_version'] + + if target_regions: + image_version.publishing_profile.target_regions = target_regions + if replica_count: + image_version.publishing_profile.replica_count = replica_count + if image_version.storage_profile.source is not None: + image_version.storage_profile.os_disk_image = image_version.storage_profile.data_disk_images = None + # target extended locations will be updated when --target-edge-zones is specified + if target_edge_zones is not None: + image_version.publishing_profile.target_extended_locations = target_edge_zones \ + if len(target_edge_zones) > 0 else None + if allow_replicated_location_deletion is not None: + image_version.safety_profile.allow_deletion_of_replicated_locations = allow_replicated_location_deletion + + client = _compute_client_factory(cmd.cli_ctx) + + return sdk_no_wait(no_wait, client.gallery_image_versions.begin_create_or_update, resource_group_name, gallery_name, + gallery_image_name, gallery_image_version_name, **kwargs) + + +# endregion + + +# region proximity placement groups +def create_proximity_placement_group(cmd, client, proximity_placement_group_name, resource_group_name, + ppg_type=None, location=None, tags=None, zone=None, intent_vm_sizes=None): + location = 
location or _get_resource_group_location(cmd.cli_ctx, resource_group_name) + ProximityPlacementGroup = cmd.get_models('ProximityPlacementGroup') + + ppg_params = ProximityPlacementGroup(name=proximity_placement_group_name, proximity_placement_group_type=ppg_type, + location=location, tags=(tags or {}), zones=zone) + + if intent_vm_sizes: + Intent = cmd.get_models('ProximityPlacementGroupPropertiesIntent') + intent = Intent(vm_sizes=intent_vm_sizes) + ppg_params.intent = intent + + return client.create_or_update(resource_group_name=resource_group_name, + proximity_placement_group_name=proximity_placement_group_name, parameters=ppg_params) + + +def update_ppg(cmd, instance, intent_vm_sizes=None, ppg_type=None): + if intent_vm_sizes: + Intent = cmd.get_models('ProximityPlacementGroupPropertiesIntent') + intent = Intent(vm_sizes=intent_vm_sizes) + instance.intent = intent + if ppg_type: + instance.proximity_placement_group_type = ppg_type + return instance + + +def list_proximity_placement_groups(client, resource_group_name=None): + if resource_group_name: + return client.list_by_resource_group(resource_group_name=resource_group_name) + return client.list_by_subscription() + + +# endregion + + +# region dedicated host +def create_dedicated_host_group(cmd, client, host_group_name, resource_group_name, platform_fault_domain_count, + automatic_placement=None, location=None, zones=None, tags=None, ultra_ssd_enabled=None): + DedicatedHostGroup = cmd.get_models('DedicatedHostGroup') + location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name) + + host_group_params = DedicatedHostGroup(location=location, platform_fault_domain_count=platform_fault_domain_count, + support_automatic_placement=automatic_placement, zones=zones, tags=tags) + if ultra_ssd_enabled is not None: + additionalCapabilities = {'ultraSSDEnabled': ultra_ssd_enabled} + host_group_params.additional_capabilities = additionalCapabilities + + return client.create_or_update(resource_group_name, host_group_name, parameters=host_group_params) + + +def get_dedicated_host_group_instance_view(client, host_group_name, resource_group_name): + return client.get(resource_group_name, host_group_name, expand="instanceView") + + +def create_dedicated_host(cmd, client, host_group_name, host_name, resource_group_name, sku, platform_fault_domain=None, + auto_replace_on_failure=None, license_type=None, location=None, tags=None): + DedicatedHostType = cmd.get_models('DedicatedHost') + SkuType = cmd.get_models('Sku') + + location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name) + sku = SkuType(name=sku) + + host_params = DedicatedHostType(location=location, platform_fault_domain=platform_fault_domain, + auto_replace_on_failure=auto_replace_on_failure, license_type=license_type, + sku=sku, tags=tags) + + return client.begin_create_or_update(resource_group_name, host_group_name, host_name, parameters=host_params) + + +def get_dedicated_host_instance_view(client, host_group_name, host_name, resource_group_name): + return client.get(resource_group_name, host_group_name, host_name, expand="instanceView") + + +# endregion + + +# region VMMonitor +def _get_log_analytics_client(cmd): + from ._client_factory import cf_log_analytics + from azure.cli.core.commands.client_factory import get_subscription_id + subscription_id = get_subscription_id(cmd.cli_ctx) + return cf_log_analytics(cmd.cli_ctx, subscription_id) + + +def _prepare_workspace(cmd, resource_group_name, workspace): + from azure.mgmt.core.tools import 
is_valid_resource_id + + from azure.core.exceptions import HttpResponseError + + workspace_id = None + if not is_valid_resource_id(workspace): + workspace_name = workspace + log_client = _get_log_analytics_client(cmd) + workspace_result = None + try: + workspace_result = log_client.workspaces.get(resource_group_name, workspace_name) + except HttpResponseError: + from azure.mgmt.loganalytics.models import Workspace, WorkspaceSku, WorkspaceSkuNameEnum + sku = WorkspaceSku(name=WorkspaceSkuNameEnum.per_gb2018.value) + retention_time = 30 # default value + location = _get_resource_group_location(cmd.cli_ctx, resource_group_name) + workspace_instance = Workspace(location=location, + sku=sku, + retention_in_days=retention_time) + workspace_result = LongRunningOperation(cmd.cli_ctx)(log_client.workspaces.begin_create_or_update( + resource_group_name, + workspace_name, + workspace_instance)) + workspace_id = workspace_result.id + else: + workspace_id = workspace + return workspace_id + + +def _set_data_source_for_workspace(cmd, os_type, resource_group_name, workspace_name): + from ._client_factory import cf_log_analytics_data_sources + from azure.cli.core.commands.client_factory import get_subscription_id + from azure.mgmt.loganalytics.models import DataSource + from azure.core.exceptions import HttpResponseError + + subscription_id = get_subscription_id(cmd.cli_ctx) + data_sources_client = cf_log_analytics_data_sources(cmd.cli_ctx, subscription_id) + data_source_name_template = "DataSource_{}_{}" + + default_data_sources = None + if os_type.lower() == 'linux': + from .._workspace_data_source_settings import default_linux_data_sources + default_data_sources = default_linux_data_sources + elif os_type.lower() == 'windows': + from .._workspace_data_source_settings import default_windows_data_sources + default_data_sources = default_windows_data_sources + + if default_data_sources is not None: + for data_source_kind, data_source_settings in default_data_sources.items(): + for data_source_setting in data_source_settings: + data_source = DataSource(kind=data_source_kind, + properties=data_source_setting) + data_source_name = data_source_name_template.format(data_source_kind, _gen_guid()) + try: + data_sources_client.create_or_update(resource_group_name, + workspace_name, + data_source_name, + data_source) + except HttpResponseError as ex: + logger.warning("Failed to set data source due to %s. " + "Skipping this step; the data source may need to be configured manually later.", ex.message) + else: + logger.warning("Unsupported OS type. Skipping the default settings for the Log Analytics workspace.") + + +def execute_query_for_vm(cmd, client, resource_group_name, vm_name, analytics_query, timespan=None): + """Executes a query against the Log Analytics workspace linked with a VM.""" + vm = get_vm(cmd, resource_group_name, vm_name) + workspace = None + extension_resources = vm.resources or [] + for resource in extension_resources: + if resource.name == "MicrosoftMonitoringAgent" or resource.name == "OmsAgentForLinux": + workspace = resource.settings.get('workspaceId', None) + if workspace is None: + raise CLIError('Cannot find the corresponding log analytics workspace. 
' + 'Please check the status of log analytics workspace.') + return client.query_workspace(workspace, analytics_query, timespan=timespan) + + +def _set_log_analytics_workspace_extension(cmd, resource_group_name, vm, vm_name, workspace_name): + is_linux_os = _is_linux_os(vm) + vm_extension_name = _LINUX_OMS_AGENT_EXT if is_linux_os else _WINDOWS_OMS_AGENT_EXT + log_client = _get_log_analytics_client(cmd) + customer_id = log_client.workspaces.get(resource_group_name, workspace_name).customer_id + settings = { + 'workspaceId': customer_id, + 'stopOnMultipleConnections': 'true' + } + primary_shared_key = log_client.shared_keys.get_shared_keys(resource_group_name, workspace_name).primary_shared_key + protected_settings = { + 'workspaceKey': primary_shared_key, + } + return set_extension(cmd, resource_group_name, vm_name, vm_extension_name, + extension_mappings[vm_extension_name]['publisher'], + extension_mappings[vm_extension_name]['version'], + settings, + protected_settings) + + +# endregion + + +# region disk encryption set +def create_disk_encryption_set( + cmd, client, resource_group_name, disk_encryption_set_name, key_url, source_vault=None, encryption_type=None, + location=None, tags=None, no_wait=False, enable_auto_key_rotation=None, federated_client_id=None, + mi_system_assigned=None, mi_user_assigned=None): + from azure.mgmt.core.tools import resource_id, is_valid_resource_id + from azure.cli.core.commands.client_factory import get_subscription_id + DiskEncryptionSet, EncryptionSetIdentity, KeyForDiskEncryptionSet, SourceVault = cmd.get_models( + 'DiskEncryptionSet', 'EncryptionSetIdentity', 'KeyForDiskEncryptionSet', 'SourceVault') + + identity_type, user_assigned_identities = \ + _build_identities_info_from_system_user_assigned(cmd, mi_system_assigned, mi_user_assigned) + + encryption_set_identity = EncryptionSetIdentity(type=identity_type) + if user_assigned_identities is not None: + encryption_set_identity.user_assigned_identities = user_assigned_identities + + if source_vault is not None: + if not is_valid_resource_id(source_vault): + source_vault = resource_id(subscription=get_subscription_id(cmd.cli_ctx), + resource_group=resource_group_name, + namespace='Microsoft.KeyVault', type='vaults', name=source_vault) + source_vault = SourceVault(id=source_vault) + + key_for_disk_encryption_set = KeyForDiskEncryptionSet(source_vault=source_vault, key_url=key_url) + disk_encryption_set = DiskEncryptionSet(location=location, tags=tags, identity=encryption_set_identity, + active_key=key_for_disk_encryption_set, encryption_type=encryption_type, + rotation_to_latest_key_version_enabled=enable_auto_key_rotation) + + if federated_client_id is not None: + disk_encryption_set.federated_client_id = federated_client_id + + return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, disk_encryption_set_name, + disk_encryption_set) + + +def update_disk_encryption_set(cmd, instance, client, resource_group_name, key_url=None, source_vault=None, + enable_auto_key_rotation=None, federated_client_id=None): + from azure.mgmt.core.tools import resource_id, is_valid_resource_id + from azure.cli.core.commands.client_factory import get_subscription_id + if key_url: + instance.active_key.key_url = key_url + + if source_vault: + if not is_valid_resource_id(source_vault): + source_vault = resource_id(subscription=get_subscription_id(cmd.cli_ctx), + resource_group=resource_group_name, + namespace='Microsoft.KeyVault', type='vaults', name=source_vault) + instance.active_key.source_vault = {'id': 
source_vault} + + if enable_auto_key_rotation is not None: + instance.rotation_to_latest_key_version_enabled = enable_auto_key_rotation + + if federated_client_id is not None: + instance.federated_client_id = federated_client_id + + return instance + + +def assign_disk_encryption_set_identity(cmd, client, resource_group_name, disk_encryption_set_name, + mi_system_assigned=None, mi_user_assigned=None): + DiskEncryptionSetUpdate, EncryptionSetIdentity = cmd.get_models('DiskEncryptionSetUpdate', 'EncryptionSetIdentity', + operation_group='disk_encryption_sets') + from azure.cli.core.commands.arm import assign_identity as assign_identity_helper + client = _compute_client_factory(cmd.cli_ctx) + + def getter(): + return client.disk_encryption_sets.get(resource_group_name, disk_encryption_set_name) + + def setter(disk_encryption_set, mi_system_assigned=mi_system_assigned, mi_user_assigned=mi_user_assigned): + IdentityType = cmd.get_models('DiskEncryptionSetIdentityType', operation_group='disk_encryption_sets') + existing_system_identity = False + existing_user_identities = set() + if disk_encryption_set.identity is not None: + existing_system_identity = disk_encryption_set.identity.type in [IdentityType.SYSTEM_ASSIGNED_USER_ASSIGNED, + IdentityType.SYSTEM_ASSIGNED] + existing_user_identities = {x.lower() for x in + list((disk_encryption_set.identity.user_assigned_identities or {}).keys())} + + add_system_assigned = mi_system_assigned + add_user_assigned = {x.lower() for x in (mi_user_assigned or [])} + + updated_system_assigned = existing_system_identity or add_system_assigned + updated_user_assigned = list(existing_user_identities.union(add_user_assigned)) + + identity_types, user_assigned_identities = _build_identities_info_from_system_user_assigned( + cmd, updated_system_assigned, updated_user_assigned) + + encryption_set_identity = EncryptionSetIdentity(type=identity_types, + user_assigned_identities=user_assigned_identities) + + disk_encryption_set_update = DiskEncryptionSetUpdate() + disk_encryption_set_update.identity = encryption_set_identity + return patch_disk_encryption_set(cmd, resource_group_name, disk_encryption_set_name, disk_encryption_set_update) + + disk_encryption_set = assign_identity_helper(cmd.cli_ctx, getter, setter) + return disk_encryption_set.identity + + +def remove_disk_encryption_set_identity(cmd, client, resource_group_name, disk_encryption_set_name, + mi_system_assigned=None, mi_user_assigned=None): + DiskEncryptionSetUpdate = cmd.get_models('DiskEncryptionSetUpdate', operation_group='disk_encryption_sets') + client = _compute_client_factory(cmd.cli_ctx) + + def getter(cmd, resource_group_name, disk_encryption_set_name): + return client.disk_encryption_sets.get(resource_group_name, disk_encryption_set_name) + + def setter(resource_group_name, disk_encryption_set_name, disk_encryption_set): + disk_encryption_set_update = DiskEncryptionSetUpdate(identity=disk_encryption_set.identity) + return client.disk_encryption_sets.begin_update(resource_group_name, disk_encryption_set_name, + disk_encryption_set_update) + + return _remove_disk_encryption_set_identities(cmd, resource_group_name, disk_encryption_set_name, + mi_system_assigned, mi_user_assigned, getter, setter) + + +def show_disk_encryption_set_identity(cmd, resource_group_name, disk_encryption_set_name): + client = _compute_client_factory(cmd.cli_ctx) + return client.disk_encryption_sets.get(resource_group_name, disk_encryption_set_name).identity + + +# endregion + + +# region Disk Access +def 
create_disk_access(cmd, client, resource_group_name, disk_access_name, location=None, tags=None, no_wait=False): + DiskAccess = cmd.get_models('DiskAccess') + disk_access = DiskAccess(location=location, tags=tags) + return sdk_no_wait(no_wait, client.begin_create_or_update, + resource_group_name, disk_access_name, disk_access) + + +def set_disk_access(cmd, client, parameters, resource_group_name, disk_access_name, tags=None, no_wait=False): + location = _get_resource_group_location(cmd.cli_ctx, resource_group_name) + DiskAccess = cmd.get_models('DiskAccess') + disk_access = DiskAccess(location=location, tags=tags) + return sdk_no_wait(no_wait, client.begin_create_or_update, + resource_group_name, disk_access_name, disk_access) + + +# endregion + + +# region install patches +def install_vm_patches(cmd, client, resource_group_name, vm_name, maximum_duration, reboot_setting, + classifications_to_include_win=None, classifications_to_include_linux=None, + kb_numbers_to_include=None, kb_numbers_to_exclude=None, + exclude_kbs_requiring_reboot=None, package_name_masks_to_include=None, + package_name_masks_to_exclude=None, max_patch_publish_date=None, no_wait=False): + VMInstallPatchesParameters, WindowsParameters, LinuxParameters = cmd.get_models( + 'VirtualMachineInstallPatchesParameters', 'WindowsParameters', 'LinuxParameters') + windows_parameters = WindowsParameters(classifications_to_include=classifications_to_include_win, + kb_numbers_to_include=kb_numbers_to_include, + kb_numbers_to_exclude=kb_numbers_to_exclude, + exclude_kbs_requiring_reboot=exclude_kbs_requiring_reboot, + max_patch_publish_date=max_patch_publish_date) + linux_parameters = LinuxParameters(classifications_to_include=classifications_to_include_linux, + package_name_masks_to_include=package_name_masks_to_include, + package_name_masks_to_exclude=package_name_masks_to_exclude) + install_patches_input = VMInstallPatchesParameters(maximum_duration=maximum_duration, reboot_setting=reboot_setting, + linux_parameters=linux_parameters, + windows_parameters=windows_parameters) + + return sdk_no_wait(no_wait, client.begin_install_patches, resource_group_name=resource_group_name, vm_name=vm_name, + install_patches_input=install_patches_input) + + +# endregion + + +def sig_shared_gallery_list(client, location, shared_to=None): + # Keep it here as it will add subscription in the future and we need to set it to None to make it work + if shared_to == 'subscription': + shared_to = None + return client.list(location=location, shared_to=shared_to) + + +def get_page_result(generator, marker, show_next_marker=None): + pages = generator.by_page(continuation_token=marker) # ContainerPropertiesPaged + result = list_generator(pages=pages) + + if show_next_marker: + next_marker = {"nextMarker": pages.continuation_token} + result.append(next_marker) + else: + if pages.continuation_token: + logger.warning('Next Marker:') + logger.warning(pages.continuation_token) + + return result + + +def list_generator(pages, num_results=50): + result = [] + + # get first page items + page = list(next(pages)) + result += page + + while True: + if not pages.continuation_token: + break + + # handle num results + if num_results is not None: + if num_results <= len(result): + break + + page = list(next(pages)) + result += page + + return result + + +def sig_share_update(cmd, client, resource_group_name, gallery_name, subscription_ids=None, tenant_ids=None, + op_type=None): + SharingProfileGroup, SharingUpdate, SharingProfileGroupTypes = cmd.get_models( + 
'SharingProfileGroup', 'SharingUpdate', 'SharingProfileGroupTypes', operation_group='shared_galleries') + if op_type != 'EnableCommunity': + if subscription_ids is None and tenant_ids is None: + raise RequiredArgumentMissingError('At least one of subscription ids or tenant ids must be provided') + groups = [] + if subscription_ids: + groups.append(SharingProfileGroup(type=SharingProfileGroupTypes.SUBSCRIPTIONS, ids=subscription_ids)) + if tenant_ids: + groups.append(SharingProfileGroup(type=SharingProfileGroupTypes.AAD_TENANTS, ids=tenant_ids)) + sharing_update = SharingUpdate(operation_type=op_type, groups=groups) + return client.begin_update(resource_group_name=resource_group_name, + gallery_name=gallery_name, + sharing_update=sharing_update) + + +def sig_share_reset(cmd, client, resource_group_name, gallery_name): + SharingUpdate, SharingUpdateOperationTypes = cmd.get_models('SharingUpdate', 'SharingUpdateOperationTypes', + operation_group='shared_galleries') + sharing_update = SharingUpdate(operation_type=SharingUpdateOperationTypes.RESET) + return client.begin_update(resource_group_name=resource_group_name, + gallery_name=gallery_name, + sharing_update=sharing_update) + + +def sig_shared_image_definition_list(client, location, gallery_unique_name, + shared_to=None, marker=None, show_next_marker=None): + # Keep it here as it will add subscription in the future and we need to set it to None to make it work + if shared_to == 'subscription': + shared_to = None + generator = client.list(location=location, gallery_unique_name=gallery_unique_name, shared_to=shared_to) + return get_page_result(generator, marker, show_next_marker) + + +def sig_shared_image_version_list(client, location, gallery_unique_name, gallery_image_name, + shared_to=None, marker=None, show_next_marker=None): + # Keep it here as it will add subscription in the future and we need to set it to None to make it work + if shared_to == 'subscription': + shared_to = None + generator = client.list(location=location, gallery_unique_name=gallery_unique_name, + gallery_image_name=gallery_image_name, shared_to=shared_to) + return get_page_result(generator, marker, show_next_marker) + + +def gallery_application_create(client, + resource_group_name, + gallery_name, + gallery_application_name, + os_type, + location, + tags=None, + description=None, + no_wait=False): + gallery_application = {} + gallery_application['location'] = location + if tags is not None: + gallery_application['tags'] = tags + if description is not None: + gallery_application['description'] = description + if os_type is not None: + gallery_application['supported_os_type'] = os_type + return sdk_no_wait(no_wait, + client.begin_create_or_update, + resource_group_name=resource_group_name, + gallery_name=gallery_name, + gallery_application_name=gallery_application_name, + gallery_application=gallery_application) + + +def gallery_application_update(client, + resource_group_name, + gallery_name, + gallery_application_name, + location, + tags=None, + description=None, + no_wait=False): + gallery_application = {} + gallery_application['location'] = location + if tags is not None: + gallery_application['tags'] = tags + if description is not None: + gallery_application['description'] = description + return sdk_no_wait(no_wait, + client.begin_update, + resource_group_name=resource_group_name, + gallery_name=gallery_name, + gallery_application_name=gallery_application_name, + gallery_application=gallery_application) + + +def gallery_application_version_create(client, + 
resource_group_name, + gallery_name, + gallery_application_name, + gallery_application_version_name, + location, + package_file_link, + install_command, + remove_command, + tags=None, + update_command=None, + target_regions=None, + default_file_link=None, + end_of_life_date=None, + package_file_name=None, + config_file_name=None, + exclude_from=None, + no_wait=False): + gallery_application_version = {} + gallery_application_version['publishing_profile'] = {} + gallery_application_version['location'] = location + if tags is not None: + gallery_application_version['tags'] = tags + source = {} + source['media_link'] = package_file_link + if default_file_link is not None: + source['default_configuration_link'] = default_file_link + gallery_application_version['publishing_profile']['source'] = source + manage_actions = {} + manage_actions['install'] = install_command + manage_actions['remove'] = remove_command + if update_command is not None: + manage_actions['update'] = update_command + gallery_application_version['publishing_profile']['manage_actions'] = manage_actions + if target_regions is not None: + gallery_application_version['publishing_profile']['target_regions'] = target_regions + if exclude_from is not None: + gallery_application_version['publishing_profile']['exclude_from_latest'] = exclude_from + if end_of_life_date is not None: + gallery_application_version['publishing_profile']['end_of_life_date'] = end_of_life_date + settings = {} + if package_file_name is not None: + settings['package_file_name'] = package_file_name + if config_file_name is not None: + settings['config_file_name'] = config_file_name + if settings: + gallery_application_version['publishing_profile']['settings'] = settings + return sdk_no_wait(no_wait, + client.begin_create_or_update, + resource_group_name=resource_group_name, + gallery_name=gallery_name, + gallery_application_name=gallery_application_name, + gallery_application_version_name=gallery_application_version_name, + gallery_application_version=gallery_application_version) + + +def gallery_application_version_update(client, + resource_group_name, + gallery_name, + gallery_application_name, + gallery_application_version_name, + location, + package_file_link, + tags=None, + target_regions=None, + default_file_link=None, + end_of_life_date=None, + exclude_from=None, + no_wait=False): + gallery_application_version = {} + gallery_application_version['publishing_profile'] = {} + gallery_application_version['location'] = location + if tags is not None: + gallery_application_version['tags'] = tags + source = {} + source['media_link'] = package_file_link + if default_file_link is not None: + source['default_configuration_link'] = default_file_link + gallery_application_version['publishing_profile']['source'] = source + if target_regions is not None: + gallery_application_version['publishing_profile']['target_regions'] = [target_regions] + if exclude_from is not None: + gallery_application_version['publishing_profile']['exclude_from_latest'] = exclude_from + if end_of_life_date is not None: + gallery_application_version['publishing_profile']['end_of_life_date'] = end_of_life_date + return sdk_no_wait(no_wait, + client.begin_create_or_update, + resource_group_name=resource_group_name, + gallery_name=gallery_name, + gallery_application_name=gallery_application_name, + gallery_application_version_name=gallery_application_version_name, + gallery_application_version=gallery_application_version) + + +def get_gallery_instance(cmd, resource_group_name, gallery_name): + 
from ._client_factory import cf_vm_cl + client = cf_vm_cl(cmd.cli_ctx) + SelectPermissions = cmd.get_models('SelectPermissions', operation_group='shared_galleries') + return client.galleries.get(resource_group_name, gallery_name, select=SelectPermissions.PERMISSIONS) + + +def create_capacity_reservation_group(cmd, client, resource_group_name, capacity_reservation_group_name, location=None, + tags=None, zones=None, sharing_profile=None): + CapacityReservationGroup = cmd.get_models('CapacityReservationGroup') + if sharing_profile is not None: + subscription_ids = [{'id': sub_id} for sub_id in sharing_profile] + sharing_profile = {'subscriptionIds': subscription_ids} + capacity_reservation_group = CapacityReservationGroup(location=location, tags=tags, + zones=zones, sharing_profile=sharing_profile) + return client.create_or_update(resource_group_name=resource_group_name, + capacity_reservation_group_name=capacity_reservation_group_name, + parameters=capacity_reservation_group) + + +def update_capacity_reservation_group(cmd, client, resource_group_name, capacity_reservation_group_name, tags=None, + sharing_profile=None): + CapacityReservationGroupUpdate = cmd.get_models('CapacityReservationGroupUpdate') + if sharing_profile is not None: + subscription_ids = [{'id': sub_id} for sub_id in sharing_profile] + sharing_profile = {'subscriptionIds': subscription_ids} + capacity_reservation_group = CapacityReservationGroupUpdate(tags=tags, sharing_profile=sharing_profile) + return client.update(resource_group_name=resource_group_name, + capacity_reservation_group_name=capacity_reservation_group_name, + parameters=capacity_reservation_group) + + +def show_capacity_reservation_group(client, resource_group_name, capacity_reservation_group_name, + instance_view=None): + expand = None + if instance_view: + expand = 'instanceView' + return client.get(resource_group_name=resource_group_name, + capacity_reservation_group_name=capacity_reservation_group_name, + expand=expand) + + +def create_capacity_reservation(cmd, client, resource_group_name, capacity_reservation_group_name, + capacity_reservation_name, location=None, sku_name=None, capacity=None, + zone=None, tags=None): + Sku = cmd.get_models('Sku') + sku = Sku(name=sku_name, capacity=capacity) + CapacityReservation = cmd.get_models('CapacityReservation') + capacity_reservation = CapacityReservation(location=location, sku=sku, zones=zone, tags=tags) + return client.begin_create_or_update(resource_group_name=resource_group_name, + capacity_reservation_group_name=capacity_reservation_group_name, + capacity_reservation_name=capacity_reservation_name, + parameters=capacity_reservation) + + +def update_capacity_reservation(cmd, client, resource_group_name, capacity_reservation_group_name, + capacity_reservation_name, capacity=None, tags=None): + Sku = cmd.get_models('Sku') + sku = Sku(capacity=capacity) + + # If only the data of SKU capacity is updated, the original tags will be cleared. 
+ # Therefore, before the service fixes this issue, we add this temporary logic + if tags is None: + capacity_reservation = client.get(resource_group_name=resource_group_name, + capacity_reservation_group_name=capacity_reservation_group_name, + capacity_reservation_name=capacity_reservation_name) + tags = capacity_reservation.tags + + CapacityReservationUpdate = cmd.get_models('CapacityReservationUpdate') + capacity_reservation_update = CapacityReservationUpdate(sku=sku, tags=tags) + return client.begin_update(resource_group_name=resource_group_name, + capacity_reservation_group_name=capacity_reservation_group_name, + capacity_reservation_name=capacity_reservation_name, + parameters=capacity_reservation_update) + + +def show_capacity_reservation(client, resource_group_name, capacity_reservation_group_name, capacity_reservation_name, + instance_view=None): + expand = None + if instance_view: + expand = 'instanceView' + return client.get(resource_group_name=resource_group_name, + capacity_reservation_group_name=capacity_reservation_group_name, + capacity_reservation_name=capacity_reservation_name, expand=expand) + + +def set_vm_applications(cmd, vm_name, resource_group_name, application_version_ids, order_applications=False, + application_configuration_overrides=None, treat_deployment_as_failure=None, no_wait=False): + client = _compute_client_factory(cmd.cli_ctx) + ApplicationProfile, VMGalleryApplication = cmd.get_models('ApplicationProfile', 'VMGalleryApplication') + try: + vm = client.virtual_machines.get(resource_group_name, vm_name) + except ResourceNotFoundError: + raise ResourceNotFoundError('Could not find vm {}.'.format(vm_name)) + + vm.application_profile = ApplicationProfile( + gallery_applications=[VMGalleryApplication(package_reference_id=avid) for avid in application_version_ids]) + + if order_applications: + index = 1 + for app in vm.application_profile.gallery_applications: + app.order = index + index += 1 + + if application_configuration_overrides: + index = 0 + for over_ride in application_configuration_overrides: + # a literal 'null' acts as a placeholder to skip the override for this application + if over_ride and over_ride.lower() != 'null': + vm.application_profile.gallery_applications[index].configuration_reference = over_ride + index += 1 + + if treat_deployment_as_failure: + index = 0 + for treat_as_failure in treat_deployment_as_failure: + vm.application_profile.gallery_applications[index].treat_failure_as_deployment_failure = \ + (treat_as_failure.lower() == 'true') + index += 1 + return sdk_no_wait(no_wait, client.virtual_machines.begin_create_or_update, resource_group_name, vm_name, vm) + + +def list_vm_applications(cmd, vm_name, resource_group_name): + client = _compute_client_factory(cmd.cli_ctx) + try: + vm = client.virtual_machines.get(resource_group_name, vm_name) + except ResourceNotFoundError: + raise ResourceNotFoundError('Could not find vm {}.'.format(vm_name)) + return vm.application_profile + + +def set_vmss_applications(cmd, vmss_name, resource_group_name, application_version_ids, order_applications=False, + application_configuration_overrides=None, treat_deployment_as_failure=None, no_wait=False): + client = _compute_client_factory(cmd.cli_ctx) + ApplicationProfile, VMGalleryApplication = cmd.get_models('ApplicationProfile', 'VMGalleryApplication') + try: + vmss = client.virtual_machine_scale_sets.get(resource_group_name, vmss_name) + except ResourceNotFoundError: + raise ResourceNotFoundError('Could not find vmss {}.'.format(vmss_name)) + + vmss.virtual_machine_profile.application_profile = ApplicationProfile( + 
gallery_applications=[VMGalleryApplication(package_reference_id=avid) for avid in application_version_ids]) + + if order_applications: + index = 1 + for app in vmss.virtual_machine_profile.application_profile.gallery_applications: + app.order = index + index += 1 + + if application_configuration_overrides: + index = 0 + for over_ride in application_configuration_overrides: + # a literal 'null' acts as a placeholder to skip the override for this application + if over_ride and over_ride.lower() != 'null': + vmss.virtual_machine_profile.application_profile.gallery_applications[ + index].configuration_reference = over_ride + index += 1 + + if treat_deployment_as_failure: + index = 0 + for treat_as_failure in treat_deployment_as_failure: + vmss.virtual_machine_profile.application_profile.gallery_applications[ + index].treat_failure_as_deployment_failure = (treat_as_failure.lower() == 'true') + index += 1 + return sdk_no_wait(no_wait, client.virtual_machine_scale_sets.begin_create_or_update, resource_group_name, + vmss_name, vmss) + + +def list_vmss_applications(cmd, vmss_name, resource_group_name): + client = _compute_client_factory(cmd.cli_ctx) + try: + vmss = client.virtual_machine_scale_sets.get(resource_group_name, vmss_name) + except ResourceNotFoundError: + raise ResourceNotFoundError('Could not find vmss {}.'.format(vmss_name)) + return vmss.virtual_machine_profile.application_profile + + +# region Restore point +def restore_point_create(client, + resource_group_name, + restore_point_collection_name, + restore_point_name, + exclude_disks=None, + source_restore_point=None, + consistency_mode=None, + source_os_resource=None, + os_restore_point_encryption_set=None, + os_restore_point_encryption_type=None, + source_data_disk_resource=None, + data_disk_restore_point_encryption_set=None, + data_disk_restore_point_encryption_type=None, + no_wait=False): + parameters = {} + if exclude_disks is not None: + parameters['excludeDisks'] = [] + for disk in exclude_disks: + parameters['excludeDisks'].append({'id': disk}) + if source_restore_point is not None: + parameters['sourceRestorePoint'] = {'id': source_restore_point} + if consistency_mode is not None: + parameters['consistencyMode'] = consistency_mode + + storage_profile = {} + # Local restore point + if source_restore_point is None: + os_disk = {} + if source_os_resource is not None: + managed_disk = { + 'id': source_os_resource + } + os_disk['managedDisk'] = managed_disk + if os_restore_point_encryption_set is None and os_restore_point_encryption_type is None: + raise ArgumentUsageError( + 'usage error: --os-restore-point-encryption-set or --os-restore-point-encryption-type must be used together with --source-os-resource') + + disk_restore_point = {} + if os_restore_point_encryption_set is not None or os_restore_point_encryption_type is not None: + encryption = {} + if os_restore_point_encryption_set is not None: + encryption['diskEncryptionSet'] = { + 'id': os_restore_point_encryption_set + } + if os_restore_point_encryption_type is not None: + encryption['type'] = os_restore_point_encryption_type + + if encryption: + disk_restore_point['encryption'] = encryption + + if disk_restore_point: + os_disk['diskRestorePoint'] = disk_restore_point + + if os_disk: + storage_profile['osDisk'] = os_disk + + data_disks = [] + if source_data_disk_resource is not None: + if data_disk_restore_point_encryption_set is None and data_disk_restore_point_encryption_type is None: + raise ArgumentUsageError( + 'usage error: --data-disk-restore-point-encryption-set or --data-disk-restore-point-encryption-type must be used together with 
--source-data-disk-resource') + if data_disk_restore_point_encryption_set is not None and ( + len(source_data_disk_resource) != len(data_disk_restore_point_encryption_set)): + raise ArgumentUsageError( + 'Length of --source-data-disk-resource, --data-disk-restore-point-encryption-set must be the same.') + if data_disk_restore_point_encryption_type is not None and ( + len(source_data_disk_resource) != len(data_disk_restore_point_encryption_type)): + raise ArgumentUsageError( + 'Length of --source-data-disk-resource, --data-disk-restore-point-encryption-type must be the same.') + + for i in range(len(source_data_disk_resource)): + data_disks.append({ + 'managedDisk': { + 'id': source_data_disk_resource[i] + }, + 'diskRestorePoint': { + 'encryption': { + 'diskEncryptionSet': { + 'id': data_disk_restore_point_encryption_set[ + i] if data_disk_restore_point_encryption_set is not None else None + }, + 'type': data_disk_restore_point_encryption_type[ + i] if data_disk_restore_point_encryption_type is not None else None + } + } + }) + + if data_disks: + storage_profile['dataDisks'] = data_disks + + # Remote restore point + if source_restore_point is not None: + os_disk = {} + disk_restore_point = {} + if source_os_resource is not None: + source_disk_restore_point = { + 'id': source_os_resource + } + disk_restore_point['sourceDiskRestorePoint'] = source_disk_restore_point + if os_restore_point_encryption_set is None and os_restore_point_encryption_type is None: + raise ArgumentUsageError( + 'usage error: --os-restore-point-encryption-set or --os-restore-point-encryption-type must be used together with --source-os-resource') + + if os_restore_point_encryption_set is not None or os_restore_point_encryption_type is not None: + encryption = {} + if os_restore_point_encryption_set is not None: + encryption['diskEncryptionSet'] = { + 'id': os_restore_point_encryption_set + } + if os_restore_point_encryption_type is not None: + encryption['type'] = os_restore_point_encryption_type + + if encryption: + disk_restore_point['encryption'] = encryption + if disk_restore_point: + os_disk['diskRestorePoint'] = disk_restore_point + if os_disk: + storage_profile['osDisk'] = os_disk + + data_disks = [] + if source_data_disk_resource is not None: + if data_disk_restore_point_encryption_set is None and data_disk_restore_point_encryption_type is None: + raise ArgumentUsageError( + 'usage error: --data-disk-restore-point-encryption-set or --data-disk-restore-point-encryption-type must be used together with --source-data-disk-resource') + if data_disk_restore_point_encryption_set is not None and ( + len(source_data_disk_resource) != len(data_disk_restore_point_encryption_set)): + raise ArgumentUsageError( + 'Length of --source-data-disk-resource, --data-disk-restore-point-encryption-set must be the same.') + if data_disk_restore_point_encryption_type is not None and ( + len(source_data_disk_resource) != len(data_disk_restore_point_encryption_type)): + raise ArgumentUsageError( + 'Length of --source-data-disk-resource, --data-disk-restore-point-encryption-type must be the same.') + + for i in range(len(source_data_disk_resource)): + data_disks.append({ + 'diskRestorePoint': { + 'sourceDiskRestorePoint': { + 'id': source_data_disk_resource[i] + }, + 'encryption': { + 'diskEncryptionSet': { + 'id': data_disk_restore_point_encryption_set[ + i] if data_disk_restore_point_encryption_set is not None else None + }, + 'type': data_disk_restore_point_encryption_type[ + i] if data_disk_restore_point_encryption_type is not None else None + } + 
} + }) + if data_disks: + storage_profile['dataDisks'] = data_disks + + if storage_profile: + parameters['sourceMetadata'] = {'storageProfile': storage_profile} + return sdk_no_wait(no_wait, + client.begin_create, + resource_group_name=resource_group_name, + restore_point_collection_name=restore_point_collection_name, + restore_point_name=restore_point_name, + parameters=parameters) + + +def restore_point_show(client, + resource_group_name, + restore_point_name, + restore_point_collection_name, + expand=None, + instance_view=None): + if instance_view is not None: + expand = 'instanceView' + return client.get(resource_group_name=resource_group_name, + restore_point_name=restore_point_name, + restore_point_collection_name=restore_point_collection_name, + expand=expand) + + +# endregion + + +# region Restore point collection +def restore_point_collection_show(client, + resource_group_name, + restore_point_collection_name, + expand=None, + restore_points=None): + if restore_points is not None: + expand = 'restorePoints' + return client.get(resource_group_name=resource_group_name, + restore_point_collection_name=restore_point_collection_name, + expand=expand) + + +def restore_point_collection_create(client, + resource_group_name, + restore_point_collection_name, + location, + source_id, + tags=None): + parameters = {} + properties = {} + parameters['location'] = location + if tags is not None: + parameters['tags'] = tags + properties['source'] = {'id': source_id} + parameters['properties'] = properties + return client.create_or_update(resource_group_name=resource_group_name, + restore_point_collection_name=restore_point_collection_name, + parameters=parameters) + + +def restore_point_collection_update(client, + resource_group_name, + restore_point_collection_name, + tags=None): + parameters = {} + if tags is not None: + parameters['tags'] = tags + return client.update(resource_group_name=resource_group_name, + restore_point_collection_name=restore_point_collection_name, + parameters=parameters) + + +# endregion + + +# region Community gallery +def sig_community_gallery_list(cmd, location=None, marker=None, show_next_marker=None): + from .._arg_client import ARGClient, QueryBody + + query_table = 'communitygalleryresources' + query_type = 'microsoft.compute/locations/communitygalleries' + + query = "{}| where type == '{}' ".format(query_table, query_type) + if location: + # Since the location data in table "communitygalleryresources" is in lowercase + # For accurate matching, we also need to convert the location in the query statement to lowercase + query = query + "| where location == '{}' ".format(location.lower()) + query_body = QueryBody(query) + + item_count_per_page = 30 + query_body.options = { + "$top": item_count_per_page + } + + if marker: + query_body.options['$skipToken'] = marker + + query_result = ARGClient(cmd.cli_ctx).send(query_body) + result = _transform_community_gallery_list_output(query_result) + + continuation_token = query_result.get('$skipToken') + + if show_next_marker: + next_marker = {"nextMarker": continuation_token} + result.append(next_marker) + else: + if continuation_token: + logger.warning('Next Marker:') + logger.warning(continuation_token) + + return result + + +def _transform_community_gallery_list_output(result): + result_data = result.get('data') + if not result_data: + return [] + + output = [] + for data_item in result_data: + from collections import OrderedDict + output_item = OrderedDict() + output_item['id'] = data_item.get('id') + 
output_item['location'] = data_item.get('location') + output_item['name'] = data_item.get('name') + + properties = data_item.get('properties') + if properties: + output_item['communityMetadata'] = properties.get('communityMetadata', {}) + output_item['uniqueId'] = properties.get('identifier', {}).get('uniqueId') + + output.append(output_item) + + return output + + +def sig_community_image_definition_list(client, location, public_gallery_name, marker=None, show_next_marker=None): + generator = client.list(location=location, public_gallery_name=public_gallery_name) + return get_page_result(generator, marker, show_next_marker) + + +def sig_community_image_version_list(client, location, public_gallery_name, gallery_image_name, marker=None, + show_next_marker=None): + generator = client.list(location=location, public_gallery_name=public_gallery_name, + gallery_image_name=gallery_image_name) + return get_page_result(generator, marker, show_next_marker) +# endregion diff --git a/src/azure-cli/azure/cli/command_modules/vm/azure_stack/disk_encryption.py b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/disk_encryption.py new file mode 100644 index 00000000000..9f0321435aa --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/disk_encryption.py @@ -0,0 +1,575 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- +import os +import uuid + +from knack.log import get_logger + +from azure.cli.command_modules.vm.azure_stack._vm_utils import get_key_vault_base_url, \ + create_data_plane_keyvault_key_client +from azure.cli.command_modules.vm.azure_stack.custom import set_vm, _compute_client_factory, _is_linux_os +from azure.cli.core.commands import LongRunningOperation + +_DATA_VOLUME_TYPE = 'DATA' +_ALL_VOLUME_TYPE = 'ALL' +_STATUS_ENCRYPTED = 'Encrypted' + +logger = get_logger(__name__) + +vm_extension_info = { + 'Linux': { + 'publisher': os.environ.get('ADE_TEST_EXTENSION_PUBLISHER') or 'Microsoft.Azure.Security', + 'name': os.environ.get('ADE_TEST_EXTENSION_NAME') or 'AzureDiskEncryptionForLinux', + 'version': '1.1', + 'legacy_version': '0.1' + }, + 'Windows': { + 'publisher': os.environ.get('ADE_TEST_EXTENSION_PUBLISHER') or 'Microsoft.Azure.Security', + 'name': os.environ.get('ADE_TEST_EXTENSION_NAME') or 'AzureDiskEncryption', + 'version': '2.2', + 'legacy_version': '1.1' + } +} + + +def _find_existing_ade(vm, use_instance_view=False, ade_ext_info=None): + if not ade_ext_info: + ade_ext_info = vm_extension_info['Linux'] if _is_linux_os(vm) else vm_extension_info['Windows'] + if use_instance_view: + exts = vm.instance_view.extensions or [] + r = next((e for e in exts if e.type and e.type.lower().startswith(ade_ext_info['publisher'].lower()) and + e.name.lower() == ade_ext_info['name'].lower()), None) + else: + exts = vm.resources or [] + r = next((e for e in exts if (e.publisher.lower() == ade_ext_info['publisher'].lower() and + e.type_properties_type.lower() == ade_ext_info['name'].lower())), None) + return r + + +def _detect_ade_status(vm): + if vm.storage_profile.os_disk.encryption_settings: + return False, True + ade_ext_info = vm_extension_info['Linux'] if _is_linux_os(vm) else vm_extension_info['Windows'] + ade = _find_existing_ade(vm, ade_ext_info=ade_ext_info) + if ade is 
None: + return False, False + if ade.type_handler_version.split('.')[0] == ade_ext_info['legacy_version'].split('.')[0]: + return False, True + + return True, False # we believe impossible to have both old & new ADE + + +def encrypt_vm(cmd, resource_group_name, vm_name, # pylint: disable=too-many-locals, too-many-statements + disk_encryption_keyvault, + aad_client_id=None, + aad_client_secret=None, aad_client_cert_thumbprint=None, + key_encryption_keyvault=None, + key_encryption_key=None, + key_encryption_algorithm='RSA-OAEP', + volume_type=None, + encrypt_format_all=False, + force=False): + from azure.mgmt.core.tools import parse_resource_id + from knack.util import CLIError + + # pylint: disable=no-member + compute_client = _compute_client_factory(cmd.cli_ctx) + vm = compute_client.virtual_machines.get(resource_group_name, vm_name) + is_linux = _is_linux_os(vm) + backup_encryption_settings = vm.storage_profile.os_disk.encryption_settings + vm_encrypted = backup_encryption_settings.enabled if backup_encryption_settings else False + _, has_old_ade = _detect_ade_status(vm) + use_new_ade = not aad_client_id and not has_old_ade + extension = vm_extension_info['Linux' if is_linux else 'Windows'] + + if not use_new_ade and not aad_client_id: + raise CLIError('Please provide --aad-client-id') + + # 1. First validate arguments + if not use_new_ade and not aad_client_cert_thumbprint and not aad_client_secret: + raise CLIError('Please provide either --aad-client-cert-thumbprint or --aad-client-secret') + + if volume_type is None: + if not is_linux: + volume_type = _ALL_VOLUME_TYPE + elif vm.storage_profile.data_disks: + raise CLIError('VM has data disks, please supply --volume-type') + else: + volume_type = 'OS' + + # sequence_version should be unique + sequence_version = uuid.uuid4() + + # retrieve keyvault details + disk_encryption_keyvault_url = get_key_vault_base_url( + cmd.cli_ctx, (parse_resource_id(disk_encryption_keyvault))['name']) + + # disk encryption key itself can be further protected, so let us verify + if key_encryption_key: + key_encryption_keyvault = key_encryption_keyvault or disk_encryption_keyvault + + # to avoid bad server errors, ensure the vault has the right configurations + _verify_keyvault_good_for_encryption(cmd.cli_ctx, disk_encryption_keyvault, key_encryption_keyvault, vm, force) + + # if key name and not key url, get url. + if key_encryption_key and '://' not in key_encryption_key: # if key name and not key url + key_encryption_key = _get_keyvault_key_url( + cmd.cli_ctx, (parse_resource_id(key_encryption_keyvault))['name'], key_encryption_key) + + # 2. 
we are ready to provision/update the disk encryption extensions + # The following logic was mostly ported from xplat-cli + public_config = { + 'KeyVaultURL': disk_encryption_keyvault_url, + 'VolumeType': volume_type, + 'EncryptionOperation': 'EnableEncryption' if not encrypt_format_all else 'EnableEncryptionFormatAll', + 'KeyEncryptionKeyURL': key_encryption_key, + 'KeyEncryptionAlgorithm': key_encryption_algorithm, + 'SequenceVersion': sequence_version, + } + if use_new_ade: + public_config.update({ + "KeyVaultResourceId": disk_encryption_keyvault, + "KekVaultResourceId": key_encryption_keyvault if key_encryption_key else '', + }) + else: + public_config.update({ + 'AADClientID': aad_client_id, + 'AADClientCertThumbprint': aad_client_cert_thumbprint, + }) + + ade_legacy_private_config = { + 'AADClientSecret': aad_client_secret if is_linux else (aad_client_secret or '') + } + + VirtualMachineExtension, DiskEncryptionSettings, KeyVaultSecretReference, KeyVaultKeyReference, SubResource = \ + cmd.get_models('VirtualMachineExtension', 'DiskEncryptionSettings', 'KeyVaultSecretReference', + 'KeyVaultKeyReference', 'SubResource') + + ext = VirtualMachineExtension( + location=vm.location, # pylint: disable=no-member + publisher=extension['publisher'], + type_properties_type=extension['name'], + protected_settings=None if use_new_ade else ade_legacy_private_config, + type_handler_version=extension['version'] if use_new_ade else extension['legacy_version'], + settings=public_config, + auto_upgrade_minor_version=True) + + poller = compute_client.virtual_machine_extensions.begin_create_or_update( + resource_group_name, vm_name, extension['name'], ext) + LongRunningOperation(cmd.cli_ctx)(poller) + poller.result() + + # verify the extension was ok + extension_result = compute_client.virtual_machine_extensions.get( + resource_group_name, vm_name, extension['name'], expand='instanceView') + if extension_result.provisioning_state != 'Succeeded': + raise CLIError('Extension needed for disk encryption was not provisioned correctly') + + if not use_new_ade: + if not (extension_result.instance_view.statuses and + extension_result.instance_view.statuses[0].message): + raise CLIError('Could not find url pointing to the secret for disk encryption') + + # 3. 
update VM's storage profile with the secrets + status_url = extension_result.instance_view.statuses[0].message + + vm = compute_client.virtual_machines.get(resource_group_name, vm_name) + secret_ref = KeyVaultSecretReference(secret_url=status_url, + source_vault=SubResource(id=disk_encryption_keyvault)) + + key_encryption_key_obj = None + if key_encryption_key: + key_encryption_key_obj = KeyVaultKeyReference(key_url=key_encryption_key, + source_vault=SubResource(id=key_encryption_keyvault)) + + disk_encryption_settings = DiskEncryptionSettings(disk_encryption_key=secret_ref, + key_encryption_key=key_encryption_key_obj, + enabled=True) + if vm_encrypted: + # stop the vm before update if the vm is already encrypted + logger.warning("Deallocating the VM before updating encryption settings...") + compute_client.virtual_machines.deallocate(resource_group_name, vm_name).result() + vm = compute_client.virtual_machines.get(resource_group_name, vm_name) + + vm.storage_profile.os_disk.encryption_settings = disk_encryption_settings + set_vm(cmd, vm) + + if vm_encrypted: + # and start after the update + logger.warning("Restarting the VM after the update...") + compute_client.virtual_machines.start(resource_group_name, vm_name).result() + + if is_linux and volume_type != _DATA_VOLUME_TYPE: + old_ade_msg = "If you see 'VMRestartPending', please restart the VM, and the encryption will finish shortly" + logger.warning("The encryption request was accepted. Please use 'show' command to monitor " + "the progress. %s", "" if use_new_ade else old_ade_msg) + + +def decrypt_vm(cmd, resource_group_name, vm_name, volume_type=None, force=False): + from knack.util import CLIError + + compute_client = _compute_client_factory(cmd.cli_ctx) + vm = compute_client.virtual_machines.get(resource_group_name, vm_name) + has_new_ade, has_old_ade = _detect_ade_status(vm) + if not has_new_ade and not has_old_ade: + logger.warning('Azure Disk Encryption is not enabled') + return + is_linux = _is_linux_os(vm) + # pylint: disable=no-member + + # 1. be nice, figure out the default volume type and also verify VM will not be busted + if is_linux: + if volume_type: + if not force and volume_type != _DATA_VOLUME_TYPE: + raise CLIError("Only Data disks can have encryption disabled in a Linux VM. " + "Use '--force' to ignore the warning") + else: + volume_type = _DATA_VOLUME_TYPE + elif volume_type is None: + volume_type = _ALL_VOLUME_TYPE + + extension = vm_extension_info['Linux' if is_linux else 'Windows'] + # sequence_version should be incremented since encryptions occurred before + sequence_version = uuid.uuid4() + + # 2. 
update the disk encryption extension + # The following logic was mostly ported from xplat-cli + public_config = { + 'VolumeType': volume_type, + 'EncryptionOperation': 'DisableEncryption', + 'SequenceVersion': sequence_version, + } + + VirtualMachineExtension, DiskEncryptionSettings = cmd.get_models( + 'VirtualMachineExtension', 'DiskEncryptionSettings') + + ext = VirtualMachineExtension( + location=vm.location, # pylint: disable=no-member + publisher=extension['publisher'], + virtual_machine_extension_type=extension['name'], + type_handler_version=extension['version'] if has_new_ade else extension['legacy_version'], + settings=public_config, + auto_upgrade_minor_version=True) + + poller = compute_client.virtual_machine_extensions.begin_create_or_update(resource_group_name, + vm_name, + extension['name'], ext) + LongRunningOperation(cmd.cli_ctx)(poller) + poller.result() + extension_result = compute_client.virtual_machine_extensions.get(resource_group_name, vm_name, + extension['name'], + expand='instanceView') + if extension_result.provisioning_state != 'Succeeded': + raise CLIError("Extension updating didn't succeed") + + if not has_new_ade: + # 3. Remove the secret from VM's storage profile + vm = compute_client.virtual_machines.get(resource_group_name, vm_name) + disk_encryption_settings = DiskEncryptionSettings(enabled=False) + vm.storage_profile.os_disk.encryption_settings = disk_encryption_settings + set_vm(cmd, vm) + + +def _show_vm_encryption_status_thru_new_ade(vm_instance_view): + ade = _find_existing_ade(vm_instance_view, use_instance_view=True) + disk_infos = [] + for div in vm_instance_view.instance_view.disks or []: + disk_infos.append({ + 'name': div.name, + 'encryptionSettings': div.encryption_settings, + 'statuses': [x for x in (div.statuses or []) if (x.code or '').startswith('EncryptionState')], + }) + + return { + 'status': ade.statuses if ade else None, + 'substatus': ade.substatuses if ade else None, + 'disks': disk_infos + } + + +def show_vm_encryption_status(cmd, resource_group_name, vm_name): + encryption_status = { + 'osDisk': 'NotEncrypted', + 'osDiskEncryptionSettings': None, + 'dataDisk': 'NotEncrypted', + 'osType': None + } + compute_client = _compute_client_factory(cmd.cli_ctx) + vm = compute_client.virtual_machines.get(resource_group_name, vm_name, expand='instanceView') + has_new_ade, has_old_ade = _detect_ade_status(vm) + if not has_new_ade and not has_old_ade: + logger.warning('Azure Disk Encryption is not enabled') + return None + if has_new_ade: + return _show_vm_encryption_status_thru_new_ade(vm) + is_linux = _is_linux_os(vm) + + # pylint: disable=no-member + # The following logic was mostly ported from xplat-cli + os_type = 'Linux' if is_linux else 'Windows' + encryption_status['osType'] = os_type + extension = vm_extension_info[os_type] + extension_result = compute_client.virtual_machine_extensions.get(resource_group_name, + vm_name, + extension['name'], + expand='instanceView') + logger.debug(extension_result) + if extension_result.instance_view and extension_result.instance_view.statuses: + encryption_status['progressMessage'] = extension_result.instance_view.statuses[0].message + + substatus_message = None + if getattr(extension_result.instance_view, 'substatuses', None): + substatus_message = extension_result.instance_view.substatuses[0].message + + encryption_status['osDiskEncryptionSettings'] = vm.storage_profile.os_disk.encryption_settings + + import json + if is_linux: + try: + message_object = json.loads(substatus_message) + except 
+        except Exception:  # pylint: disable=broad-except
+            message_object = None  # might be from outdated extension
+
+        if message_object and ('os' in message_object):
+            encryption_status['osDisk'] = message_object['os']
+        else:
+            encryption_status['osDisk'] = 'Unknown'
+
+        if message_object and 'data' in message_object:
+            encryption_status['dataDisk'] = message_object['data']
+        else:
+            encryption_status['dataDisk'] = 'Unknown'
+    else:
+        # Windows - get os and data volume encryption state from the vm model
+        if (encryption_status['osDiskEncryptionSettings'] and
+                encryption_status['osDiskEncryptionSettings'].enabled and
+                encryption_status['osDiskEncryptionSettings'].disk_encryption_key and
+                encryption_status['osDiskEncryptionSettings'].disk_encryption_key.secret_url):
+            encryption_status['osDisk'] = _STATUS_ENCRYPTED
+        else:
+            encryption_status['osDisk'] = 'Unknown'
+
+        if extension_result.provisioning_state == 'Succeeded':
+            volume_type = extension_result.settings.get('VolumeType', None)
+            about_data_disk = not volume_type or volume_type.lower() != 'os'
+            if about_data_disk and extension_result.settings.get('EncryptionOperation', None) == 'EnableEncryption':
+                encryption_status['dataDisk'] = _STATUS_ENCRYPTED
+
+    return encryption_status
+
+
+def _get_keyvault_key_url(cli_ctx, keyvault_name, key_name):
+    vault_base_url = get_key_vault_base_url(cli_ctx, keyvault_name)
+    client = create_data_plane_keyvault_key_client(cli_ctx, vault_base_url)
+    key = client.get_key(key_name)
+    return key.id
+
+
+def _handles_default_volume_type_for_vmss_encryption(is_linux, volume_type, force):
+    if is_linux:
+        volume_type = volume_type or _DATA_VOLUME_TYPE
+        if volume_type != _DATA_VOLUME_TYPE:
+            msg = 'OS disk encryption is not yet supported for Linux VM scale sets'
+            if force:
+                logger.warning(msg)
+            else:
+                from knack.util import CLIError
+                raise CLIError(msg)
+    else:
+        volume_type = volume_type or _ALL_VOLUME_TYPE
+    return volume_type
+
+
+def encrypt_vmss(cmd, resource_group_name, vmss_name,  # pylint: disable=too-many-locals, too-many-statements
+                 disk_encryption_keyvault,
+                 key_encryption_keyvault=None,
+                 key_encryption_key=None,
+                 key_encryption_algorithm='RSA-OAEP',
+                 volume_type=None,
+                 force=False):
+    from azure.mgmt.core.tools import parse_resource_id
+
+    # pylint: disable=no-member
+    UpgradeMode, VirtualMachineScaleSetExtension, VirtualMachineScaleSetExtensionProfile = cmd.get_models(
+        'UpgradeMode', 'VirtualMachineScaleSetExtension', 'VirtualMachineScaleSetExtensionProfile')
+
+    compute_client = _compute_client_factory(cmd.cli_ctx)
+    vmss = compute_client.virtual_machine_scale_sets.get(resource_group_name, vmss_name)
+    is_linux = _is_linux_os(vmss.virtual_machine_profile)
+    extension = vm_extension_info['Linux' if is_linux else 'Windows']
+
+    # 1. First validate arguments
+    volume_type = _handles_default_volume_type_for_vmss_encryption(is_linux, volume_type, force)
+
+    # retrieve keyvault details
+    disk_encryption_keyvault_url = get_key_vault_base_url(cmd.cli_ctx,
+                                                          (parse_resource_id(disk_encryption_keyvault))['name'])
+
+    # disk encryption key itself can be further protected, so let us verify
+    if key_encryption_key:
+        key_encryption_keyvault = key_encryption_keyvault or disk_encryption_keyvault
+
+    # to avoid bad server errors, ensure the vault has the right configurations
+    _verify_keyvault_good_for_encryption(cmd.cli_ctx, disk_encryption_keyvault, key_encryption_keyvault, vmss, force)
+
+    # if key name and not key url, get url.
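+    # e.g. a bare key name such as 'mykek' in a vault named 'myvault' is resolved to a
+    # full identifier like 'https://myvault.vault.azure.net/keys/mykek/<version>'
+    # (names here are placeholders)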
+    if key_encryption_key and '://' not in key_encryption_key:
+        key_encryption_key = _get_keyvault_key_url(
+            cmd.cli_ctx, (parse_resource_id(key_encryption_keyvault))['name'], key_encryption_key)
+
+    # 2. we are ready to provision/update the disk encryption extensions
+    public_config = {
+        'KeyVaultURL': disk_encryption_keyvault_url,
+        'KeyEncryptionKeyURL': key_encryption_key or '',
+        'KeyVaultResourceId': disk_encryption_keyvault,
+        'KekVaultResourceId': key_encryption_keyvault if key_encryption_key else '',
+        'KeyEncryptionAlgorithm': key_encryption_algorithm if key_encryption_key else '',
+        'VolumeType': volume_type,
+        'EncryptionOperation': 'EnableEncryption'
+    }
+
+    ext = VirtualMachineScaleSetExtension(name=extension['name'],
+                                          publisher=extension['publisher'],
+                                          type_properties_type=extension['name'],
+                                          type_handler_version=extension['version'],
+                                          settings=public_config,
+                                          auto_upgrade_minor_version=True,
+                                          force_update_tag=uuid.uuid4())
+    exts = [ext]
+
+    # remove any old ade extensions set by this command and add the new one.
+    vmss_ext_profile = vmss.virtual_machine_profile.extension_profile
+    if vmss_ext_profile and vmss_ext_profile.extensions:
+        exts.extend(old_ext for old_ext in vmss.virtual_machine_profile.extension_profile.extensions
+                    if old_ext.type != ext.type or old_ext.name != ext.name)
+    vmss.virtual_machine_profile.extension_profile = VirtualMachineScaleSetExtensionProfile(extensions=exts)
+
+    # Avoid unnecessary permission error
+    vmss.virtual_machine_profile.storage_profile.image_reference = None
+
+    poller = compute_client.virtual_machine_scale_sets.begin_create_or_update(resource_group_name, vmss_name, vmss)
+    LongRunningOperation(cmd.cli_ctx)(poller)
+    _show_post_action_message(resource_group_name, vmss.name, vmss.upgrade_policy.mode == UpgradeMode.manual, True)
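+
+
+# Usage sketch (resource names are illustrative):
+#   az vmss encryption enable -g MyResourceGroup -n MyScaleSet --disk-encryption-keyvault MyVault
+# Pass --key-encryption-key as well to wrap the disk encryption key with a KEK.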
+
+
+def decrypt_vmss(cmd, resource_group_name, vmss_name, volume_type=None, force=False):
+    UpgradeMode, VirtualMachineScaleSetExtension = cmd.get_models('UpgradeMode', 'VirtualMachineScaleSetExtension')
+    compute_client = _compute_client_factory(cmd.cli_ctx)
+    vmss = compute_client.virtual_machine_scale_sets.get(resource_group_name, vmss_name)
+    is_linux = _is_linux_os(vmss.virtual_machine_profile)
+    extension = vm_extension_info['Linux' if is_linux else 'Windows']
+
+    # 1. be nice, figure out the default volume type
+    volume_type = _handles_default_volume_type_for_vmss_encryption(is_linux, volume_type, force)
+
+    # 2. update the disk encryption extension
+    public_config = {
+        'VolumeType': volume_type,
+        'EncryptionOperation': 'DisableEncryption',
+    }
+
+    ext = VirtualMachineScaleSetExtension(name=extension['name'],
+                                          publisher=extension['publisher'],
+                                          type_properties_type=extension['name'],
+                                          type_handler_version=extension['version'],
+                                          settings=public_config,
+                                          auto_upgrade_minor_version=True,
+                                          force_update_tag=uuid.uuid4())
+    if (not vmss.virtual_machine_profile.extension_profile or
+            not vmss.virtual_machine_profile.extension_profile.extensions):
+        extensions = []
+    else:
+        extensions = vmss.virtual_machine_profile.extension_profile.extensions
+
+    ade_extension = [x for x in extensions if
+                     x.type_properties_type.lower() == extension['name'].lower() and
+                     x.publisher.lower() == extension['publisher'].lower()]
+    if not ade_extension:
+        from knack.util import CLIError
+        raise CLIError("VM scale set '{}' was not encrypted".format(vmss_name))
+
+    index = vmss.virtual_machine_profile.extension_profile.extensions.index(ade_extension[0])
+    vmss.virtual_machine_profile.extension_profile.extensions[index] = ext
+
+    # Avoid unnecessary permission error
+    vmss.virtual_machine_profile.storage_profile.image_reference = None
+
+    poller = compute_client.virtual_machine_scale_sets.begin_create_or_update(resource_group_name, vmss_name, vmss)
+    LongRunningOperation(cmd.cli_ctx)(poller)
+    _show_post_action_message(resource_group_name, vmss.name, vmss.upgrade_policy.mode == UpgradeMode.manual, False)
+
+
+def _show_post_action_message(resource_group_name, vmss_name, manual_mode, enable):
+    msg = ''
+    if manual_mode:
+        msg = ("With manual upgrade mode, you will need to run 'az vmss update-instances -g {} -n {} "
+               "--instance-ids \"*\"' to propagate the change.\n".format(resource_group_name, vmss_name))
+    msg += ("Note: {} encryption will take a while to finish. Please query the status using "
+            "'az vmss encryption show -g {} -n {}'. For Linux VMs, you will lose access during "
+            "this period.".format('enabling' if enable else 'disabling', resource_group_name, vmss_name))
+    logger.warning(msg)
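+
+
+# For reference, the 'show' status command below returns a list with one entry per
+# VMSS instance, shaped roughly like this (values are illustrative):
+#   [{"id": ".../virtualMachineScaleSets/myvmss/virtualMachines/0",
+#     "disks": [{"name": "osdisk", "encryptionSettings": null,
+#                "statuses": [{"code": "EncryptionState/encrypted"}]}]}]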
+
+
+def show_vmss_encryption_status(cmd, resource_group_name, vmss_name):
+    client = _compute_client_factory(cmd.cli_ctx)
+    vm_instances = list(client.virtual_machine_scale_set_vms.list(resource_group_name, vmss_name,
+                                                                  select='instanceView', expand='instanceView'))
+    result = []
+    for instance in vm_instances:
+        view = instance.instance_view
+        disk_infos = []
+        vm_enc_info = {
+            'id': instance.id,
+            'disks': disk_infos
+        }
+        for div in view.disks:
+            disk_infos.append({
+                'name': div.name,
+                'encryptionSettings': div.encryption_settings,
+                'statuses': [x for x in (div.statuses or []) if (x.code or '').startswith('EncryptionState')]
+            })
+
+        result.append(vm_enc_info)
+    return result
+
+
+def _verify_keyvault_good_for_encryption(cli_ctx, disk_vault_id, kek_vault_id, vm_or_vmss, force):
+    def _report_client_side_validation_error(msg):
+        if force:
+            logger.warning("WARNING: %s %s", msg, "Encryption might fail.")
+        else:
+            from knack.util import CLIError
+            raise CLIError("ERROR: {}".format(msg))
+
+    resource_type = "VMSS" if vm_or_vmss.type.lower().endswith("virtualmachinescalesets") else "VM"
+
+    from azure.cli.core.commands.client_factory import get_mgmt_service_client
+    from azure.cli.core.profiles import ResourceType
+    from azure.mgmt.core.tools import parse_resource_id
+
+    client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_KEYVAULT).vaults
+    disk_vault_resource_info = parse_resource_id(disk_vault_id)
+    key_vault = client.get(disk_vault_resource_info['resource_group'], disk_vault_resource_info['name'])
+
+    # ensure the vault is enabled for disk encryption
+    if not key_vault.properties or not key_vault.properties.enabled_for_disk_encryption:
+        _report_client_side_validation_error("Keyvault '{}' is not enabled for disk encryption.".format(
+            disk_vault_resource_info['resource_name']))
+
+    if kek_vault_id:
+        kek_vault_info = parse_resource_id(kek_vault_id)
+        if disk_vault_resource_info['name'].lower() != kek_vault_info['name'].lower():
+            client.get(kek_vault_info['resource_group'], kek_vault_info['name'])
+
+    # verify the subscription matches
+    vm_vmss_resource_info = parse_resource_id(vm_or_vmss.id)
+    if vm_vmss_resource_info['subscription'].lower() != disk_vault_resource_info['subscription'].lower():
+        _report_client_side_validation_error("{} {}'s subscription does not match the keyvault's subscription."
+                                             .format(resource_type, vm_vmss_resource_info['name']))
+
+    # verify the region matches
+    if key_vault.location.replace(' ', '').lower() != vm_or_vmss.location.replace(' ', '').lower():
+        _report_client_side_validation_error(
+            "{} {}'s region does not match the keyvault's region.".format(resource_type, vm_vmss_resource_info['name']))
diff --git a/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2018_03_01_hybrid/__init__.py b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2018_03_01_hybrid/__init__.py
new file mode 100644
index 00000000000..34913fb394d
--- /dev/null
+++ b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2018_03_01_hybrid/__init__.py
@@ -0,0 +1,4 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# -------------------------------------------------------------------------------------------- diff --git a/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2018_03_01_hybrid/_params.py b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2018_03_01_hybrid/_params.py new file mode 100644 index 00000000000..6edc353cdef --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2018_03_01_hybrid/_params.py @@ -0,0 +1,9 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- +# pylint: disable=unused-argument, too-many-locals, too-many-branches, too-many-statements + + +def load_arguments(self, _): + pass diff --git a/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2018_03_01_hybrid/commands.py b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2018_03_01_hybrid/commands.py new file mode 100644 index 00000000000..079c67317aa --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2018_03_01_hybrid/commands.py @@ -0,0 +1,23 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- +# pylint: disable=unused-argument, too-many-locals, too-many-branches, too-many-statements +from azure.cli.command_modules.vm.azure_stack._format import ( + transform_disk_show_table_output, + transform_vmss_list_with_zones_table_output) + +from .operations._util import import_aaz_by_profile + + +def load_command_table(self, _): + Disk = import_aaz_by_profile("disk") + self.command_table['disk list'] = Disk.List(loader=self, table_transformer='[].' + transform_disk_show_table_output) + self.command_table['disk show'] = Disk.Show(loader=self, table_transformer=transform_disk_show_table_output) + + VMSS = import_aaz_by_profile("vmss") + self.command_table['vmss list'] = VMSS.List(loader=self, + table_transformer=transform_vmss_list_with_zones_table_output) + + from .operations.capacity_reservation_group import CapacityReservationGroupList + self.command_table['capacity reservation group list'] = CapacityReservationGroupList(loader=self) diff --git a/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2018_03_01_hybrid/operations/__init__.py b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2018_03_01_hybrid/operations/__init__.py new file mode 100644 index 00000000000..fca9bdf191e --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2018_03_01_hybrid/operations/__init__.py @@ -0,0 +1,6 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# -------------------------------------------------------------------------------------------- +# pylint: disable=unused-import +from ._util import import_aaz_by_profile diff --git a/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2018_03_01_hybrid/operations/_util.py b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2018_03_01_hybrid/operations/_util.py new file mode 100644 index 00000000000..a03409c27c4 --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2018_03_01_hybrid/operations/_util.py @@ -0,0 +1,9 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- +import importlib + + +def import_aaz_by_profile(module_name): + return importlib.import_module(f"azure.cli.command_modules.vm.aaz.profile_2018_03_01_hybrid.{module_name}") diff --git a/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2018_03_01_hybrid/operations/capacity_reservation_group.py b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2018_03_01_hybrid/operations/capacity_reservation_group.py new file mode 100644 index 00000000000..0c447c7be0e --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2018_03_01_hybrid/operations/capacity_reservation_group.py @@ -0,0 +1,42 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# --------------------------------------------------------------------------------------------
+# pylint: disable=no-self-use, line-too-long, protected-access, too-few-public-methods, unused-argument
+from knack.log import get_logger
+from ._util import import_aaz_by_profile
+
+logger = get_logger(__name__)
+
+_CapacityReservationGroup = import_aaz_by_profile("capacity.reservation.group")
+
+
+class CapacityReservationGroupList(_CapacityReservationGroup.List):
+    @classmethod
+    def _build_arguments_schema(cls, *args, **kwargs):
+        from azure.cli.core.aaz import AAZBoolArg
+        args_schema = super()._build_arguments_schema(*args, **kwargs)
+        args_schema.vm_instance = AAZBoolArg(
+            options=['--vm-instance'],
+            help="Retrieve the virtual machine instances "
+                 "that are associated with the capacity reservation group in the response.",
+            nullable=True
+        )
+        args_schema.vmss_instance = AAZBoolArg(
+            options=['--vmss-instance'],
+            help="Retrieve the scale set VM instances that are associated with the capacity reservation group in the response.",
+            nullable=True
+        )
+        args_schema.expand._registered = False
+        return args_schema
+
+    def pre_operations(self):
+        from azure.cli.core.aaz import has_value
+        args = self.ctx.args
+        if args.vm_instance:
+            args.expand = "virtualMachines/$ref"
+        if args.vmss_instance:
+            if has_value(args.expand):
+                args.expand = args.expand.to_serialized_data() + ",virtualMachineScaleSetVMs/$ref"
+            else:
+                args.expand = "virtualMachineScaleSetVMs/$ref"
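The two boolean switches above only shape the OData $expand query parameter sent with the list request; a rough sketch of the effect (resource names are illustrative):

    az capacity reservation group list -g MyResourceGroup --vm-instance --vmss-instance
    # -> GET .../capacityReservationGroups?$expand=virtualMachines/$ref,virtualMachineScaleSetVMs/$ref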
diff --git a/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2019_03_01_hybrid/__init__.py b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2019_03_01_hybrid/__init__.py
new file mode 100644
index 00000000000..34913fb394d
--- /dev/null
+++ b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2019_03_01_hybrid/__init__.py
@@ -0,0 +1,4 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
diff --git a/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2019_03_01_hybrid/_params.py b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2019_03_01_hybrid/_params.py
new file mode 100644
index 00000000000..6edc353cdef
--- /dev/null
+++ b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2019_03_01_hybrid/_params.py
@@ -0,0 +1,9 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
+# pylint: disable=unused-argument, too-many-locals, too-many-branches, too-many-statements
+
+
+def load_arguments(self, _):
+    pass
diff --git a/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2019_03_01_hybrid/commands.py b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2019_03_01_hybrid/commands.py
new file mode 100644
index 00000000000..079c67317aa
--- /dev/null
+++ b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2019_03_01_hybrid/commands.py
@@ -0,0 +1,23 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
+# pylint: disable=unused-argument, too-many-locals, too-many-branches, too-many-statements
+from azure.cli.command_modules.vm.azure_stack._format import (
+    transform_disk_show_table_output,
+    transform_vmss_list_with_zones_table_output)
+
+from .operations._util import import_aaz_by_profile
+
+
+def load_command_table(self, _):
+    Disk = import_aaz_by_profile("disk")
+    self.command_table['disk list'] = Disk.List(loader=self, table_transformer='[].' + transform_disk_show_table_output)
+    self.command_table['disk show'] = Disk.Show(loader=self, table_transformer=transform_disk_show_table_output)
+
+    VMSS = import_aaz_by_profile("vmss")
+    self.command_table['vmss list'] = VMSS.List(loader=self,
+                                                table_transformer=transform_vmss_list_with_zones_table_output)
+
+    from .operations.capacity_reservation_group import CapacityReservationGroupList
+    self.command_table['capacity reservation group list'] = CapacityReservationGroupList(loader=self)
diff --git a/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2019_03_01_hybrid/operations/__init__.py b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2019_03_01_hybrid/operations/__init__.py
new file mode 100644
index 00000000000..fca9bdf191e
--- /dev/null
+++ b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2019_03_01_hybrid/operations/__init__.py
@@ -0,0 +1,6 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
+# pylint: disable=unused-import
+from ._util import import_aaz_by_profile
diff --git a/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2019_03_01_hybrid/operations/_util.py b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2019_03_01_hybrid/operations/_util.py
new file mode 100644
index 00000000000..4181858cd87
--- /dev/null
+++ b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2019_03_01_hybrid/operations/_util.py
@@ -0,0 +1,9 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
+import importlib
+
+
+def import_aaz_by_profile(module_name):
+    return importlib.import_module(f"azure.cli.command_modules.vm.aaz.profile_2019_03_01_hybrid.{module_name}")
diff --git a/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2019_03_01_hybrid/operations/capacity_reservation_group.py b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2019_03_01_hybrid/operations/capacity_reservation_group.py
new file mode 100644
index 00000000000..0c447c7be0e
--- /dev/null
+++ b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2019_03_01_hybrid/operations/capacity_reservation_group.py
@@ -0,0 +1,42 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
+# pylint: disable=no-self-use, line-too-long, protected-access, too-few-public-methods, unused-argument
+from knack.log import get_logger
+from ._util import import_aaz_by_profile
+
+logger = get_logger(__name__)
+
+_CapacityReservationGroup = import_aaz_by_profile("capacity.reservation.group")
+
+
+class CapacityReservationGroupList(_CapacityReservationGroup.List):
+    @classmethod
+    def _build_arguments_schema(cls, *args, **kwargs):
+        from azure.cli.core.aaz import AAZBoolArg
+        args_schema = super()._build_arguments_schema(*args, **kwargs)
+        args_schema.vm_instance = AAZBoolArg(
+            options=['--vm-instance'],
+            help="Retrieve the virtual machine instances "
+                 "that are associated with the capacity reservation group in the response.",
+            nullable=True
+        )
+        args_schema.vmss_instance = AAZBoolArg(
+            options=['--vmss-instance'],
+            help="Retrieve the scale set VM instances that are associated with the capacity reservation group in the response.",
+            nullable=True
+        )
+        args_schema.expand._registered = False
+        return args_schema
+
+    def pre_operations(self):
+        from azure.cli.core.aaz import has_value
+        args = self.ctx.args
+        if args.vm_instance:
+            args.expand = "virtualMachines/$ref"
+        if args.vmss_instance:
+            if has_value(args.expand):
+                args.expand = args.expand.to_serialized_data() + ",virtualMachineScaleSetVMs/$ref"
+            else:
+                args.expand = "virtualMachineScaleSetVMs/$ref"
diff --git a/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2020_09_01_hybrid/__init__.py b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2020_09_01_hybrid/__init__.py
new file mode 100644
index 00000000000..34913fb394d
--- /dev/null
+++ b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2020_09_01_hybrid/__init__.py
@@ -0,0 +1,4 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# -------------------------------------------------------------------------------------------- diff --git a/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2020_09_01_hybrid/_params.py b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2020_09_01_hybrid/_params.py new file mode 100644 index 00000000000..6edc353cdef --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2020_09_01_hybrid/_params.py @@ -0,0 +1,9 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- +# pylint: disable=unused-argument, too-many-locals, too-many-branches, too-many-statements + + +def load_arguments(self, _): + pass diff --git a/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2020_09_01_hybrid/commands.py b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2020_09_01_hybrid/commands.py new file mode 100644 index 00000000000..4eb2acd1111 --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2020_09_01_hybrid/commands.py @@ -0,0 +1,26 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- +# pylint: disable=unused-argument, too-many-locals, too-many-branches, too-many-statements +from azure.cli.command_modules.vm.azure_stack._format import ( + transform_disk_show_table_output, + transform_vmss_list_with_zones_table_output) + +from .operations._util import import_aaz_by_profile + + +def load_command_table(self, _): + from .operations.ppg import PPGShow + self.command_table["ppg show"] = PPGShow(loader=self) + + Disk = import_aaz_by_profile("disk") + self.command_table['disk list'] = Disk.List(loader=self, table_transformer='[].' + transform_disk_show_table_output) + self.command_table['disk show'] = Disk.Show(loader=self, table_transformer=transform_disk_show_table_output) + + VMSS = import_aaz_by_profile("vmss") + self.command_table['vmss list'] = VMSS.List(loader=self, + table_transformer=transform_vmss_list_with_zones_table_output) + + from .operations.capacity_reservation_group import CapacityReservationGroupList + self.command_table['capacity reservation group list'] = CapacityReservationGroupList(loader=self) diff --git a/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2020_09_01_hybrid/operations/__init__.py b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2020_09_01_hybrid/operations/__init__.py new file mode 100644 index 00000000000..fca9bdf191e --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2020_09_01_hybrid/operations/__init__.py @@ -0,0 +1,6 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# -------------------------------------------------------------------------------------------- +# pylint: disable=unused-import +from ._util import import_aaz_by_profile diff --git a/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2020_09_01_hybrid/operations/_util.py b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2020_09_01_hybrid/operations/_util.py new file mode 100644 index 00000000000..073ba4ab632 --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2020_09_01_hybrid/operations/_util.py @@ -0,0 +1,9 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- +import importlib + + +def import_aaz_by_profile(module_name): + return importlib.import_module(f"azure.cli.command_modules.vm.aaz.profile_2020_09_01_hybrid.{module_name}") diff --git a/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2020_09_01_hybrid/operations/capacity_reservation_group.py b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2020_09_01_hybrid/operations/capacity_reservation_group.py new file mode 100644 index 00000000000..0c447c7be0e --- /dev/null +++ b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2020_09_01_hybrid/operations/capacity_reservation_group.py @@ -0,0 +1,42 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# --------------------------------------------------------------------------------------------
+# pylint: disable=no-self-use, line-too-long, protected-access, too-few-public-methods, unused-argument
+from knack.log import get_logger
+from ._util import import_aaz_by_profile
+
+logger = get_logger(__name__)
+
+_CapacityReservationGroup = import_aaz_by_profile("capacity.reservation.group")
+
+
+class CapacityReservationGroupList(_CapacityReservationGroup.List):
+    @classmethod
+    def _build_arguments_schema(cls, *args, **kwargs):
+        from azure.cli.core.aaz import AAZBoolArg
+        args_schema = super()._build_arguments_schema(*args, **kwargs)
+        args_schema.vm_instance = AAZBoolArg(
+            options=['--vm-instance'],
+            help="Retrieve the virtual machine instances "
+                 "that are associated with the capacity reservation group in the response.",
+            nullable=True
+        )
+        args_schema.vmss_instance = AAZBoolArg(
+            options=['--vmss-instance'],
+            help="Retrieve the scale set VM instances that are associated with the capacity reservation group in the response.",
+            nullable=True
+        )
+        args_schema.expand._registered = False
+        return args_schema
+
+    def pre_operations(self):
+        from azure.cli.core.aaz import has_value
+        args = self.ctx.args
+        if args.vm_instance:
+            args.expand = "virtualMachines/$ref"
+        if args.vmss_instance:
+            if has_value(args.expand):
+                args.expand = args.expand.to_serialized_data() + ",virtualMachineScaleSetVMs/$ref"
+            else:
+                args.expand = "virtualMachineScaleSetVMs/$ref"
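The ppg.py override added next registers "True" as the blank value for --include-colocation-status, so the flag works without an explicit value; a usage sketch (resource names are illustrative):

    az ppg show -g MyResourceGroup -n MyPPG --include-colocation-status
    # equivalent to passing --include-colocation-status True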
diff --git a/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2020_09_01_hybrid/operations/ppg.py b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2020_09_01_hybrid/operations/ppg.py
new file mode 100644
index 00000000000..87c5ece90b0
--- /dev/null
+++ b/src/azure-cli/azure/cli/command_modules/vm/azure_stack/profile_2020_09_01_hybrid/operations/ppg.py
@@ -0,0 +1,24 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
+# pylint: disable=no-self-use, line-too-long, protected-access, too-few-public-methods, unused-argument
+from knack.log import get_logger
+from ._util import import_aaz_by_profile
+
+logger = get_logger(__name__)
+
+_PPG = import_aaz_by_profile("ppg")
+
+
+class PPGShow(_PPG.Show):
+
+    @classmethod
+    def _build_arguments_schema(cls, *args, **kwargs):
+        args_schema = super()._build_arguments_schema(*args, **kwargs)
+
+        from azure.cli.core.aaz import AAZArgEnum
+        args_schema.include_colocation_status._blank = "True"
+        args_schema.include_colocation_status.enum = AAZArgEnum({"True": "True", "False": "False"})
+
+        return args_schema
diff --git a/src/azure-cli/azure/cli/command_modules/vm/commands.py b/src/azure-cli/azure/cli/command_modules/vm/commands.py
index a6c8f8aa4b2..47540fb3659 100644
--- a/src/azure-cli/azure/cli/command_modules/vm/commands.py
+++ b/src/azure-cli/azure/cli/command_modules/vm/commands.py
@@ -26,7 +26,7 @@
     transform_disk_create_table_output, transform_sku_for_table_output, transform_disk_show_table_output,
     transform_extension_show_table_output, get_vmss_table_output_transformer,
     transform_vm_encryption_show_table_output, transform_log_analytics_query_output,
-    transform_vmss_list_with_zones_table_output, transform_vmss_list_without_zones_table_output)
+    transform_vmss_list_with_zones_table_output)
 from azure.cli.command_modules.vm._validators import (
     process_vm_create_namespace, process_vmss_create_namespace, process_image_create_namespace,
     process_disk_create_namespace, process_snapshot_create_namespace,
@@ -234,7 +234,7 @@ def load_command_table(self, _):
                          client_factory=cf_community_gallery_image_version)
 
     with self.command_group("ppg"):
-        from .custom import PPGShow
+        from .operations.ppg import PPGShow
         self.command_table["ppg show"] = PPGShow(loader=self)
 
     with self.command_group('disk', compute_disk_sdk, operation_group='disks', min_api='2017-03-30') as g:
@@ -242,10 +242,9 @@
         g.custom_command('grant-access', 'grant_disk_access')
         g.generic_update_command('update', custom_func_name='update_managed_disk', setter_name='begin_create_or_update',
                                  setter_arg_name='disk', supports_no_wait=True)
-        from azure.cli.command_modules.vm._vm_utils import import_aaz_by_profile
-        Disk = import_aaz_by_profile(self.cli_ctx.cloud.profile, "disk")
-        self.command_table['disk list'] = Disk.List(loader=self, table_transformer='[].' + transform_disk_show_table_output)
-        self.command_table['disk show'] = Disk.Show(loader=self, table_transformer=transform_disk_show_table_output)
+        from .aaz.latest.disk import List as DiskList, Show as DiskShow
+        self.command_table['disk list'] = DiskList(loader=self, table_transformer='[].' 
+ transform_disk_show_table_output) + self.command_table['disk show'] = DiskShow(loader=self, table_transformer=transform_disk_show_table_output) with self.command_group('disk-encryption-set', compute_disk_encryption_set_sdk, operation_group='disk_encryption_sets', client_factory=cf_disk_encryption_set, min_api='2019-07-01') as g: g.custom_command('create', 'create_disk_encryption_set', supports_no_wait=True) @@ -459,18 +458,12 @@ def load_command_table(self, _): g.wait_command('wait', getter_name='get_vmss', getter_type=compute_custom) g.custom_command('set-orchestration-service-state', 'set_orchestration_service_state', supports_no_wait=True) - from azure.cli.command_modules.vm._vm_utils import import_aaz_by_profile - VMSS = import_aaz_by_profile(self.cli_ctx.cloud.profile, "vmss") - if self.supported_api_version(min_api='2017-03-30'): - self.command_table['vmss list'] = VMSS.List(loader=self, - table_transformer=transform_vmss_list_with_zones_table_output) - else: - self.command_table['vmss list'] = VMSS.List(loader=self, - table_transformer=transform_vmss_list_without_zones_table_output) + from .aaz.latest.vmss import List as VMSSList + self.command_table['vmss list'] = VMSSList(loader=self, + table_transformer=transform_vmss_list_with_zones_table_output) - if self.cli_ctx.cloud.profile == 'latest': - from .custom import VMSSListInstances - self.command_table['vmss list-instances'] = VMSSListInstances(loader=self) + from .operations.vmss import VMSSListInstances + self.command_table['vmss list-instances'] = VMSSListInstances(loader=self) with self.command_group('vmss diagnostics', compute_vmss_sdk) as g: g.custom_command('set', 'set_vmss_diagnostics_extension') @@ -606,7 +599,7 @@ def load_command_table(self, _): g.custom_show_command('show', 'show_capacity_reservation_group') with self.command_group('capacity reservation group'): - from .custom import CapacityReservationGroupList + from .operations.capacity_reservation_group import CapacityReservationGroupList self.command_table['capacity reservation group list'] = CapacityReservationGroupList(loader=self) with self.command_group('capacity reservation', capacity_reservations_sdk, min_api='2021-04-01', diff --git a/src/azure-cli/azure/cli/command_modules/vm/custom.py b/src/azure-cli/azure/cli/command_modules/vm/custom.py index 62729392567..5da696fbb4e 100644 --- a/src/azure-cli/azure/cli/command_modules/vm/custom.py +++ b/src/azure-cli/azure/cli/command_modules/vm/custom.py @@ -46,9 +46,6 @@ load_images_thru_services, _get_latest_image_version) from ._client_factory import (_compute_client_factory, cf_vm_image_term, _dev_test_labs_client_factory) -from .aaz.latest.ppg import Show as _PPGShow -from .aaz.latest.vmss import ListInstances as _VMSSListInstances -from .aaz.latest.capacity.reservation.group import List as _CapacityReservationGroupList from .aaz.latest.vm.disk import AttachDetachDataDisk from .generated.custom import * # noqa: F403, pylint: disable=unused-wildcard-import,wildcard-import @@ -5725,37 +5722,6 @@ def show_capacity_reservation_group(client, resource_group_name, capacity_reserv expand=expand) -class CapacityReservationGroupList(_CapacityReservationGroupList): - @classmethod - def _build_arguments_schema(cls, *args, **kwargs): - from azure.cli.core.aaz import AAZBoolArg - args_schema = super()._build_arguments_schema(*args, **kwargs) - args_schema.vm_instance = AAZBoolArg( - options=['--vm-instance'], - help="Retrieve the Virtual Machine Instance " - "which are associated to capacity reservation group in the 
response.", - nullable=True - ) - args_schema.vmss_instance = AAZBoolArg( - options=['--vmss-instance'], - help="Retrieve the ScaleSet VM Instance which are associated to capacity reservation group in the response.", - nullable=True - ) - args_schema.expand._registered = False - return args_schema - - def pre_operations(self): - from azure.cli.core.aaz import has_value - args = self.ctx.args - if args.vm_instance: - args.expand = "virtualMachines/$ref" - if args.vmss_instance: - if has_value(args.expand): - args.expand = args.expand.to_serialized_data() + ",virtualMachineScaleSetVMs/$ref" - else: - args.expand = "virtualMachineScaleSetVMs/$ref" - - def create_capacity_reservation(cmd, client, resource_group_name, capacity_reservation_group_name, capacity_reservation_name, location=None, sku_name=None, capacity=None, zone=None, tags=None): @@ -6162,33 +6128,3 @@ def sig_community_image_version_list(client, location, public_gallery_name, gall gallery_image_name=gallery_image_name) return get_page_result(generator, marker, show_next_marker) # endRegion - - -class PPGShow(_PPGShow): - - @classmethod - def _build_arguments_schema(cls, *args, **kwargs): - args_schema = super()._build_arguments_schema(*args, **kwargs) - - from azure.cli.core.aaz import AAZArgEnum - args_schema.include_colocation_status._blank = "True" - args_schema.include_colocation_status.enum = AAZArgEnum({"True": "True", "False": "False"}) - - return args_schema - - -class VMSSListInstances(_VMSSListInstances): - def _output(self, *args, **kwargs): - from azure.cli.core.aaz import AAZUndefined, has_value - - # Resolve flatten conflict - # When the type field conflicts, the type in inner layer is ignored and the outer layer is applied - for value in self.ctx.vars.instance.value: - if has_value(value.resources): - for resource in value.resources: - if has_value(resource.type): - resource.type = AAZUndefined - - result = self.deserialize_output(self.ctx.vars.instance.value, client_flatten=True) - next_link = self.deserialize_output(self.ctx.vars.instance.next_link) - return result, next_link diff --git a/src/azure-cli/azure/cli/command_modules/vm/linter_exclusions.yml b/src/azure-cli/azure/cli/command_modules/vm/linter_exclusions.yml index 92aa4b3f2e6..87b04080279 100644 --- a/src/azure-cli/azure/cli/command_modules/vm/linter_exclusions.yml +++ b/src/azure-cli/azure/cli/command_modules/vm/linter_exclusions.yml @@ -2,11 +2,15 @@ # exclusions for the vm module vmss application set: + rule_exclusions: + - missing_command_test_coverage parameters: treat_deployment_as_failure: rule_exclusions: - option_length_too_long vmss create: + rule_exclusions: + - missing_command_test_coverage parameters: upgrade_policy_mode: rule_exclusions: @@ -63,6 +67,8 @@ vmss create: rule_exclusions: - option_length_too_long vmss update: + rule_exclusions: + - missing_command_test_coverage parameters: enable_cross_zone_upgrade: rule_exclusions: @@ -95,16 +101,22 @@ vmss update: rule_exclusions: - option_length_too_long vmss encryption enable: + rule_exclusions: + - missing_command_test_coverage parameters: key_encryption_algorithm: rule_exclusions: - missing_parameter_help vm application set: + rule_exclusions: + - missing_command_test_coverage parameters: treat_deployment_as_failure: rule_exclusions: - option_length_too_long vm create: + rule_exclusions: + - missing_command_test_coverage parameters: public_ip_address_allocation: rule_exclusions: @@ -125,19 +137,26 @@ vm create: rule_exclusions: - option_length_too_long vm unmanaged-disk attach: + 
rule_exclusions: + - missing_command_test_coverage parameters: size_gb: rule_exclusions: - missing_parameter_help vm encryption enable: + rule_exclusions: + - missing_command_test_coverage parameters: key_encryption_algorithm: rule_exclusions: - missing_parameter_help vm extension list: rule_exclusions: + - missing_command_test_coverage - no_ids_for_list_commands vm install-patches: + rule_exclusions: + - missing_command_test_coverage parameters: classifications_to_include_win: rule_exclusions: @@ -155,21 +174,29 @@ vm install-patches: rule_exclusions: - option_length_too_long image create: + rule_exclusions: + - missing_command_test_coverage parameters: os_type: rule_exclusions: - missing_parameter_help snapshot create: + rule_exclusions: + - missing_command_test_coverage parameters: sku: rule_exclusions: - missing_parameter_help snapshot update: + rule_exclusions: + - missing_command_test_coverage parameters: sku: rule_exclusions: - missing_parameter_help sig image-version create: + rule_exclusions: + - missing_command_test_coverage parameters: target_region_cvm_encryption: rule_exclusions: @@ -178,16 +205,22 @@ sig image-version create: rule_exclusions: - option_length_too_long sig image-version update: + rule_exclusions: + - missing_command_test_coverage parameters: allow_replicated_location_deletion: rule_exclusions: - option_length_too_long sig image-version undelete: + rule_exclusions: + - missing_command_test_coverage parameters: allow_replicated_location_deletion: rule_exclusions: - option_length_too_long restore-point create: + rule_exclusions: + - missing_command_test_coverage parameters: data_disk_restore_point_encryption_set: rule_exclusions: @@ -205,8 +238,583 @@ restore-point create: rule_exclusions: - option_length_too_long disk create: + rule_exclusions: + - missing_command_test_coverage parameters: optimized_for_frequent_attach: rule_exclusions: - option_length_too_long +disk grant-access: + rule_exclusions: + - missing_command_test_coverage +disk update: + rule_exclusions: + - missing_command_test_coverage +disk-encryption-set create: + rule_exclusions: + - missing_command_test_coverage +disk-encryption-set update: + rule_exclusions: + - missing_command_test_coverage +disk-encryption-set identity assign: + rule_exclusions: + - missing_command_test_coverage +disk-encryption-set identity remove: + rule_exclusions: + - missing_command_test_coverage +disk-encryption-set identity show: + rule_exclusions: + - missing_command_test_coverage +disk-access create: + rule_exclusions: + - missing_command_test_coverage +disk-access update: + rule_exclusions: + - missing_command_test_coverage +image update: + rule_exclusions: + - missing_command_test_coverage +image builder create: + rule_exclusions: + - missing_command_test_coverage +image builder list: + rule_exclusions: + - missing_command_test_coverage +image builder show: + rule_exclusions: + - missing_command_test_coverage +image builder delete: + rule_exclusions: + - missing_command_test_coverage +image builder update: + rule_exclusions: + - missing_command_test_coverage +image builder wait: + rule_exclusions: + - missing_command_test_coverage +image builder run: + rule_exclusions: + - missing_command_test_coverage +image builder show-runs: + rule_exclusions: + - missing_command_test_coverage +image builder cancel: + rule_exclusions: + - missing_command_test_coverage +image builder identity assign: + rule_exclusions: + - missing_command_test_coverage +image builder identity remove: + rule_exclusions: + - 
missing_command_test_coverage +image builder identity show: + rule_exclusions: + - missing_command_test_coverage +image builder customizer add: + rule_exclusions: + - missing_command_test_coverage +image builder customizer remove: + rule_exclusions: + - missing_command_test_coverage +image builder customizer clear: + rule_exclusions: + - missing_command_test_coverage +image builder output add: + rule_exclusions: + - missing_command_test_coverage +image builder output remove: + rule_exclusions: + - missing_command_test_coverage +image builder output clear: + rule_exclusions: + - missing_command_test_coverage +image builder output versioning set: + rule_exclusions: + - missing_command_test_coverage +image builder output versioning remove: + rule_exclusions: + - missing_command_test_coverage +image builder output versioning show: + rule_exclusions: + - missing_command_test_coverage +image builder validator add: + rule_exclusions: + - missing_command_test_coverage +image builder validator remove: + rule_exclusions: + - missing_command_test_coverage +image builder validator show: + rule_exclusions: + - missing_command_test_coverage +image builder optimizer add: + rule_exclusions: + - missing_command_test_coverage +image builder optimizer update: + rule_exclusions: + - missing_command_test_coverage +image builder optimizer remove: + rule_exclusions: + - missing_command_test_coverage +image builder optimizer show: + rule_exclusions: + - missing_command_test_coverage +image builder error-handler add: + rule_exclusions: + - missing_command_test_coverage +image builder error-handler remove: + rule_exclusions: + - missing_command_test_coverage +image builder error-handler show: + rule_exclusions: + - missing_command_test_coverage +snapshot grant-access: + rule_exclusions: + - missing_command_test_coverage +vm identity assign: + rule_exclusions: + - missing_command_test_coverage +vm identity remove: + rule_exclusions: + - missing_command_test_coverage +vm identity show: + rule_exclusions: + - missing_command_test_coverage +vm application list: + rule_exclusions: + - missing_command_test_coverage +vm capture: + rule_exclusions: + - missing_command_test_coverage +vm delete: + rule_exclusions: + - missing_command_test_coverage +vm get-instance-view: + rule_exclusions: + - missing_command_test_coverage +vm list: + rule_exclusions: + - missing_command_test_coverage +vm list-ip-addresses: + rule_exclusions: + - missing_command_test_coverage +vm list-skus: + rule_exclusions: + - missing_command_test_coverage +vm list-usage: + rule_exclusions: + - missing_command_test_coverage +vm open-port: + rule_exclusions: + - missing_command_test_coverage +vm resize: + rule_exclusions: + - missing_command_test_coverage +vm restart: + rule_exclusions: + - missing_command_test_coverage +vm show: + rule_exclusions: + - missing_command_test_coverage +vm stop: + rule_exclusions: + - missing_command_test_coverage +vm update: + rule_exclusions: + - missing_command_test_coverage +vm wait: + rule_exclusions: + - missing_command_test_coverage +vm auto-shutdown: + rule_exclusions: + - missing_command_test_coverage +vm availability-set convert: + rule_exclusions: + - missing_command_test_coverage +vm availability-set create: + rule_exclusions: + - missing_command_test_coverage +vm availability-set list: + rule_exclusions: + - missing_command_test_coverage +vm availability-set update: + rule_exclusions: + - missing_command_test_coverage +vm boot-diagnostics disable: + rule_exclusions: + - missing_command_test_coverage +vm 
boot-diagnostics enable: + rule_exclusions: + - missing_command_test_coverage +vm boot-diagnostics get-boot-log: + rule_exclusions: + - missing_command_test_coverage +vm boot-diagnostics get-boot-log-uris: + rule_exclusions: + - missing_command_test_coverage +vm diagnostics set: + rule_exclusions: + - missing_command_test_coverage +vm diagnostics get-default-config: + rule_exclusions: + - missing_command_test_coverage +vm disk attach: + rule_exclusions: + - missing_command_test_coverage +vm disk detach: + rule_exclusions: + - missing_command_test_coverage +vm encryption disable: + rule_exclusions: + - missing_command_test_coverage +vm encryption show: + rule_exclusions: + - missing_command_test_coverage +vm extension show: + rule_exclusions: + - missing_command_test_coverage +vm extension set: + rule_exclusions: + - missing_command_test_coverage +vm extension wait: + rule_exclusions: + - missing_command_test_coverage +vm extension image list: + rule_exclusions: + - missing_command_test_coverage +vm image list-offers: + rule_exclusions: + - missing_command_test_coverage +vm image list-publishers: + rule_exclusions: + - missing_command_test_coverage +vm image list-skus: + rule_exclusions: + - missing_command_test_coverage +vm image list: + rule_exclusions: + - missing_command_test_coverage +vm image show: + rule_exclusions: + - missing_command_test_coverage +vm image terms accept: + rule_exclusions: + - missing_command_test_coverage +vm image terms cancel: + rule_exclusions: + - missing_command_test_coverage +vm image terms show: + rule_exclusions: + - missing_command_test_coverage +vm nic add: + rule_exclusions: + - missing_command_test_coverage +vm nic remove: + rule_exclusions: + - missing_command_test_coverage +vm nic set: + rule_exclusions: + - missing_command_test_coverage +vm nic show: + rule_exclusions: + - missing_command_test_coverage +vm nic list: + rule_exclusions: + - missing_command_test_coverage +vm run-command invoke: + rule_exclusions: + - missing_command_test_coverage +vm run-command list: + rule_exclusions: + - missing_command_test_coverage +vm run-command show: + rule_exclusions: + - missing_command_test_coverage +vm run-command create: + rule_exclusions: + - missing_command_test_coverage +vm run-command update: + rule_exclusions: + - missing_command_test_coverage +vm run-command delete: + rule_exclusions: + - missing_command_test_coverage +vm run-command wait: + rule_exclusions: + - missing_command_test_coverage +vm secret format: + rule_exclusions: + - missing_command_test_coverage +vm secret add: + rule_exclusions: + - missing_command_test_coverage +vm secret list: + rule_exclusions: + - missing_command_test_coverage +vm secret remove: + rule_exclusions: + - missing_command_test_coverage +vm unmanaged-disk detach: + rule_exclusions: + - missing_command_test_coverage +vm unmanaged-disk list: + rule_exclusions: + - missing_command_test_coverage +vm user update: + rule_exclusions: + - missing_command_test_coverage +vm user delete: + rule_exclusions: + - missing_command_test_coverage +vm user reset-ssh: + rule_exclusions: + - missing_command_test_coverage +vm host get-instance-view: + rule_exclusions: + - missing_command_test_coverage +vm host create: + rule_exclusions: + - missing_command_test_coverage +vm host update: + rule_exclusions: + - missing_command_test_coverage +vm host group get-instance-view: + rule_exclusions: + - missing_command_test_coverage +vm host group create: + rule_exclusions: + - missing_command_test_coverage +vm host group update: + rule_exclusions: 
+        - missing_command_test_coverage
+vmss identity assign:
+    rule_exclusions:
+        - missing_command_test_coverage
+vmss identity remove:
+    rule_exclusions:
+        - missing_command_test_coverage
+vmss identity show:
+    rule_exclusions:
+        - missing_command_test_coverage
+vmss application list:
+    rule_exclusions:
+        - missing_command_test_coverage
+vmss deallocate:
+    rule_exclusions:
+        - missing_command_test_coverage
+vmss delete-instances:
+    rule_exclusions:
+        - missing_command_test_coverage
+vmss get-instance-view:
+    rule_exclusions:
+        - missing_command_test_coverage
+vmss list-instance-connection-info:
+    rule_exclusions:
+        - missing_command_test_coverage
+vmss list-instance-public-ips:
+    rule_exclusions:
+        - missing_command_test_coverage
+vmss reimage:
+    rule_exclusions:
+        - missing_command_test_coverage
+vmss restart:
+    rule_exclusions:
+        - missing_command_test_coverage
+vmss scale:
+    rule_exclusions:
+        - missing_command_test_coverage
+vmss show:
+    rule_exclusions:
+        - missing_command_test_coverage
+vmss start:
+    rule_exclusions:
+        - missing_command_test_coverage
+vmss stop:
+    rule_exclusions:
+        - missing_command_test_coverage
+vmss update-instances:
+    rule_exclusions:
+        - missing_command_test_coverage
+vmss wait:
+    rule_exclusions:
+        - missing_command_test_coverage
+vmss set-orchestration-service-state:
+    rule_exclusions:
+        - missing_command_test_coverage
+vmss diagnostics set:
+    rule_exclusions:
+        - missing_command_test_coverage
+vmss diagnostics get-default-config:
+    rule_exclusions:
+        - missing_command_test_coverage
+vmss disk attach:
+    rule_exclusions:
+        - missing_command_test_coverage
+vmss disk detach:
+    rule_exclusions:
+        - missing_command_test_coverage
+vmss encryption disable:
+    rule_exclusions:
+        - missing_command_test_coverage
+vmss encryption show:
+    rule_exclusions:
+        - missing_command_test_coverage
+vmss extension delete:
+    rule_exclusions:
+        - missing_command_test_coverage
+vmss extension show:
+    rule_exclusions:
+        - missing_command_test_coverage
+vmss extension set:
+    rule_exclusions:
+        - missing_command_test_coverage
+vmss extension list:
+    rule_exclusions:
+        - missing_command_test_coverage
+vmss extension upgrade:
+    rule_exclusions:
+        - missing_command_test_coverage
+vmss extension image list:
+    rule_exclusions:
+        - missing_command_test_coverage
+vmss run-command invoke:
+    rule_exclusions:
+        - missing_command_test_coverage
+vmss run-command list:
+    rule_exclusions:
+        - missing_command_test_coverage
+vmss run-command show:
+    rule_exclusions:
+        - missing_command_test_coverage
+vmss run-command create:
+    rule_exclusions:
+        - missing_command_test_coverage
+vmss run-command update:
+    rule_exclusions:
+        - missing_command_test_coverage
+vmss run-command delete:
+    rule_exclusions:
+        - missing_command_test_coverage
+sig create:
+    rule_exclusions:
+        - missing_command_test_coverage
+sig show:
+    rule_exclusions:
+        - missing_command_test_coverage
+sig update:
+    rule_exclusions:
+        - missing_command_test_coverage
+sig list-community:
+    rule_exclusions:
+        - missing_command_test_coverage
+sig image-definition show-community:
+    rule_exclusions:
+        - missing_command_test_coverage
+sig image-definition list-community:
+    rule_exclusions:
+        - missing_command_test_coverage
+sig image-version list-community:
+    rule_exclusions:
+        - missing_command_test_coverage
+sig image-definition create:
+    rule_exclusions:
+        - missing_command_test_coverage
+sig image-definition update:
+    rule_exclusions:
+        - missing_command_test_coverage
+sig image-version show:
+    rule_exclusions:
+        - missing_command_test_coverage
+sig image-version wait:
+    rule_exclusions:
+        - missing_command_test_coverage
+sig share add:
+    rule_exclusions:
+        - missing_command_test_coverage
+sig share remove:
+    rule_exclusions:
+        - missing_command_test_coverage
+sig share reset:
+    rule_exclusions:
+        - missing_command_test_coverage
+sig share enable-community:
+    rule_exclusions:
+        - missing_command_test_coverage
+sig share wait:
+    rule_exclusions:
+        - missing_command_test_coverage
+sig image-definition list-shared:
+    rule_exclusions:
+        - missing_command_test_coverage
+sig image-version list-shared:
+    rule_exclusions:
+        - missing_command_test_coverage
+sig gallery-application create:
+    rule_exclusions:
+        - missing_command_test_coverage
+sig gallery-application update:
+    rule_exclusions:
+        - missing_command_test_coverage
+sig gallery-application wait:
+    rule_exclusions:
+        - missing_command_test_coverage
+sig gallery-application version create:
+    rule_exclusions:
+        - missing_command_test_coverage
+sig gallery-application version update:
+    rule_exclusions:
+        - missing_command_test_coverage
+ppg create:
+    rule_exclusions:
+        - missing_command_test_coverage
+ppg list:
+    rule_exclusions:
+        - missing_command_test_coverage
+ppg update:
+    rule_exclusions:
+        - missing_command_test_coverage
+vm monitor log show:
+    rule_exclusions:
+        - missing_command_test_coverage
+vm monitor metrics tail:
+    rule_exclusions:
+        - missing_command_test_coverage
+vm monitor metrics list-definitions:
+    rule_exclusions:
+        - missing_command_test_coverage
+capacity reservation group create:
+    rule_exclusions:
+        - missing_command_test_coverage
+capacity reservation group update:
+    rule_exclusions:
+        - missing_command_test_coverage
+capacity reservation group show:
+    rule_exclusions:
+        - missing_command_test_coverage
+capacity reservation create:
+    rule_exclusions:
+        - missing_command_test_coverage
+capacity reservation update:
+    rule_exclusions:
+        - missing_command_test_coverage
+capacity reservation show:
+    rule_exclusions:
+        - missing_command_test_coverage
+restore-point show:
+    rule_exclusions:
+        - missing_command_test_coverage
+restore-point wait:
+    rule_exclusions:
+        - missing_command_test_coverage
+restore-point collection show:
+    rule_exclusions:
+        - missing_command_test_coverage
+restore-point collection create:
+    rule_exclusions:
+        - missing_command_test_coverage
+restore-point collection update:
+    rule_exclusions:
+        - missing_command_test_coverage
+restore-point collection wait:
+    rule_exclusions:
+        - missing_command_test_coverage
 ...
\ No newline at end of file
diff --git a/src/azure-cli/azure/cli/command_modules/vm/operations/__init__.py b/src/azure-cli/azure/cli/command_modules/vm/operations/__init__.py
new file mode 100644
index 00000000000..34913fb394d
--- /dev/null
+++ b/src/azure-cli/azure/cli/command_modules/vm/operations/__init__.py
@@ -0,0 +1,4 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
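Note (illustrative, not part of this diff): the new `operations` package collects hand-written overrides of aaz-generated commands. The pattern, used by the three modules that follow, is a subclass that adjusts the generated argument schema in `_build_arguments_schema` and/or normalizes arguments in `pre_operations` before the request is built. A runnable stand-alone sketch of the shape, with stand-in classes (the real base classes live under vm/aaz/latest/...):

class _GeneratedList:
    # Stand-in for an aaz-generated command class.
    @classmethod
    def _build_arguments_schema(cls):
        return {"expand": None}

class CustomizedList(_GeneratedList):
    # Hand-written override: extend the generated schema with a new flag.
    @classmethod
    def _build_arguments_schema(cls):
        schema = super()._build_arguments_schema()
        schema["vm_instance"] = False
        return schema

assert "vm_instance" in CustomizedList._build_arguments_schema()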
diff --git a/src/azure-cli/azure/cli/command_modules/vm/operations/capacity_reservation_group.py b/src/azure-cli/azure/cli/command_modules/vm/operations/capacity_reservation_group.py
new file mode 100644
index 00000000000..d3915266e16
--- /dev/null
+++ b/src/azure-cli/azure/cli/command_modules/vm/operations/capacity_reservation_group.py
@@ -0,0 +1,41 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
+# pylint: disable=no-self-use, line-too-long, protected-access, too-few-public-methods, unused-argument
+from knack.log import get_logger
+
+from ..aaz.latest.capacity.reservation.group import List as _CapacityReservationGroupList
+
+logger = get_logger(__name__)
+
+
+class CapacityReservationGroupList(_CapacityReservationGroupList):
+    @classmethod
+    def _build_arguments_schema(cls, *args, **kwargs):
+        from azure.cli.core.aaz import AAZBoolArg
+        args_schema = super()._build_arguments_schema(*args, **kwargs)
+        args_schema.vm_instance = AAZBoolArg(
+            options=['--vm-instance'],
+            help="Retrieve the virtual machine instances that are "
+                 "associated with the capacity reservation group in the response.",
+            nullable=True
+        )
+        args_schema.vmss_instance = AAZBoolArg(
+            options=['--vmss-instance'],
+            help="Retrieve the scale set VM instances that are associated with the capacity reservation group in the response.",
+            nullable=True
+        )
+        args_schema.expand._registered = False
+        return args_schema
+
+    def pre_operations(self):
+        from azure.cli.core.aaz import has_value
+        args = self.ctx.args
+        if args.vm_instance:
+            args.expand = "virtualMachines/$ref"
+        if args.vmss_instance:
+            if has_value(args.expand):
+                args.expand = args.expand.to_serialized_data() + ",virtualMachineScaleSetVMs/$ref"
+            else:
+                args.expand = "virtualMachineScaleSetVMs/$ref"
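Note (illustrative, not part of this diff): the override above hides the generated --expand parameter behind two boolean convenience flags. A runnable plain-Python sketch of the $expand mapping that pre_operations performs; the helper name build_expand is hypothetical:

def build_expand(vm_instance=False, vmss_instance=False):
    # Mirror CapacityReservationGroupList.pre_operations: fold the two flags
    # into the OData $expand query value the service expects.
    parts = []
    if vm_instance:
        parts.append("virtualMachines/$ref")
    if vmss_instance:
        parts.append("virtualMachineScaleSetVMs/$ref")
    return ",".join(parts) or None

# `az capacity reservation group list --vm-instance --vmss-instance` thus sends
# $expand=virtualMachines/$ref,virtualMachineScaleSetVMs/$ref
assert build_expand(True, True) == "virtualMachines/$ref,virtualMachineScaleSetVMs/$ref"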
diff --git a/src/azure-cli/azure/cli/command_modules/vm/operations/ppg.py b/src/azure-cli/azure/cli/command_modules/vm/operations/ppg.py
new file mode 100644
index 00000000000..be66970d835
--- /dev/null
+++ b/src/azure-cli/azure/cli/command_modules/vm/operations/ppg.py
@@ -0,0 +1,23 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
+# pylint: disable=no-self-use, line-too-long, protected-access, too-few-public-methods, unused-argument
+from knack.log import get_logger
+
+from ..aaz.latest.ppg import Show as _PPGShow
+
+logger = get_logger(__name__)
+
+
+class PPGShow(_PPGShow):
+
+    @classmethod
+    def _build_arguments_schema(cls, *args, **kwargs):
+        args_schema = super()._build_arguments_schema(*args, **kwargs)
+
+        from azure.cli.core.aaz import AAZArgEnum
+        args_schema.include_colocation_status._blank = "True"
+        args_schema.include_colocation_status.enum = AAZArgEnum({"True": "True", "False": "False"})
+
+        return args_schema
diff --git a/src/azure-cli/azure/cli/command_modules/vm/operations/vmss.py b/src/azure-cli/azure/cli/command_modules/vm/operations/vmss.py
new file mode 100644
index 00000000000..bd393a750af
--- /dev/null
+++ b/src/azure-cli/azure/cli/command_modules/vm/operations/vmss.py
@@ -0,0 +1,27 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
+# pylint: disable=no-self-use, line-too-long, protected-access, too-few-public-methods, unused-argument
+from knack.log import get_logger
+
+from ..aaz.latest.vmss import ListInstances as _VMSSListInstances
+
+logger = get_logger(__name__)
+
+
+class VMSSListInstances(_VMSSListInstances):
+    def _output(self, *args, **kwargs):
+        from azure.cli.core.aaz import AAZUndefined, has_value
+
+        # Resolve flatten conflict:
+        # when the 'type' field conflicts, the inner-layer value is dropped and the outer-layer value is applied
+        for value in self.ctx.vars.instance.value:
+            if has_value(value.resources):
+                for resource in value.resources:
+                    if has_value(resource.type):
+                        resource.type = AAZUndefined
+
+        result = self.deserialize_output(self.ctx.vars.instance.value, client_flatten=True)
+        next_link = self.deserialize_output(self.ctx.vars.instance.next_link)
+        return result, next_link
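Note (illustrative, not part of this diff): with client_flatten=True a child resource's own 'type' field collides with the flattened parent property of the same name, so the override above discards the inner value before serialization. A standalone sketch of the same idea on plain dicts; drop_conflicting_type is a hypothetical helper:

def drop_conflicting_type(instances):
    # Keep the outer 'type' and discard the nested one, mirroring what
    # VMSSListInstances._output does with AAZUndefined.
    for instance in instances:
        for resource in instance.get("resources", []):
            resource.pop("type", None)
    return instances

vms = [{"type": "Microsoft.Compute/virtualMachineScaleSets/virtualMachines",
        "resources": [{"name": "ext1", "type": "Microsoft.Compute/virtualMachines/extensions"}]}]
assert "type" not in drop_conflicting_type(vms)[0]["resources"][0]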
diff --git a/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2018_03_01/test_custom_vm_commands.py b/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2018_03_01/test_custom_vm_commands.py
index 2301b360244..70390b1209b 100644
--- a/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2018_03_01/test_custom_vm_commands.py
+++ b/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2018_03_01/test_custom_vm_commands.py
@@ -8,25 +8,20 @@
 
 from knack.util import CLIError
 
-from azure.cli.command_modules.vm.custom import (enable_boot_diagnostics, disable_boot_diagnostics,
-                                                 _merge_secrets, BootLogStreamWriter,
-                                                 _get_access_extension_upgrade_info,
-                                                 _LINUX_ACCESS_EXT,
-                                                 _WINDOWS_ACCESS_EXT,
-                                                 _get_extension_instance_name,
-                                                 get_boot_log)
-from azure.cli.command_modules.vm.custom import \
+from azure.cli.command_modules.vm.azure_stack.custom import \
     (attach_unmanaged_data_disk, detach_unmanaged_data_disk, get_vmss_instance_view)
-
+from azure.cli.command_modules.vm.azure_stack.custom import (enable_boot_diagnostics, disable_boot_diagnostics,
+                                                             _merge_secrets, BootLogStreamWriter,
+                                                             _get_access_extension_upgrade_info,
+                                                             _LINUX_ACCESS_EXT,
+                                                             _WINDOWS_ACCESS_EXT,
+                                                             _get_extension_instance_name,
+                                                             get_boot_log)
+from azure.cli.command_modules.vm.azure_stack.disk_encryption import (encrypt_vm, decrypt_vm)
 from azure.cli.core import AzCommandsLoader
 from azure.cli.core.commands import AzCliCommand
-
-
-from azure.cli.command_modules.vm.disk_encryption import (encrypt_vm, decrypt_vm, encrypt_vmss, decrypt_vmss)
-from azure.cli.core.profiles import get_sdk, ResourceType
-
 from azure.cli.core.mock import DummyCli
-
+from azure.cli.core.profiles import get_sdk, ResourceType
 
 NetworkProfile, StorageProfile, DataDisk, OSDisk, OperatingSystemTypes, InstanceViewStatus, \
     VirtualMachineExtensionInstanceView, VirtualMachineExtension, ImageReference, DiskCreateOptionTypes, \
@@ -88,8 +83,8 @@ def test_get_access_extension_upgrade_info(self):
         self.assertEqual('1.5', version)
         self.assertEqual(True, auto_upgrade)
 
-    @mock.patch('azure.cli.command_modules.vm.custom.get_vm_to_update', autospec=True)
-    @mock.patch('azure.cli.command_modules.vm.custom.set_vm', autospec=True)
+    @mock.patch('azure.cli.command_modules.vm.azure_stack.custom.get_vm_to_update', autospec=True)
+    @mock.patch('azure.cli.command_modules.vm.azure_stack.custom.set_vm', autospec=True)
     def test_enable_boot_diagnostics_on_vm_never_enabled(self, mock_vm_set, mock_vm_get_to_update):
         vm_fake = mock.MagicMock()
         cmd = _get_test_cmd()
@@ -101,8 +96,8 @@ def test_enable_boot_diagnostics_on_vm_never_enabled(self, mock_vm_
         self.assertTrue(mock_vm_get_to_update.called)
         mock_vm_set.assert_called_once_with(cmd, vm_fake, mock.ANY)
 
-    @mock.patch('azure.cli.command_modules.vm.custom.get_vm_to_update', autospec=True)
-    @mock.patch('azure.cli.command_modules.vm.custom.set_vm', autospec=True)
+    @mock.patch('azure.cli.command_modules.vm.azure_stack.custom.get_vm_to_update', autospec=True)
+    @mock.patch('azure.cli.command_modules.vm.azure_stack.custom.set_vm', autospec=True)
     def test_disable_boot_diagnostics_on_vm(self, mock_vm_set, mock_vm_get_to_update):
         vm_fake = mock.MagicMock()
         cmd = _get_test_cmd()
@@ -115,8 +110,8 @@ def test_disable_boot_diagnostics_on_vm(self, mock_vm_set, mock_vm_get_to_update
         self.assertTrue(mock_vm_get_to_update.called)
         mock_vm_set.assert_called_once_with(cmd, vm_fake, mock.ANY)
 
-    @mock.patch('azure.cli.command_modules.vm.custom.get_vm_to_update', autospec=True)
-    @mock.patch('azure.cli.command_modules.vm.custom.set_vm', autospec=True)
+    @mock.patch('azure.cli.command_modules.vm.azure_stack.custom.get_vm_to_update', autospec=True)
+    @mock.patch('azure.cli.command_modules.vm.azure_stack.custom.set_vm', autospec=True)
     def test_attach_new_datadisk_default_on_vm(self, mock_vm_set, mock_vm_get_to_update):
         # pylint: disable=line-too-long
         faked_vhd_uri = 'https://your_stoage_account_name.blob.core.windows.net/vhds/d1.vhd'
@@ -141,8 +136,8 @@ def test_attach_new_datadisk_default_on_vm(self, mock_vm_set, mock_vm_get_to_upd
         self.assertTrue(data_disk.name.startswith('vm1-'))
         self.assertEqual(data_disk.vhd.uri, faked_vhd_uri)
 
-    @mock.patch('azure.cli.command_modules.vm.custom.get_vm_to_update', autospec=True)
-    @mock.patch('azure.cli.command_modules.vm.custom.set_vm', autospec=True)
+    @mock.patch('azure.cli.command_modules.vm.azure_stack.custom.get_vm_to_update', autospec=True)
+    @mock.patch('azure.cli.command_modules.vm.azure_stack.custom.set_vm', autospec=True)
     def test_attach_new_datadisk_custom_on_vm(self, mock_vm_set, mock_vm_get_to_update):
         # pylint: disable=line-too-long
        faked_vhd_uri = 'https://your_stoage_account_name.blob.core.windows.net/vhds/d1.vhd'
@@ -168,8 +163,8 @@ def test_attach_new_datadisk_custom_on_vm(self, mock_vm_set, mock_vm_get_to_upda
         self.assertEqual(data_disk.lun, 0) # the existing disk has '1', so it
verifes the second one be picked as '0' self.assertEqual(data_disk.vhd.uri, faked_vhd_uri2) - @mock.patch('azure.cli.command_modules.vm.custom.get_vm_to_update', autospec=True) - @mock.patch('azure.cli.command_modules.vm.custom.set_vm', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack.custom.get_vm_to_update', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack.custom.set_vm', autospec=True) def test_attach_existing_datadisk_on_vm(self, mock_vm_set, mock_vm_get_to_update): # pylint: disable=line-too-long faked_vhd_uri = 'https://your_stoage_account_name.blob.core.windows.net/vhds/d1.vhd' @@ -194,8 +189,8 @@ def test_attach_existing_datadisk_on_vm(self, mock_vm_set, mock_vm_get_to_update self.assertEqual(data_disk.name, 'd1') self.assertEqual(data_disk.vhd.uri, faked_vhd_uri) - @mock.patch('azure.cli.command_modules.vm.custom.get_vm_to_update', autospec=True) - @mock.patch('azure.cli.command_modules.vm.custom.set_vm', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack.custom.get_vm_to_update', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack.custom.set_vm', autospec=True) def test_deattach_disk_on_vm(self, mock_vm_set, mock_vm_get_to_update): # pylint: disable=line-too-long # stub to get the vm which has no datadisks @@ -213,7 +208,7 @@ def test_deattach_disk_on_vm(self, mock_vm_set, mock_vm_get_to_update): mock_vm_set.assert_called_once_with(cmd, vm) self.assertEqual(len(vm.storage_profile.data_disks), 0) - @mock.patch('azure.cli.command_modules.vm.custom._compute_client_factory') + @mock.patch('azure.cli.command_modules.vm.azure_stack.custom._compute_client_factory') def test_show_vmss_instance_view(self, factory_mock): vm_client = mock.MagicMock() cmd = _get_test_cmd() @@ -227,8 +222,8 @@ def test_show_vmss_instance_view(self, factory_mock): select='instanceView', expand='instanceView') # pylint: disable=line-too-long - @mock.patch('azure.cli.command_modules.vm.disk_encryption._compute_client_factory', autospec=True) - @mock.patch('azure.cli.command_modules.vm.disk_encryption._get_keyvault_key_url', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack.disk_encryption._compute_client_factory', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack.disk_encryption._get_keyvault_key_url', autospec=True) def test_enable_encryption_error_cases_handling(self, mock_get_keyvault_key_url, mock_compute_client_factory): faked_keyvault = '/subscriptions/01234567-1bf0-4dda-aec3-cb9272f09590/resourceGroups/rg1/providers/Microsoft.KeyVault/vaults/v1' os_disk = OSDisk(create_option=None, os_type=OperatingSystemTypes.linux) @@ -254,8 +249,8 @@ def test_enable_encryption_error_cases_handling(self, mock_get_keyvault_key_url, self.assertTrue("--aad-client-cert-thumbprint or --aad-client-secret" in str(context.exception)) - @mock.patch('azure.cli.command_modules.vm.disk_encryption.set_vm', autospec=True) - @mock.patch('azure.cli.command_modules.vm.disk_encryption._compute_client_factory', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack.disk_encryption.set_vm', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack.disk_encryption._compute_client_factory', autospec=True) def test_disable_encryption_error_cases_handling(self, mock_compute_client_factory, mock_vm_set): # pylint: disable=unused-argument os_disk = OSDisk(create_option=None, os_type=OperatingSystemTypes.linux) existing_disk = DataDisk(lun=1, vhd='https://someuri', name='d1', 
create_option=DiskCreateOptionTypes.empty) @@ -363,7 +358,7 @@ def test_get_extension_instance_name_when_type_none(self): class TestVMBootLog(unittest.TestCase): - @mock.patch('azure.cli.command_modules.vm.custom.logger.warning') + @mock.patch('azure.cli.command_modules.vm.azure_stack.custom.logger.warning') def test_vm_boot_log_handle_unicode(self, logger_warning__mock): import sys writer = BootLogStreamWriter(sys.stdout) diff --git a/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2018_03_01/test_vm_actions.py b/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2018_03_01/test_vm_actions.py index a09c817f9da..d197511dd6d 100644 --- a/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2018_03_01/test_vm_actions.py +++ b/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2018_03_01/test_vm_actions.py @@ -10,18 +10,18 @@ from unittest import mock from azure.cli.core.keys import is_valid_ssh_rsa_public_key -from azure.cli.command_modules.vm._validators import (validate_ssh_key, - _figure_out_storage_source, - _validate_admin_username, - _validate_admin_password, - _parse_image_argument, - process_disk_create_namespace, - process_snapshot_create_namespace, - _validate_vmss_create_subnet, - _get_next_subnet_addr_suffix, - _validate_vm_vmss_msi, - _validate_vm_vmss_accelerated_networking) -from azure.cli.command_modules.vm._vm_utils import normalize_disk_info +from azure.cli.command_modules.vm.azure_stack._validators import (validate_ssh_key, + _figure_out_storage_source, + _validate_admin_username, + _validate_admin_password, + _parse_image_argument, + process_disk_create_namespace, + process_snapshot_create_namespace, + _validate_vmss_create_subnet, + _get_next_subnet_addr_suffix, + _validate_vm_vmss_msi, + _validate_vm_vmss_accelerated_networking) +from azure.cli.command_modules.vm.azure_stack._vm_utils import normalize_disk_info from azure.cli.core.mock import DummyCli from azure.mgmt.compute.models import CachingTypes from knack.util import CLIError @@ -191,7 +191,7 @@ def _verify_password_with_ex(self, admin_password, is_linux, expected_err): _validate_admin_password(admin_password, is_linux) self.assertTrue(expected_err in str(context.exception)) - @mock.patch('azure.cli.command_modules.vm._validators._compute_client_factory', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack._validators._compute_client_factory', autospec=True) def test_parse_image_argument(self, client_factory_mock): compute_client = mock.MagicMock() image = mock.MagicMock() @@ -216,8 +216,8 @@ def test_parse_image_argument(self, client_factory_mock): self.assertEqual('product1', np.plan_product) self.assertEqual('publisher1', np.plan_publisher) - @mock.patch('azure.cli.command_modules.vm._validators._compute_client_factory', autospec=True) - @mock.patch('azure.cli.command_modules.vm._validators.logger.warning', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack._validators._compute_client_factory', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack._validators.logger.warning', autospec=True) def test_parse_staging_image_argument(self, logger_mock, client_factory_mock): from azure.core.exceptions import ResourceNotFoundError compute_client = mock.MagicMock() @@ -296,7 +296,7 @@ def test_get_next_subnet_addr_suffix(self): _validate_vmss_create_subnet(np_mock) self.assertEqual(np_mock.app_gateway_subnet_address_prefix, '10.0.8.0/24') - @mock.patch('azure.cli.command_modules.vm._validators._resolve_role_id', autospec=True) + 
@mock.patch('azure.cli.command_modules.vm.azure_stack._validators._resolve_role_id', autospec=True) @mock.patch('azure.cli.core.commands.client_factory.get_subscription_id', autospec=True) def test_validate_msi_on_create(self, mock_get_subscription, mock_resolve_role_id): # check throw on : az vm/vmss create --assign-identity --role reader --scope "" @@ -349,7 +349,7 @@ def test_validate_msi_on_create(self, mock_get_subscription, mock_resolve_role_i self.assertEqual(np_mock.identity_role, 'reader') mock_resolve_role_id.assert_called_with(cmd.cli_ctx, 'reader', 'foo-scope') - @mock.patch('azure.cli.command_modules.vm._validators._resolve_role_id', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack._validators._resolve_role_id', autospec=True) def test_validate_msi_on_assign_identity_command(self, mock_resolve_role_id): # check throw on : az vm/vmss assign-identity --role reader --scope "" np_mock = mock.MagicMock() @@ -410,7 +410,7 @@ def test_normalize_disk_info(self): normalize_disk_info(data_disk_cachings=['0=None', '1=foo']) self.assertTrue("Data disk with lun of '0' doesn't exist" in str(err.exception)) - @mock.patch('azure.cli.command_modules.vm._validators._compute_client_factory', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack._validators._compute_client_factory', autospec=True) def test_validate_vm_vmss_accelerated_networking(self, client_factory_mock): client_mock, size_mock = mock.MagicMock(), mock.MagicMock() client_mock.virtual_machine_sizes.list.return_value = [size_mock] diff --git a/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2018_03_01/test_vm_defaults.py b/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2018_03_01/test_vm_defaults.py index 520096b3d35..bc3baff0c0a 100644 --- a/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2018_03_01/test_vm_defaults.py +++ b/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2018_03_01/test_vm_defaults.py @@ -11,15 +11,14 @@ from knack.util import CLIError +from azure.cli.command_modules.vm.azure_stack._validators import (_validate_vm_vmss_create_vnet, + _validate_vmss_create_subnet, + _validate_vm_create_storage_account, + _validate_vm_vmss_create_auth, + _validate_vm_create_storage_profile, + _validate_vmss_create_load_balancer_or_app_gateway) from azure.cli.core.profiles import ResourceType -from azure.cli.command_modules.vm._validators import (_validate_vm_vmss_create_vnet, - _validate_vmss_create_subnet, - _validate_vm_create_storage_account, - _validate_vm_vmss_create_auth, - _validate_vm_create_storage_profile, - _validate_vmss_create_load_balancer_or_app_gateway) - def _get_test_cmd(): from azure.cli.core.mock import DummyCli @@ -328,7 +327,7 @@ def test_linux_with_password(self): class TestVMImageDefaults(unittest.TestCase): - @mock.patch('azure.cli.command_modules.vm._validators._compute_client_factory', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack._validators._compute_client_factory', autospec=True) def test_vm_validator_retrieve_image_info_cross_subscription(self, factory_mock): ns = argparse.Namespace() cmd = mock.MagicMock() diff --git a/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2018_03_01/test_vm_image.py b/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2018_03_01/test_vm_image.py index d5d1fc34821..d7618b033b9 100644 --- a/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2018_03_01/test_vm_image.py +++ b/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2018_03_01/test_vm_image.py @@ 
-29,7 +29,7 @@ def _get_test_cmd(): class TestVMImage(unittest.TestCase): def test_read_images_from_alias_doc(self): - from azure.cli.command_modules.vm.custom import list_vm_images + from azure.cli.command_modules.vm.azure_stack.custom import list_vm_images cmd = _get_test_cmd() # action @@ -50,7 +50,7 @@ def test_read_images_from_alias_doc(self): @mock.patch('azure.cli.core.cloud.get_active_cloud', autospec=True) def test_when_alias_doc_is_missing(self, mock_get_active_cloud): - from azure.cli.command_modules.vm._actions import load_images_from_aliases_doc + from azure.cli.command_modules.vm.azure_stack._actions import load_images_from_aliases_doc p = mock.PropertyMock(side_effect=CloudEndpointNotSetException('')) mock_cloud = mock.MagicMock() type(mock_cloud.endpoints).vm_image_alias_doc = p diff --git a/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2018_03_01/test_vm_parameters.py b/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2018_03_01/test_vm_parameters.py index f983141a16a..e38ea68dbe9 100644 --- a/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2018_03_01/test_vm_parameters.py +++ b/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2018_03_01/test_vm_parameters.py @@ -33,7 +33,7 @@ def _mock_get_subscription_id(_): def test_vm_nics(self): from argparse import Namespace - from azure.cli.command_modules.vm._validators import _validate_vm_create_nics + from azure.cli.command_modules.vm.azure_stack._validators import _validate_vm_create_nics def _get_test_cmd(): from azure.cli.core.mock import DummyCli diff --git a/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2019_03_01/test_custom_vm_commands.py b/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2019_03_01/test_custom_vm_commands.py index 2301b360244..e8a49703110 100644 --- a/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2019_03_01/test_custom_vm_commands.py +++ b/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2019_03_01/test_custom_vm_commands.py @@ -8,21 +8,20 @@ from knack.util import CLIError -from azure.cli.command_modules.vm.custom import (enable_boot_diagnostics, disable_boot_diagnostics, - _merge_secrets, BootLogStreamWriter, - _get_access_extension_upgrade_info, - _LINUX_ACCESS_EXT, - _WINDOWS_ACCESS_EXT, - _get_extension_instance_name, - get_boot_log) -from azure.cli.command_modules.vm.custom import \ +from azure.cli.command_modules.vm.azure_stack.custom import (enable_boot_diagnostics, disable_boot_diagnostics, + _merge_secrets, BootLogStreamWriter, + _get_access_extension_upgrade_info, + _LINUX_ACCESS_EXT, + _WINDOWS_ACCESS_EXT, + _get_extension_instance_name, + get_boot_log) +from azure.cli.command_modules.vm.azure_stack.custom import \ (attach_unmanaged_data_disk, detach_unmanaged_data_disk, get_vmss_instance_view) from azure.cli.core import AzCommandsLoader from azure.cli.core.commands import AzCliCommand - -from azure.cli.command_modules.vm.disk_encryption import (encrypt_vm, decrypt_vm, encrypt_vmss, decrypt_vmss) +from azure.cli.command_modules.vm.azure_stack.disk_encryption import (encrypt_vm, decrypt_vm) from azure.cli.core.profiles import get_sdk, ResourceType from azure.cli.core.mock import DummyCli @@ -88,8 +87,8 @@ def test_get_access_extension_upgrade_info(self): self.assertEqual('1.5', version) self.assertEqual(True, auto_upgrade) - @mock.patch('azure.cli.command_modules.vm.custom.get_vm_to_update', autospec=True) - @mock.patch('azure.cli.command_modules.vm.custom.set_vm', autospec=True) + 
@mock.patch('azure.cli.command_modules.vm.azure_stack.custom.get_vm_to_update', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack.custom.set_vm', autospec=True) def test_enable_boot_diagnostics_on_vm_never_enabled(self, mock_vm_set, mock_vm_get_to_update): vm_fake = mock.MagicMock() cmd = _get_test_cmd() @@ -101,8 +100,8 @@ def test_enable_boot_diagnostics_on_vm_never_enabled(self, mock_vm_set, mock_vm_ self.assertTrue(mock_vm_get_to_update.called) mock_vm_set.assert_called_once_with(cmd, vm_fake, mock.ANY) - @mock.patch('azure.cli.command_modules.vm.custom.get_vm_to_update', autospec=True) - @mock.patch('azure.cli.command_modules.vm.custom.set_vm', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack.custom.get_vm_to_update', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack.custom.set_vm', autospec=True) def test_disable_boot_diagnostics_on_vm(self, mock_vm_set, mock_vm_get_to_update): vm_fake = mock.MagicMock() cmd = _get_test_cmd() @@ -115,8 +114,8 @@ def test_disable_boot_diagnostics_on_vm(self, mock_vm_set, mock_vm_get_to_update self.assertTrue(mock_vm_get_to_update.called) mock_vm_set.assert_called_once_with(cmd, vm_fake, mock.ANY) - @mock.patch('azure.cli.command_modules.vm.custom.get_vm_to_update', autospec=True) - @mock.patch('azure.cli.command_modules.vm.custom.set_vm', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack.custom.get_vm_to_update', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack.custom.set_vm', autospec=True) def test_attach_new_datadisk_default_on_vm(self, mock_vm_set, mock_vm_get_to_update): # pylint: disable=line-too-long faked_vhd_uri = 'https://your_stoage_account_name.blob.core.windows.net/vhds/d1.vhd' @@ -141,8 +140,8 @@ def test_attach_new_datadisk_default_on_vm(self, mock_vm_set, mock_vm_get_to_upd self.assertTrue(data_disk.name.startswith('vm1-')) self.assertEqual(data_disk.vhd.uri, faked_vhd_uri) - @mock.patch('azure.cli.command_modules.vm.custom.get_vm_to_update', autospec=True) - @mock.patch('azure.cli.command_modules.vm.custom.set_vm', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack.custom.get_vm_to_update', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack.custom.set_vm', autospec=True) def test_attach_new_datadisk_custom_on_vm(self, mock_vm_set, mock_vm_get_to_update): # pylint: disable=line-too-long faked_vhd_uri = 'https://your_stoage_account_name.blob.core.windows.net/vhds/d1.vhd' @@ -168,8 +167,8 @@ def test_attach_new_datadisk_custom_on_vm(self, mock_vm_set, mock_vm_get_to_upda self.assertEqual(data_disk.lun, 0) # the existing disk has '1', so it verifes the second one be picked as '0' self.assertEqual(data_disk.vhd.uri, faked_vhd_uri2) - @mock.patch('azure.cli.command_modules.vm.custom.get_vm_to_update', autospec=True) - @mock.patch('azure.cli.command_modules.vm.custom.set_vm', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack.custom.get_vm_to_update', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack.custom.set_vm', autospec=True) def test_attach_existing_datadisk_on_vm(self, mock_vm_set, mock_vm_get_to_update): # pylint: disable=line-too-long faked_vhd_uri = 'https://your_stoage_account_name.blob.core.windows.net/vhds/d1.vhd' @@ -194,8 +193,8 @@ def test_attach_existing_datadisk_on_vm(self, mock_vm_set, mock_vm_get_to_update self.assertEqual(data_disk.name, 'd1') self.assertEqual(data_disk.vhd.uri, faked_vhd_uri) - 
@mock.patch('azure.cli.command_modules.vm.custom.get_vm_to_update', autospec=True) - @mock.patch('azure.cli.command_modules.vm.custom.set_vm', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack.custom.get_vm_to_update', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack.custom.set_vm', autospec=True) def test_deattach_disk_on_vm(self, mock_vm_set, mock_vm_get_to_update): # pylint: disable=line-too-long # stub to get the vm which has no datadisks @@ -213,7 +212,7 @@ def test_deattach_disk_on_vm(self, mock_vm_set, mock_vm_get_to_update): mock_vm_set.assert_called_once_with(cmd, vm) self.assertEqual(len(vm.storage_profile.data_disks), 0) - @mock.patch('azure.cli.command_modules.vm.custom._compute_client_factory') + @mock.patch('azure.cli.command_modules.vm.azure_stack.custom._compute_client_factory') def test_show_vmss_instance_view(self, factory_mock): vm_client = mock.MagicMock() cmd = _get_test_cmd() @@ -227,8 +226,8 @@ def test_show_vmss_instance_view(self, factory_mock): select='instanceView', expand='instanceView') # pylint: disable=line-too-long - @mock.patch('azure.cli.command_modules.vm.disk_encryption._compute_client_factory', autospec=True) - @mock.patch('azure.cli.command_modules.vm.disk_encryption._get_keyvault_key_url', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack.disk_encryption._compute_client_factory', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack.disk_encryption._get_keyvault_key_url', autospec=True) def test_enable_encryption_error_cases_handling(self, mock_get_keyvault_key_url, mock_compute_client_factory): faked_keyvault = '/subscriptions/01234567-1bf0-4dda-aec3-cb9272f09590/resourceGroups/rg1/providers/Microsoft.KeyVault/vaults/v1' os_disk = OSDisk(create_option=None, os_type=OperatingSystemTypes.linux) @@ -254,8 +253,8 @@ def test_enable_encryption_error_cases_handling(self, mock_get_keyvault_key_url, self.assertTrue("--aad-client-cert-thumbprint or --aad-client-secret" in str(context.exception)) - @mock.patch('azure.cli.command_modules.vm.disk_encryption.set_vm', autospec=True) - @mock.patch('azure.cli.command_modules.vm.disk_encryption._compute_client_factory', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack.disk_encryption.set_vm', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack.disk_encryption._compute_client_factory', autospec=True) def test_disable_encryption_error_cases_handling(self, mock_compute_client_factory, mock_vm_set): # pylint: disable=unused-argument os_disk = OSDisk(create_option=None, os_type=OperatingSystemTypes.linux) existing_disk = DataDisk(lun=1, vhd='https://someuri', name='d1', create_option=DiskCreateOptionTypes.empty) @@ -363,7 +362,7 @@ def test_get_extension_instance_name_when_type_none(self): class TestVMBootLog(unittest.TestCase): - @mock.patch('azure.cli.command_modules.vm.custom.logger.warning') + @mock.patch('azure.cli.command_modules.vm.azure_stack.custom.logger.warning') def test_vm_boot_log_handle_unicode(self, logger_warning__mock): import sys writer = BootLogStreamWriter(sys.stdout) diff --git a/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2019_03_01/test_template_builder.py b/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2019_03_01/test_template_builder.py index 87df1e73e92..2a2dedfe405 100644 --- a/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2019_03_01/test_template_builder.py +++ 
b/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2019_03_01/test_template_builder.py @@ -6,12 +6,12 @@ import unittest from unittest import mock -from azure.cli.command_modules.vm._template_builder import build_load_balancer_resource +from azure.cli.command_modules.vm.azure_stack._template_builder import build_load_balancer_resource class TestTemplateBuilder(unittest.TestCase): - @mock.patch('azure.cli.command_modules.vm._template_builder.get_target_network_api', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack._template_builder.get_target_network_api', autospec=True) def test_build_load_balancer_resource(self, mock_get_api): mock_get_api.returtn_value = '1970-01-01' cmd_mock = mock.MagicMock() diff --git a/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2019_03_01/test_vm_actions.py b/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2019_03_01/test_vm_actions.py index a85d0ae46f6..a9ed67b5c48 100644 --- a/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2019_03_01/test_vm_actions.py +++ b/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2019_03_01/test_vm_actions.py @@ -10,18 +10,18 @@ from unittest import mock from azure.cli.core.keys import is_valid_ssh_rsa_public_key -from azure.cli.command_modules.vm._validators import (validate_ssh_key, - _figure_out_storage_source, - _validate_admin_username, - _validate_admin_password, - _parse_image_argument, - process_disk_create_namespace, - process_snapshot_create_namespace, - _validate_vmss_create_subnet, - _get_next_subnet_addr_suffix, - _validate_vm_vmss_msi, - _validate_vm_vmss_accelerated_networking) -from azure.cli.command_modules.vm._vm_utils import normalize_disk_info, update_disk_sku_info +from azure.cli.command_modules.vm.azure_stack._validators import (validate_ssh_key, + _figure_out_storage_source, + _validate_admin_username, + _validate_admin_password, + _parse_image_argument, + process_disk_create_namespace, + process_snapshot_create_namespace, + _validate_vmss_create_subnet, + _get_next_subnet_addr_suffix, + _validate_vm_vmss_msi, + _validate_vm_vmss_accelerated_networking) +from azure.cli.command_modules.vm.azure_stack._vm_utils import normalize_disk_info, update_disk_sku_info from azure.cli.core.mock import DummyCli from knack.util import CLIError @@ -190,7 +190,7 @@ def _verify_password_with_ex(self, admin_password, is_linux, expected_err): _validate_admin_password(admin_password, is_linux) self.assertTrue(expected_err in str(context.exception)) - @mock.patch('azure.cli.command_modules.vm._validators._compute_client_factory', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack._validators._compute_client_factory', autospec=True) def test_parse_image_argument(self, client_factory_mock): compute_client = mock.MagicMock() image = mock.MagicMock() @@ -215,8 +215,8 @@ def test_parse_image_argument(self, client_factory_mock): self.assertEqual('product1', np.plan_product) self.assertEqual('publisher1', np.plan_publisher) - @mock.patch('azure.cli.command_modules.vm._validators._compute_client_factory', autospec=True) - @mock.patch('azure.cli.command_modules.vm._validators.logger.warning', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack._validators._compute_client_factory', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack._validators.logger.warning', autospec=True) def test_parse_staging_image_argument(self, logger_mock, client_factory_mock): from azure.core.exceptions import ResourceNotFoundError compute_client = 
mock.MagicMock() @@ -295,7 +295,7 @@ def test_get_next_subnet_addr_suffix(self): _validate_vmss_create_subnet(np_mock) self.assertEqual(np_mock.app_gateway_subnet_address_prefix, '10.0.8.0/24') - @mock.patch('azure.cli.command_modules.vm._validators._resolve_role_id', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack._validators._resolve_role_id', autospec=True) @mock.patch('azure.cli.core.commands.client_factory.get_subscription_id', autospec=True) def test_validate_msi_on_create(self, mock_get_subscription, mock_resolve_role_id): # check throw on : az vm/vmss create --assign-identity --role reader --scope "" @@ -348,7 +348,7 @@ def test_validate_msi_on_create(self, mock_get_subscription, mock_resolve_role_i self.assertEqual(np_mock.identity_role, 'reader') mock_resolve_role_id.assert_called_with(cmd.cli_ctx, 'reader', 'foo-scope') - @mock.patch('azure.cli.command_modules.vm._validators._resolve_role_id', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack._validators._resolve_role_id', autospec=True) def test_validate_msi_on_assign_identity_command(self, mock_resolve_role_id): # check throw on : az vm/vmss assign-identity --role reader --scope "" np_mock = mock.MagicMock() @@ -426,7 +426,7 @@ def test_normalize_disk_info(self): normalize_disk_info(data_disk_cachings=['ReadWrite'], data_disk_sizes_gb=[1, 2], size='standard_L16s_v2') self.assertTrue('for Lv series of machines, "None" is the only supported caching mode' in str(err.exception)) - @mock.patch('azure.cli.command_modules.vm._validators._compute_client_factory', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack._validators._compute_client_factory', autospec=True) def test_validate_vm_vmss_accelerated_networking(self, client_factory_mock): client_mock, size_mock = mock.MagicMock(), mock.MagicMock() client_mock.virtual_machine_sizes.list.return_value = [size_mock] diff --git a/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2019_03_01/test_vm_defaults.py b/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2019_03_01/test_vm_defaults.py index ff05da02579..f0b0da1dcd8 100644 --- a/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2019_03_01/test_vm_defaults.py +++ b/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2019_03_01/test_vm_defaults.py @@ -13,12 +13,12 @@ from azure.cli.core.profiles import ResourceType -from azure.cli.command_modules.vm._validators import (_validate_vm_vmss_create_vnet, - _validate_vmss_create_subnet, - _validate_vm_create_storage_account, - _validate_vm_vmss_create_auth, - _validate_vm_create_storage_profile, - _validate_vmss_create_load_balancer_or_app_gateway) +from azure.cli.command_modules.vm.azure_stack._validators import (_validate_vm_vmss_create_vnet, + _validate_vmss_create_subnet, + _validate_vm_create_storage_account, + _validate_vm_vmss_create_auth, + _validate_vm_create_storage_profile, + _validate_vmss_create_load_balancer_or_app_gateway) def _get_test_cmd(): @@ -378,7 +378,7 @@ def test_linux_with_password_and_ssh_implicit(self): class TestVMImageDefaults(unittest.TestCase): - @mock.patch('azure.cli.command_modules.vm._validators._compute_client_factory', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack._validators._compute_client_factory', autospec=True) def test_vm_validator_retrieve_image_info_cross_subscription(self, factory_mock): ns = argparse.Namespace() cmd = mock.MagicMock() @@ -406,7 +406,7 @@ def test_vm_validator_retrieve_image_info_cross_subscription(self, factory_mock) 
self.assertEqual(ns.os_type.value, 'someOS') self.assertTrue(0 in ns.disk_info) - @mock.patch('azure.cli.command_modules.vm._validators._compute_client_factory', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack._validators._compute_client_factory', autospec=True) def test_vm_validator_enables_ultrassd_lrs(self, factory_mock): ns = argparse.Namespace() cmd = mock.MagicMock() diff --git a/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2019_03_01/test_vm_image.py b/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2019_03_01/test_vm_image.py index d5d1fc34821..d7618b033b9 100644 --- a/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2019_03_01/test_vm_image.py +++ b/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2019_03_01/test_vm_image.py @@ -29,7 +29,7 @@ def _get_test_cmd(): class TestVMImage(unittest.TestCase): def test_read_images_from_alias_doc(self): - from azure.cli.command_modules.vm.custom import list_vm_images + from azure.cli.command_modules.vm.azure_stack.custom import list_vm_images cmd = _get_test_cmd() # action @@ -50,7 +50,7 @@ def test_read_images_from_alias_doc(self): @mock.patch('azure.cli.core.cloud.get_active_cloud', autospec=True) def test_when_alias_doc_is_missing(self, mock_get_active_cloud): - from azure.cli.command_modules.vm._actions import load_images_from_aliases_doc + from azure.cli.command_modules.vm.azure_stack._actions import load_images_from_aliases_doc p = mock.PropertyMock(side_effect=CloudEndpointNotSetException('')) mock_cloud = mock.MagicMock() type(mock_cloud.endpoints).vm_image_alias_doc = p diff --git a/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2019_03_01/test_vm_parameters.py b/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2019_03_01/test_vm_parameters.py index f983141a16a..e38ea68dbe9 100644 --- a/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2019_03_01/test_vm_parameters.py +++ b/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2019_03_01/test_vm_parameters.py @@ -33,7 +33,7 @@ def _mock_get_subscription_id(_): def test_vm_nics(self): from argparse import Namespace - from azure.cli.command_modules.vm._validators import _validate_vm_create_nics + from azure.cli.command_modules.vm.azure_stack._validators import _validate_vm_create_nics def _get_test_cmd(): from azure.cli.core.mock import DummyCli diff --git a/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2020_09_01/test_custom_vm_commands.py b/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2020_09_01/test_custom_vm_commands.py index d3626b17760..b36838e724b 100644 --- a/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2020_09_01/test_custom_vm_commands.py +++ b/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2020_09_01/test_custom_vm_commands.py @@ -8,21 +8,21 @@ from knack.util import CLIError -from azure.cli.command_modules.vm.custom import (enable_boot_diagnostics, disable_boot_diagnostics, - _merge_secrets, BootLogStreamWriter, - _get_access_extension_upgrade_info, - _LINUX_ACCESS_EXT, - _WINDOWS_ACCESS_EXT, - _get_extension_instance_name, - get_boot_log) -from azure.cli.command_modules.vm.custom import \ +from azure.cli.command_modules.vm.azure_stack.custom import (enable_boot_diagnostics, disable_boot_diagnostics, + _merge_secrets, BootLogStreamWriter, + _get_access_extension_upgrade_info, + _LINUX_ACCESS_EXT, + _WINDOWS_ACCESS_EXT, + _get_extension_instance_name, + get_boot_log) +from azure.cli.command_modules.vm.azure_stack.custom import \ 
(attach_unmanaged_data_disk, detach_unmanaged_data_disk, get_vmss_instance_view) from azure.cli.core import AzCommandsLoader from azure.cli.core.commands import AzCliCommand - -from azure.cli.command_modules.vm.disk_encryption import (encrypt_vm, decrypt_vm, encrypt_vmss, decrypt_vmss) +from azure.cli.command_modules.vm.azure_stack.disk_encryption import (encrypt_vm, decrypt_vm, encrypt_vmss, + decrypt_vmss) from azure.cli.core.profiles import get_sdk, ResourceType from azure.cli.core.mock import DummyCli @@ -88,8 +88,8 @@ def test_get_access_extension_upgrade_info(self): self.assertEqual('1.5', version) self.assertEqual(True, auto_upgrade) - @mock.patch('azure.cli.command_modules.vm.custom.get_vm_to_update', autospec=True) - @mock.patch('azure.cli.command_modules.vm.custom.set_vm', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack.custom.get_vm_to_update', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack.custom.set_vm', autospec=True) def test_enable_boot_diagnostics_on_vm_never_enabled(self, mock_vm_set, mock_vm_get_to_update): vm_fake = mock.MagicMock() cmd = _get_test_cmd() @@ -101,8 +101,8 @@ def test_enable_boot_diagnostics_on_vm_never_enabled(self, mock_vm_set, mock_vm_ self.assertTrue(mock_vm_get_to_update.called) mock_vm_set.assert_called_once_with(cmd, vm_fake, mock.ANY) - # @mock.patch('azure.cli.command_modules.vm.custom.get_vm', autospec=True) - # @mock.patch('azure.cli.command_modules.vm.custom.set_vm', autospec=True) + # @mock.patch('azure.cli.command_modules.vm.azure_stack.custom.get_vm', autospec=True) + # @mock.patch('azure.cli.command_modules.vm.azure_stack.custom.set_vm', autospec=True) # def test_enable_boot_diagnostics_skip_when_enabled_already(self, mock_vm_set, mock_vm_get): # vm_fake = mock.MagicMock() # cmd = _get_test_cmd() @@ -113,8 +113,8 @@ def test_enable_boot_diagnostics_on_vm_never_enabled(self, mock_vm_set, mock_vm_ # self.assertTrue(mock_vm_get.called) # self.assertFalse(mock_vm_set.called) - @mock.patch('azure.cli.command_modules.vm.custom.get_vm_to_update', autospec=True) - @mock.patch('azure.cli.command_modules.vm.custom.set_vm', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack.custom.get_vm_to_update', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack.custom.set_vm', autospec=True) def test_disable_boot_diagnostics_on_vm(self, mock_vm_set, mock_vm_get_to_update): vm_fake = mock.MagicMock() cmd = _get_test_cmd() @@ -127,8 +127,8 @@ def test_disable_boot_diagnostics_on_vm(self, mock_vm_set, mock_vm_get_to_update self.assertTrue(mock_vm_get_to_update.called) mock_vm_set.assert_called_once_with(cmd, vm_fake, mock.ANY) - @mock.patch('azure.cli.command_modules.vm.custom.get_vm_to_update', autospec=True) - @mock.patch('azure.cli.command_modules.vm.custom.set_vm', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack.custom.get_vm_to_update', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack.custom.set_vm', autospec=True) def test_attach_new_datadisk_default_on_vm(self, mock_vm_set, mock_vm_get_to_update): # pylint: disable=line-too-long faked_vhd_uri = 'https://your_stoage_account_name.blob.core.windows.net/vhds/d1.vhd' @@ -153,8 +153,8 @@ def test_attach_new_datadisk_default_on_vm(self, mock_vm_set, mock_vm_get_to_upd self.assertTrue(data_disk.name.startswith('vm1-')) self.assertEqual(data_disk.vhd.uri, faked_vhd_uri) - @mock.patch('azure.cli.command_modules.vm.custom.get_vm_to_update', autospec=True) - 
@mock.patch('azure.cli.command_modules.vm.custom.set_vm', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack.custom.get_vm_to_update', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack.custom.set_vm', autospec=True) def test_attach_new_datadisk_custom_on_vm(self, mock_vm_set, mock_vm_get_to_update): # pylint: disable=line-too-long faked_vhd_uri = 'https://your_stoage_account_name.blob.core.windows.net/vhds/d1.vhd' @@ -180,8 +180,8 @@ def test_attach_new_datadisk_custom_on_vm(self, mock_vm_set, mock_vm_get_to_upda self.assertEqual(data_disk.lun, 0) # the existing disk has '1', so it verifes the second one be picked as '0' self.assertEqual(data_disk.vhd.uri, faked_vhd_uri2) - @mock.patch('azure.cli.command_modules.vm.custom.get_vm_to_update', autospec=True) - @mock.patch('azure.cli.command_modules.vm.custom.set_vm', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack.custom.get_vm_to_update', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack.custom.set_vm', autospec=True) def test_attach_existing_datadisk_on_vm(self, mock_vm_set, mock_vm_get_to_update): # pylint: disable=line-too-long faked_vhd_uri = 'https://your_stoage_account_name.blob.core.windows.net/vhds/d1.vhd' @@ -206,8 +206,8 @@ def test_attach_existing_datadisk_on_vm(self, mock_vm_set, mock_vm_get_to_update self.assertEqual(data_disk.name, 'd1') self.assertEqual(data_disk.vhd.uri, faked_vhd_uri) - @mock.patch('azure.cli.command_modules.vm.custom.get_vm_to_update', autospec=True) - @mock.patch('azure.cli.command_modules.vm.custom.set_vm', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack.custom.get_vm_to_update', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack.custom.set_vm', autospec=True) def test_deattach_disk_on_vm(self, mock_vm_set, mock_vm_get_to_update): # pylint: disable=line-too-long # stub to get the vm which has no datadisks @@ -225,7 +225,7 @@ def test_deattach_disk_on_vm(self, mock_vm_set, mock_vm_get_to_update): mock_vm_set.assert_called_once_with(cmd, vm) self.assertEqual(len(vm.storage_profile.data_disks), 0) - @mock.patch('azure.cli.command_modules.vm.custom._compute_client_factory') + @mock.patch('azure.cli.command_modules.vm.azure_stack.custom._compute_client_factory') def test_show_vmss_instance_view(self, factory_mock): vm_client = mock.MagicMock() cmd = _get_test_cmd() @@ -239,8 +239,8 @@ def test_show_vmss_instance_view(self, factory_mock): select='instanceView', expand='instanceView') # pylint: disable=line-too-long - @mock.patch('azure.cli.command_modules.vm.disk_encryption._compute_client_factory', autospec=True) - @mock.patch('azure.cli.command_modules.vm.disk_encryption._get_keyvault_key_url', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack.disk_encryption._compute_client_factory', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack.disk_encryption._get_keyvault_key_url', autospec=True) def test_enable_encryption_error_cases_handling(self, mock_get_keyvault_key_url, mock_compute_client_factory): faked_keyvault = '/subscriptions/01234567-1bf0-4dda-aec3-cb9272f09590/resourceGroups/rg1/providers/Microsoft.KeyVault/vaults/v1' os_disk = OSDisk(create_option=None, os_type=OperatingSystemTypes.linux) @@ -266,8 +266,8 @@ def test_enable_encryption_error_cases_handling(self, mock_get_keyvault_key_url, self.assertTrue("--aad-client-cert-thumbprint or --aad-client-secret" in str(context.exception)) - 
@mock.patch('azure.cli.command_modules.vm.disk_encryption.set_vm', autospec=True) - @mock.patch('azure.cli.command_modules.vm.disk_encryption._compute_client_factory', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack.disk_encryption.set_vm', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack.disk_encryption._compute_client_factory', autospec=True) def test_disable_encryption_error_cases_handling(self, mock_compute_client_factory, mock_vm_set): # pylint: disable=unused-argument os_disk = OSDisk(create_option=None, os_type=OperatingSystemTypes.linux) existing_disk = DataDisk(lun=1, vhd='https://someuri', name='d1', create_option=DiskCreateOptionTypes.empty) @@ -375,7 +375,7 @@ def test_get_extension_instance_name_when_type_none(self): class TestVMBootLog(unittest.TestCase): - @mock.patch('azure.cli.command_modules.vm.custom.logger.warning') + @mock.patch('azure.cli.command_modules.vm.azure_stack.custom.logger.warning') def test_vm_boot_log_handle_unicode(self, logger_warning__mock): import sys writer = BootLogStreamWriter(sys.stdout) diff --git a/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2020_09_01/test_template_builder.py b/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2020_09_01/test_template_builder.py index 87df1e73e92..2a2dedfe405 100644 --- a/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2020_09_01/test_template_builder.py +++ b/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2020_09_01/test_template_builder.py @@ -6,12 +6,12 @@ import unittest from unittest import mock -from azure.cli.command_modules.vm._template_builder import build_load_balancer_resource +from azure.cli.command_modules.vm.azure_stack._template_builder import build_load_balancer_resource class TestTemplateBuilder(unittest.TestCase): - @mock.patch('azure.cli.command_modules.vm._template_builder.get_target_network_api', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack._template_builder.get_target_network_api', autospec=True) def test_build_load_balancer_resource(self, mock_get_api): mock_get_api.returtn_value = '1970-01-01' cmd_mock = mock.MagicMock() diff --git a/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2020_09_01/test_vm_actions.py b/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2020_09_01/test_vm_actions.py index a85d0ae46f6..a9ed67b5c48 100644 --- a/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2020_09_01/test_vm_actions.py +++ b/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2020_09_01/test_vm_actions.py @@ -10,18 +10,18 @@ from unittest import mock from azure.cli.core.keys import is_valid_ssh_rsa_public_key -from azure.cli.command_modules.vm._validators import (validate_ssh_key, - _figure_out_storage_source, - _validate_admin_username, - _validate_admin_password, - _parse_image_argument, - process_disk_create_namespace, - process_snapshot_create_namespace, - _validate_vmss_create_subnet, - _get_next_subnet_addr_suffix, - _validate_vm_vmss_msi, - _validate_vm_vmss_accelerated_networking) -from azure.cli.command_modules.vm._vm_utils import normalize_disk_info, update_disk_sku_info +from azure.cli.command_modules.vm.azure_stack._validators import (validate_ssh_key, + _figure_out_storage_source, + _validate_admin_username, + _validate_admin_password, + _parse_image_argument, + process_disk_create_namespace, + process_snapshot_create_namespace, + _validate_vmss_create_subnet, + _get_next_subnet_addr_suffix, + _validate_vm_vmss_msi, + 
_validate_vm_vmss_accelerated_networking) +from azure.cli.command_modules.vm.azure_stack._vm_utils import normalize_disk_info, update_disk_sku_info from azure.cli.core.mock import DummyCli from knack.util import CLIError @@ -190,7 +190,7 @@ def _verify_password_with_ex(self, admin_password, is_linux, expected_err): _validate_admin_password(admin_password, is_linux) self.assertTrue(expected_err in str(context.exception)) - @mock.patch('azure.cli.command_modules.vm._validators._compute_client_factory', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack._validators._compute_client_factory', autospec=True) def test_parse_image_argument(self, client_factory_mock): compute_client = mock.MagicMock() image = mock.MagicMock() @@ -215,8 +215,8 @@ def test_parse_image_argument(self, client_factory_mock): self.assertEqual('product1', np.plan_product) self.assertEqual('publisher1', np.plan_publisher) - @mock.patch('azure.cli.command_modules.vm._validators._compute_client_factory', autospec=True) - @mock.patch('azure.cli.command_modules.vm._validators.logger.warning', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack._validators._compute_client_factory', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack._validators.logger.warning', autospec=True) def test_parse_staging_image_argument(self, logger_mock, client_factory_mock): from azure.core.exceptions import ResourceNotFoundError compute_client = mock.MagicMock() @@ -295,7 +295,7 @@ def test_get_next_subnet_addr_suffix(self): _validate_vmss_create_subnet(np_mock) self.assertEqual(np_mock.app_gateway_subnet_address_prefix, '10.0.8.0/24') - @mock.patch('azure.cli.command_modules.vm._validators._resolve_role_id', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack._validators._resolve_role_id', autospec=True) @mock.patch('azure.cli.core.commands.client_factory.get_subscription_id', autospec=True) def test_validate_msi_on_create(self, mock_get_subscription, mock_resolve_role_id): # check throw on : az vm/vmss create --assign-identity --role reader --scope "" @@ -348,7 +348,7 @@ def test_validate_msi_on_create(self, mock_get_subscription, mock_resolve_role_i self.assertEqual(np_mock.identity_role, 'reader') mock_resolve_role_id.assert_called_with(cmd.cli_ctx, 'reader', 'foo-scope') - @mock.patch('azure.cli.command_modules.vm._validators._resolve_role_id', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack._validators._resolve_role_id', autospec=True) def test_validate_msi_on_assign_identity_command(self, mock_resolve_role_id): # check throw on : az vm/vmss assign-identity --role reader --scope "" np_mock = mock.MagicMock() @@ -426,7 +426,7 @@ def test_normalize_disk_info(self): normalize_disk_info(data_disk_cachings=['ReadWrite'], data_disk_sizes_gb=[1, 2], size='standard_L16s_v2') self.assertTrue('for Lv series of machines, "None" is the only supported caching mode' in str(err.exception)) - @mock.patch('azure.cli.command_modules.vm._validators._compute_client_factory', autospec=True) + @mock.patch('azure.cli.command_modules.vm.azure_stack._validators._compute_client_factory', autospec=True) def test_validate_vm_vmss_accelerated_networking(self, client_factory_mock): client_mock, size_mock = mock.MagicMock(), mock.MagicMock() client_mock.virtual_machine_sizes.list.return_value = [size_mock] diff --git a/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2020_09_01/test_vm_defaults.py 
diff --git a/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2020_09_01/test_vm_defaults.py b/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2020_09_01/test_vm_defaults.py
index e35577cdc9e..940e08cfc19 100644
--- a/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2020_09_01/test_vm_defaults.py
+++ b/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2020_09_01/test_vm_defaults.py
@@ -13,12 +13,12 @@
 
 from azure.cli.core.profiles import ResourceType
 
-from azure.cli.command_modules.vm._validators import (_validate_vm_vmss_create_vnet,
-                                                      _validate_vmss_create_subnet,
-                                                      _validate_vm_create_storage_account,
-                                                      _validate_vm_vmss_create_auth,
-                                                      _validate_vm_create_storage_profile,
-                                                      _validate_vmss_create_load_balancer_or_app_gateway)
+from azure.cli.command_modules.vm.azure_stack._validators import (_validate_vm_vmss_create_vnet,
+                                                                  _validate_vmss_create_subnet,
+                                                                  _validate_vm_create_storage_account,
+                                                                  _validate_vm_vmss_create_auth,
+                                                                  _validate_vm_create_storage_profile,
+                                                                  _validate_vmss_create_load_balancer_or_app_gateway)
 
 
 def _get_test_cmd():
@@ -396,7 +396,7 @@ def test_linux_with_password_and_ssh_implicit(self):
 
 class TestVMImageDefaults(unittest.TestCase):
 
-    @mock.patch('azure.cli.command_modules.vm._validators._compute_client_factory', autospec=True)
+    @mock.patch('azure.cli.command_modules.vm.azure_stack._validators._compute_client_factory', autospec=True)
     def test_vm_validator_retrieve_image_info_cross_subscription(self, factory_mock):
         ns = argparse.Namespace()
         cmd = mock.MagicMock()
@@ -424,7 +424,7 @@ def test_vm_validator_retrieve_image_info_cross_subscription(self, factory_mock)
         self.assertEqual(ns.os_type.value, 'someOS')
         self.assertTrue(0 in ns.disk_info)
 
-    @mock.patch('azure.cli.command_modules.vm._validators._compute_client_factory', autospec=True)
+    @mock.patch('azure.cli.command_modules.vm.azure_stack._validators._compute_client_factory', autospec=True)
     def test_vm_validator_enables_ultrassd_lrs(self, factory_mock):
         ns = argparse.Namespace()
         cmd = mock.MagicMock()
diff --git a/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2020_09_01/test_vm_image.py b/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2020_09_01/test_vm_image.py
index d5d1fc34821..d7618b033b9 100644
--- a/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2020_09_01/test_vm_image.py
+++ b/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2020_09_01/test_vm_image.py
@@ -29,7 +29,7 @@ def _get_test_cmd():
 class TestVMImage(unittest.TestCase):
     def test_read_images_from_alias_doc(self):
-        from azure.cli.command_modules.vm.custom import list_vm_images
+        from azure.cli.command_modules.vm.azure_stack.custom import list_vm_images
 
         cmd = _get_test_cmd()
 
         # action
@@ -50,7 +50,7 @@ def test_read_images_from_alias_doc(self):
 
     @mock.patch('azure.cli.core.cloud.get_active_cloud', autospec=True)
     def test_when_alias_doc_is_missing(self, mock_get_active_cloud):
-        from azure.cli.command_modules.vm._actions import load_images_from_aliases_doc
+        from azure.cli.command_modules.vm.azure_stack._actions import load_images_from_aliases_doc
         p = mock.PropertyMock(side_effect=CloudEndpointNotSetException(''))
         mock_cloud = mock.MagicMock()
         type(mock_cloud.endpoints).vm_image_alias_doc = p
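The vm. to vm.azure_stack. substitution recurs in every import and patch target across these hybrid test files. If that duplication ever becomes a maintenance burden, a small profile-aware resolver could centralize it; vm_stack_module below is a hypothetical helper sketched for illustration, not something this diff introduces:

    import importlib

    VM_BASE = 'azure.cli.command_modules.vm'

    def vm_stack_module(name, hybrid=True):
        """Import vm.azure_stack.<name> for hybrid profiles, vm.<name> otherwise."""
        package = VM_BASE + '.azure_stack' if hybrid else VM_BASE
        return importlib.import_module('{}.{}'.format(package, name))

    # Usage in a test body:
    #     validators = vm_stack_module('_validators')
    #     validators._validate_vm_create_nics(cmd, namespace)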
diff --git a/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2020_09_01/test_vm_parameters.py b/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2020_09_01/test_vm_parameters.py
index f983141a16a..e38ea68dbe9 100644
--- a/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2020_09_01/test_vm_parameters.py
+++ b/src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2020_09_01/test_vm_parameters.py
@@ -33,7 +33,7 @@ def _mock_get_subscription_id(_):
 
     def test_vm_nics(self):
         from argparse import Namespace
-        from azure.cli.command_modules.vm._validators import _validate_vm_create_nics
+        from azure.cli.command_modules.vm.azure_stack._validators import _validate_vm_create_nics
 
         def _get_test_cmd():
             from azure.cli.core.mock import DummyCli
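As a quick smoke test of the rewired imports, the hybrid-profile unit tests can be run on their own; a minimal sketch, assuming pytest is available in the development environment and using the repository-relative path from this diff:

    import sys

    import pytest

    # Run only the hybrid 2020-09-01 profile unit tests touched above.
    sys.exit(pytest.main([
        'src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2020_09_01',
        '-q',
    ]))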