diff --git a/src/storagecache/HISTORY.rst b/src/storagecache/HISTORY.rst
new file mode 100644
index 00000000000..1c139576ba0
--- /dev/null
+++ b/src/storagecache/HISTORY.rst
@@ -0,0 +1,8 @@
+.. :changelog:
+
+Release History
+===============
+
+0.1.0
+++++++
+* Initial release.
diff --git a/src/storagecache/README.md b/src/storagecache/README.md
new file mode 100644
index 00000000000..ca9a29e5bc1
--- /dev/null
+++ b/src/storagecache/README.md
@@ -0,0 +1,107 @@
+# Azure CLI storagecache Extension #
+This is the Azure CLI extension for the Azure HPC Cache (storagecache) service.
+
+### How to use ###
+Install this extension using the CLI command below:
+```
+az extension add --name storagecache
+```
+
+### Included Features ###
+#### storagecache sku ####
+##### List #####
+```
+az storagecache sku list
+```
+#### storagecache usage-model ####
+##### List #####
+```
+az storagecache usage-model list
+```
+#### storagecache asc-operation ####
+##### Show #####
+```
+az storagecache asc-operation show --operation-id "testoperationid" --location "westus"
+```
+#### storagecache cache ####
+##### Create #####
+```
+az storagecache cache create --location "westus" --cache-size-gb 3072 \
+ --subnet "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Network/virtualNetworks/scvnet/subnets/sub1" \
+ --sku-name "Standard_2G" --tags "{\\"Dept\\":\\"ContosoAds\\"}" --cache-name "sc1" --resource-group "scgroup"
+```
+##### Create #####
+```
+az storagecache cache create --location "westus" --cache-size-gb 3072 \
+ --subnet "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Network/virtualNetworks/scvnet/subnets/sub1" \
+ --sku-name "Standard_2G" --tags "{\\"Dept\\":\\"ContosoAds\\"}" --cache-name "sc1" --resource-group "scgroup"
+```
+##### List #####
+```
+az storagecache cache list --resource-group "scgroup"
+```
+##### Show #####
+```
+az storagecache cache show --cache-name "sc1" --resource-group "scgroup"
+```
+##### Update #####
+```
+az storagecache cache update --location "westus" --cache-size-gb 3072 \
+ --subnet "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Network/virtualNetworks/scvnet/subnets/sub1" \
+ --sku-name "Standard_2G" --tags "{\\"Dept\\":\\"ContosoAds\\"}" --cache-name "sc1" --resource-group "scgroup"
+```
+##### Update #####
+```
+az storagecache cache update --location "westus" --cache-size-gb 3072 \
+ --subnet "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Network/virtualNetworks/scvnet/subnets/sub1" \
+ --sku-name "Standard_2G" --tags "{\\"Dept\\":\\"ContosoAds\\"}" --cache-name "sc1" --resource-group "scgroup"
+```
+##### Flush #####
+```
+az storagecache cache flush --cache-name "sc" --resource-group "scgroup"
+```
+##### Start #####
+```
+az storagecache cache start --cache-name "sc" --resource-group "scgroup"
+```
+##### Stop #####
+```
+az storagecache cache stop --cache-name "sc" --resource-group "scgroup"
+```
+##### Upgrade-firmware #####
+```
+az storagecache cache upgrade-firmware --cache-name "sc1" --resource-group "scgroup"
+```
+##### Delete #####
+```
+az storagecache cache delete --cache-name "sc" --resource-group "scgroup"
+```
+#### storagecache storage-target ####
+##### Create #####
+```
+az storagecache storage-target create --cache-name "sc1" --resource-group "scgroup" --name "st1" \
+ --junctions namespace-path="/path/on/cache" nfs-access-policy="default" nfs-export="exp1" target-path="/path/on/exp1" \
+ --junctions namespace-path="/path2/on/cache" nfs-access-policy="rootSquash" nfs-export="exp2" target-path="/path2/on/exp2" \
+ --nfs3 target="10.0.44.44" usage-model="READ_HEAVY_INFREQ" --target-type "nfs3"
+
+az storagecache storage-target wait --created --resource-group "{rg}" --name "{myStorageTarget}"
+```
+##### Create #####
+```
+az storagecache storage-target create --cache-name "sc1" --resource-group "scgroup" --name "st1" \
+ --nfs3 target="10.0.44.44" usage-model="READ_HEAVY_INFREQ" --target-type "nfs3"
+
+az storagecache storage-target wait --created --resource-group "{rg}" --name "{myStorageTarget}"
+```
+##### List #####
+```
+az storagecache storage-target list --cache-name "sc1" --resource-group "scgroup"
+```
+##### Show #####
+```
+az storagecache storage-target show --cache-name "sc1" --resource-group "scgroup" --name "st1"
+```
+##### Delete #####
+```
+az storagecache storage-target delete --cache-name "sc1" --resource-group "scgroup" --name "st1"
+```
\ No newline at end of file
diff --git a/src/storagecache/azext_storagecache/__init__.py b/src/storagecache/azext_storagecache/__init__.py
new file mode 100644
index 00000000000..d87eedc9417
--- /dev/null
+++ b/src/storagecache/azext_storagecache/__init__.py
@@ -0,0 +1,50 @@
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+from azure.cli.core import AzCommandsLoader
+from azext_storagecache.generated._help import helps # pylint: disable=unused-import
+try:
+ from azext_storagecache.manual._help import helps # pylint: disable=reimported
+except ImportError:
+ pass
+
+
+class StorageCacheManagementClientCommandsLoader(AzCommandsLoader):
+
+ def __init__(self, cli_ctx=None):
+ from azure.cli.core.commands import CliCommandType
+ from azext_storagecache.generated._client_factory import cf_storagecache_cl
+ storagecache_custom = CliCommandType(
+ operations_tmpl='azext_storagecache.custom#{}',
+ client_factory=cf_storagecache_cl)
+ parent = super(StorageCacheManagementClientCommandsLoader, self)
+ parent.__init__(cli_ctx=cli_ctx, custom_command_type=storagecache_custom)
+
+ def load_command_table(self, args):
+ from azext_storagecache.generated.commands import load_command_table
+ load_command_table(self, args)
+ try:
+ from azext_storagecache.manual.commands import load_command_table as load_command_table_manual
+ load_command_table_manual(self, args)
+ except ImportError:
+ pass
+ return self.command_table
+
+ def load_arguments(self, command):
+ from azext_storagecache.generated._params import load_arguments
+ load_arguments(self, command)
+ try:
+ from azext_storagecache.manual._params import load_arguments as load_arguments_manual
+ load_arguments_manual(self, command)
+ except ImportError:
+ pass
+
+
+COMMAND_LOADER_CLS = StorageCacheManagementClientCommandsLoader
diff --git a/src/storagecache/azext_storagecache/action.py b/src/storagecache/azext_storagecache/action.py
new file mode 100644
index 00000000000..d95d53bf711
--- /dev/null
+++ b/src/storagecache/azext_storagecache/action.py
@@ -0,0 +1,17 @@
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+# pylint: disable=wildcard-import
+# pylint: disable=unused-wildcard-import
+
+from .generated.action import * # noqa: F403
+try:
+ from .manual.action import * # noqa: F403
+except ImportError:
+ pass
diff --git a/src/storagecache/azext_storagecache/azext_metadata.json b/src/storagecache/azext_storagecache/azext_metadata.json
new file mode 100644
index 00000000000..4f48fa652a5
--- /dev/null
+++ b/src/storagecache/azext_storagecache/azext_metadata.json
@@ -0,0 +1,4 @@
+{
+ "azext.isExperimental": true,
+ "azext.minCliCoreVersion": "2.11.0"
+}
\ No newline at end of file
diff --git a/src/storagecache/azext_storagecache/custom.py b/src/storagecache/azext_storagecache/custom.py
new file mode 100644
index 00000000000..dbe9d5f9742
--- /dev/null
+++ b/src/storagecache/azext_storagecache/custom.py
@@ -0,0 +1,17 @@
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+# pylint: disable=wildcard-import
+# pylint: disable=unused-wildcard-import
+
+from .generated.custom import * # noqa: F403
+try:
+ from .manual.custom import * # noqa: F403
+except ImportError:
+ pass
diff --git a/src/storagecache/azext_storagecache/generated/__init__.py b/src/storagecache/azext_storagecache/generated/__init__.py
new file mode 100644
index 00000000000..c9cfdc73e77
--- /dev/null
+++ b/src/storagecache/azext_storagecache/generated/__init__.py
@@ -0,0 +1,12 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+__path__ = __import__('pkgutil').extend_path(__path__, __name__)
diff --git a/src/storagecache/azext_storagecache/generated/_client_factory.py b/src/storagecache/azext_storagecache/generated/_client_factory.py
new file mode 100644
index 00000000000..c79266b21c7
--- /dev/null
+++ b/src/storagecache/azext_storagecache/generated/_client_factory.py
@@ -0,0 +1,36 @@
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+
+def cf_storagecache_cl(cli_ctx, *_):
+ from azure.cli.core.commands.client_factory import get_mgmt_service_client
+ from ..vendored_sdks.storagecache import StorageCacheManagementClient
+ return get_mgmt_service_client(cli_ctx,
+ StorageCacheManagementClient)
+
+
+def cf_sku(cli_ctx, *_):
+ return cf_storagecache_cl(cli_ctx).sku
+
+
+def cf_usage_model(cli_ctx, *_):
+ return cf_storagecache_cl(cli_ctx).usage_model
+
+
+def cf_ascoperation(cli_ctx, *_):
+ return cf_storagecache_cl(cli_ctx).asc_operation
+
+
+def cf_cache(cli_ctx, *_):
+ return cf_storagecache_cl(cli_ctx).cache
+
+
+def cf_storage_target(cli_ctx, *_):
+ return cf_storagecache_cl(cli_ctx).storage_target
diff --git a/src/storagecache/azext_storagecache/generated/_help.py b/src/storagecache/azext_storagecache/generated/_help.py
new file mode 100644
index 00000000000..2968c08e77c
--- /dev/null
+++ b/src/storagecache/azext_storagecache/generated/_help.py
@@ -0,0 +1,331 @@
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+# pylint: disable=too-many-lines
+
+from knack.help_files import helps
+
+
+helps['storagecache sku'] = """
+ type: group
+ short-summary: storagecache sku
+"""
+
+helps['storagecache sku list'] = """
+ type: command
+ short-summary: "Get the list of StorageCache.Cache SKUs available to this subscription."
+ examples:
+ - name: Skus_List
+ text: |-
+ az storagecache sku list
+"""
+
+helps['storagecache usage-model'] = """
+ type: group
+ short-summary: storagecache usage-model
+"""
+
+helps['storagecache usage-model list'] = """
+ type: command
+ short-summary: "Get the list of Cache Usage Models available to this subscription."
+ examples:
+ - name: UsageModels_List
+ text: |-
+ az storagecache usage-model list
+"""
+
+helps['storagecache asc-operation'] = """
+ type: group
+ short-summary: storagecache asc-operation
+"""
+
+helps['storagecache asc-operation show'] = """
+ type: command
+ short-summary: "Gets the status of an asynchronous operation for the Azure HPC Cache."
+ examples:
+ - name: AscOperations_Get
+ text: |-
+ az storagecache asc-operation show --operation-id "testoperationid" --location "westus"
+"""
+
+helps['storagecache cache'] = """
+ type: group
+ short-summary: storagecache cache
+"""
+
+helps['storagecache cache list'] = """
+ type: command
+ short-summary: "Returns all Caches the user has access to under a resource group. And Returns all Caches the user \
+has access to under a subscription."
+ examples:
+ - name: Caches_ListByResourceGroup
+ text: |-
+ az storagecache cache list --resource-group "scgroup"
+ - name: Caches_List
+ text: |-
+ az storagecache cache list
+"""
+
+helps['storagecache cache show'] = """
+ type: command
+ short-summary: "Returns a Cache."
+ examples:
+ - name: Caches_Get
+ text: |-
+ az storagecache cache show --cache-name "sc1" --resource-group "scgroup"
+"""
+
+helps['storagecache cache create'] = """
+ type: command
+ short-summary: "Create a Cache."
+ parameters:
+ - name: --directory-services-settings-username-download-credentials
+ short-summary: "When present, these are the credentials for the secure LDAP connection."
+ long-summary: |
+ Usage: --directory-services-settings-username-download-credentials bind-dn=XX bind-password=XX
+
+ bind-dn: The Bind distinguished name identity to be used in the secure LDAP connection. This value is \
+stored encrypted and not returned on response.
+ bind-password: The Bind password to be used in the secure LDAP connection. This value is stored encrypted \
+and not returned on response.
+ - name: --security-settings-access-policies
+ short-summary: "NFS access policies defined for this cache."
+ long-summary: |
+ Usage: --security-settings-access-policies name=XX access-rules=XX
+
+ name: Name identifying this policy. Access Policy names are not case sensitive.
+ access-rules: The set of rules describing client accesses allowed under this policy.
+
+ Multiple actions can be specified by using more than one --security-settings-access-policies argument.
+ examples:
+ - name: Caches_CreateOrUpdate
+ text: |-
+ az storagecache cache create --location "westus" --cache-size-gb 3072 --subnet \
+"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Network/virtualNetworks\
+/scvnet/subnets/sub1" --sku-name "Standard_2G" --tags "{\\"Dept\\":\\"ContosoAds\\"}" --cache-name "sc1" \
+--resource-group "scgroup"
+ - name: Caches_CreateOrUpdate_ldap_only
+ text: |-
+ az storagecache cache create --location "westus" --cache-size-gb 3072 --subnet \
+"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Network/virtualNetworks\
+/scvnet/subnets/sub1" --sku-name "Standard_2G" --tags "{\\"Dept\\":\\"ContosoAds\\"}" --cache-name "sc1" \
+--resource-group "scgroup"
+"""
+
+helps['storagecache cache update'] = """
+ type: command
+ short-summary: "Update a Cache instance."
+ parameters:
+ - name: --directory-services-settings-username-download-credentials
+ short-summary: "When present, these are the credentials for the secure LDAP connection."
+ long-summary: |
+ Usage: --directory-services-settings-username-download-credentials bind-dn=XX bind-password=XX
+
+ bind-dn: The Bind distinguished name identity to be used in the secure LDAP connection. This value is \
+stored encrypted and not returned on response.
+ bind-password: The Bind password to be used in the secure LDAP connection. This value is stored encrypted \
+and not returned on response.
+ - name: --security-settings-access-policies
+ short-summary: "NFS access policies defined for this cache."
+ long-summary: |
+ Usage: --security-settings-access-policies name=XX access-rules=XX
+
+ name: Name identifying this policy. Access Policy names are not case sensitive.
+ access-rules: The set of rules describing client accesses allowed under this policy.
+
+ Multiple actions can be specified by using more than one --security-settings-access-policies argument.
+ examples:
+ - name: Caches_Update
+ text: |-
+ az storagecache cache update --location "westus" --cache-size-gb 3072 --subnet \
+"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Network/virtualNetworks\
+/scvnet/subnets/sub1" --sku-name "Standard_2G" --tags "{\\"Dept\\":\\"ContosoAds\\"}" --cache-name "sc1" \
+--resource-group "scgroup"
+ - name: Caches_Update_ldap_only
+ text: |-
+ az storagecache cache update --location "westus" --cache-size-gb 3072 --subnet \
+"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/scgroup/providers/Microsoft.Network/virtualNetworks\
+/scvnet/subnets/sub1" --sku-name "Standard_2G" --tags "{\\"Dept\\":\\"ContosoAds\\"}" --cache-name "sc1" \
+--resource-group "scgroup"
+"""
+
+helps['storagecache cache delete'] = """
+ type: command
+ short-summary: "Schedules a Cache for deletion."
+ examples:
+ - name: Caches_Delete
+ text: |-
+ az storagecache cache delete --cache-name "sc" --resource-group "scgroup"
+"""
+
+helps['storagecache cache flush'] = """
+ type: command
+ short-summary: "Tells a Cache to write all dirty data to the Storage Target(s). During the flush, clients will see \
+errors returned until the flush is complete."
+ examples:
+ - name: Caches_Flush
+ text: |-
+ az storagecache cache flush --cache-name "sc" --resource-group "scgroup"
+"""
+
+helps['storagecache cache start'] = """
+ type: command
+ short-summary: "Tells a Stopped state Cache to transition to Active state."
+ examples:
+ - name: Caches_Start
+ text: |-
+ az storagecache cache start --cache-name "sc" --resource-group "scgroup"
+"""
+
+helps['storagecache cache stop'] = """
+ type: command
+ short-summary: "Tells an Active Cache to transition to Stopped state."
+ examples:
+ - name: Caches_Stop
+ text: |-
+ az storagecache cache stop --cache-name "sc" --resource-group "scgroup"
+"""
+
+helps['storagecache cache upgrade-firmware'] = """
+ type: command
+ short-summary: "Upgrade a Cache's firmware if a new version is available. Otherwise, this operation has no \
+effect."
+ examples:
+ - name: Caches_UpgradeFirmware
+ text: |-
+ az storagecache cache upgrade-firmware --cache-name "sc1" --resource-group "scgroup"
+"""
+
+helps['storagecache cache wait'] = """
+ type: command
+ short-summary: Place the CLI in a waiting state until a condition of the storagecache cache is met.
+ examples:
+ - name: Pause executing next line of CLI script until the storagecache cache is successfully created.
+ text: |-
+ az storagecache cache wait --cache-name "sc1" --resource-group "scgroup" --created
+ - name: Pause executing next line of CLI script until the storagecache cache is successfully deleted.
+ text: |-
+ az storagecache cache wait --cache-name "sc1" --resource-group "scgroup" --deleted
+"""
+
+helps['storagecache storage-target'] = """
+ type: group
+ short-summary: storagecache storage-target
+"""
+
+helps['storagecache storage-target list'] = """
+ type: command
+ short-summary: "Returns a list of Storage Targets for the specified Cache."
+ examples:
+ - name: StorageTargets_List
+ text: |-
+ az storagecache storage-target list --cache-name "sc1" --resource-group "scgroup"
+"""
+
+helps['storagecache storage-target show'] = """
+ type: command
+ short-summary: "Returns a Storage Target from a Cache."
+ examples:
+ - name: StorageTargets_Get
+ text: |-
+ az storagecache storage-target show --cache-name "sc1" --resource-group "scgroup" --name "st1"
+"""
+
+helps['storagecache storage-target create'] = """
+ type: command
+ short-summary: "Create a Storage Target. This operation is allowed at any time, but if the Cache is down or \
+unhealthy, the actual creation/modification of the Storage Target may be delayed until the Cache is healthy again."
+ parameters:
+ - name: --junctions
+ short-summary: "List of Cache namespace junctions to target for namespace associations."
+ long-summary: |
+ Usage: --junctions namespace-path=XX target-path=XX nfs-export=XX nfs-access-policy=XX
+
+ namespace-path: Namespace path on a Cache for a Storage Target.
+ target-path: Path in Storage Target to which namespacePath points.
+ nfs-export: NFS export where targetPath exists.
+ nfs-access-policy: Name of the access policy applied to this junction.
+
+ Multiple actions can be specified by using more than one --junctions argument.
+ - name: --nfs3
+ short-summary: "Properties when targetType is nfs3."
+ long-summary: |
+ Usage: --nfs3 target=XX usage-model=XX
+
+ target: IP address or host name of an NFSv3 host (e.g., 10.0.44.44).
+ usage-model: Identifies the usage model to be used for this Storage Target. Get choices from \
+.../usageModels
+ examples:
+ - name: StorageTargets_CreateOrUpdate
+ text: |-
+ az storagecache storage-target create --cache-name "sc1" --resource-group "scgroup" --name "st1" \
+--junctions namespace-path="/path/on/cache" nfs-access-policy="default" nfs-export="exp1" target-path="/path/on/exp1" \
+--junctions namespace-path="/path2/on/cache" nfs-access-policy="rootSquash" nfs-export="exp2" \
+target-path="/path2/on/exp2" --nfs3 target="10.0.44.44" usage-model="READ_HEAVY_INFREQ" --target-type "nfs3"
+ - name: StorageTargets_CreateOrUpdate_NoJunctions
+ text: |-
+ az storagecache storage-target create --cache-name "sc1" --resource-group "scgroup" --name "st1" --nfs3 \
+target="10.0.44.44" usage-model="READ_HEAVY_INFREQ" --target-type "nfs3"
+"""
+
+helps['storagecache storage-target update'] = """
+ type: command
+ short-summary: "Update a Storage Target. This operation is allowed at any time, but if the Cache is down or \
+unhealthy, the actual creation/modification of the Storage Target may be delayed until the Cache is healthy again."
+ parameters:
+ - name: --junctions
+ short-summary: "List of Cache namespace junctions to target for namespace associations."
+ long-summary: |
+ Usage: --junctions namespace-path=XX target-path=XX nfs-export=XX nfs-access-policy=XX
+
+ namespace-path: Namespace path on a Cache for a Storage Target.
+ target-path: Path in Storage Target to which namespacePath points.
+ nfs-export: NFS export where targetPath exists.
+ nfs-access-policy: Name of the access policy applied to this junction.
+
+ Multiple actions can be specified by using more than one --junctions argument.
+ - name: --nfs3
+ short-summary: "Properties when targetType is nfs3."
+ long-summary: |
+ Usage: --nfs3 target=XX usage-model=XX
+
+ target: IP address or host name of an NFSv3 host (e.g., 10.0.44.44).
+ usage-model: Identifies the usage model to be used for this Storage Target. Get choices from \
+.../usageModels
+"""
+
+helps['storagecache storage-target delete'] = """
+ type: command
+ short-summary: "Removes a Storage Target from a Cache. This operation is allowed at any time, but if the Cache is \
+down or unhealthy, the actual removal of the Storage Target may be delayed until the Cache is healthy again. Note that \
+if the Cache has data to flush to the Storage Target, the data will be flushed before the Storage Target will be \
+deleted."
+ examples:
+ - name: StorageTargets_Delete
+ text: |-
+ az storagecache storage-target delete --cache-name "sc1" --resource-group "scgroup" --name "st1"
+"""
+
+helps['storagecache storage-target wait'] = """
+ type: command
+ short-summary: Place the CLI in a waiting state until a condition of the storagecache storage-target is met.
+ examples:
+ - name: Pause executing next line of CLI script until the storagecache storage-target is successfully created.
+ text: |-
+ az storagecache storage-target wait --cache-name "sc1" --resource-group "scgroup" --name "st1" \
+--created
+ - name: Pause executing next line of CLI script until the storagecache storage-target is successfully updated.
+ text: |-
+ az storagecache storage-target wait --cache-name "sc1" --resource-group "scgroup" --name "st1" \
+--updated
+ - name: Pause executing next line of CLI script until the storagecache storage-target is successfully deleted.
+ text: |-
+ az storagecache storage-target wait --cache-name "sc1" --resource-group "scgroup" --name "st1" \
+--deleted
+"""
diff --git a/src/storagecache/azext_storagecache/generated/_params.py b/src/storagecache/azext_storagecache/generated/_params.py
new file mode 100644
index 00000000000..0c86d100908
--- /dev/null
+++ b/src/storagecache/azext_storagecache/generated/_params.py
@@ -0,0 +1,277 @@
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+# pylint: disable=too-many-lines
+# pylint: disable=too-many-statements
+
+from azure.cli.core.commands.parameters import (
+ tags_type,
+ get_three_state_flag,
+ get_enum_type,
+ resource_group_name_type,
+ get_location_type
+)
+from azure.cli.core.commands.validators import get_default_location_from_resource_group
+from azext_storagecache.action import (
+ AddDirectoryServicesSettingsUsernameDownloadCredentials,
+ AddSecuritySettingsAccessPolicies,
+ AddJunctions,
+ AddNfs3,
+ AddUnknownUnknownMap
+)
+
+
+def load_arguments(self, _):
+    """Register argument metadata for every `storagecache` command.
+
+    ``self`` is the AzCommandsLoader instance; ``_`` is the raw command string
+    (unused here). Autogenerated by AutoRest — manual edits are lost on
+    regeneration.
+
+    NOTE(review): the stray '' literals inside the get_enum_type lists below
+    are implicit-string-concatenation artifacts of the generator ('' + 'X'
+    == 'X'), so the enum choices are unaffected.
+    """
+
+    with self.argument_context('storagecache asc-operation show') as c:
+        c.argument('location', arg_type=get_location_type(self.cli_ctx), id_part='name')
+        c.argument('operation_id', type=str, help='The operation id which uniquely identifies the asynchronous '
+                   'operation.', id_part='child_name_1')
+
+    with self.argument_context('storagecache cache list') as c:
+        c.argument('resource_group_name', resource_group_name_type)
+
+    with self.argument_context('storagecache cache show') as c:
+        c.argument('resource_group_name', resource_group_name_type)
+        c.argument('cache_name', type=str, help='Name of Cache. Length of name must not be greater than 80 and chars '
+                   'must be from the [-0-9a-zA-Z_] char class.')
+
+    # NOTE(review): 'cache create' and 'cache update' deliberately register the
+    # same (large) flattened parameter set; the duplication comes from the
+    # generator and must stay in sync between the two contexts.
+    with self.argument_context('storagecache cache create') as c:
+        c.argument('resource_group_name', resource_group_name_type)
+        c.argument('cache_name', type=str, help='Name of Cache. Length of name must not be greater than 80 and chars '
+                   'must be from the [-0-9a-zA-Z_] char class.')
+        c.argument('tags', tags_type)
+        c.argument('location', arg_type=get_location_type(self.cli_ctx),
+                   validator=get_default_location_from_resource_group)
+        c.argument('cache_size_gb', type=int, help='The size of this Cache, in GB.')
+        c.argument('provisioning_state', arg_type=get_enum_type(['Succeeded', 'Failed', 'Cancelled', 'Creating', ''
+                                                                 'Deleting', 'Updating']), help='ARM provisioning '
+                   'state, see https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/Addendum.md#provisi'
+                   'oningstate-property')
+        c.argument('subnet', type=str, help='Subnet used for the Cache.')
+        c.argument('directory_services_settings_username_download_extended_groups_enabled',
+                   arg_type=get_three_state_flag(), help='This indicates if Extended Groups is enabled.')
+        c.argument('directory_services_settings_username_download_username_source', arg_type=get_enum_type(['AD', ''
+                                                                                                            'LDAP', ''
+                                                                                                            'File', ''
+                                                                                                            'None']),
+                   help='This setting determines how the system gets username and group names for clients.')
+        c.argument('directory_services_settings_username_download_group_file_uri', type=str, help='The URI of the file '
+                   'containing the group information (in /etc/group file format). This field must be populated when '
+                   '\'usernameSource\' is set to \'File\'.')
+        c.argument('directory_services_settings_username_download_user_file_uri', type=str, help='The URI of the file '
+                   'containing the user information (in /etc/passwd file format). This field must be populated when '
+                   '\'usernameSource\' is set to \'File\'.')
+        c.argument('directory_services_settings_username_download_ldap_server', type=str, help='The fully qualified '
+                   'domain name or IP address of the LDAP server to use.')
+        c.argument('directory_services_settings_username_download_ldap_base_dn', type=str, help='The base '
+                   'distinguished name for the LDAP domain.')
+        c.argument('directory_services_settings_username_download_encrypt_ldap_connection',
+                   arg_type=get_three_state_flag(), help='This indicates if the LDAP connection should be encrypted.')
+        c.argument('directory_services_settings_username_download_require_valid_certificate',
+                   arg_type=get_three_state_flag(), help='Determines if the certificates should be validated by a '
+                   'certificate authority. When true, caCertificateURI must be provided.')
+        c.argument('directory_services_settings_username_download_auto_download_certificate',
+                   arg_type=get_three_state_flag(), help='Determines if the certificate should be automatically '
+                   'downloaded. This applies to \'caCertificateURI\' when \'requireValidCertificate\' is true, or a '
+                   'self signed certificate otherwise.')
+        c.argument('directory_services_settings_username_download_ca_certificate_uri', type=str, help='The URI of the '
+                   'CA certificate to validate the LDAP secure connection. This field must be populated when '
+                   '\'requireValidCertificate\' is set to true.')
+        c.argument('directory_services_settings_username_download_credentials',
+                   action=AddDirectoryServicesSettingsUsernameDownloadCredentials, nargs='*', help='When present, '
+                   'these are the credentials for the secure LDAP connection.')
+        c.argument('directory_services_settings_active_directory_primary_dns_ip_address', type=str, help='Primary DNS '
+                   'IP address used to resolve the Active Directory domain controller\'s fully qualified domain name.')
+        c.argument('directory_services_settings_active_directory_secondary_dns_ip_address', type=str, help='Secondary '
+                   'DNS IP address used to resolve the Active Directory domain controller\'s fully qualified domain '
+                   'name.')
+        c.argument('directory_services_settings_active_directory_domain_name', type=str, help='The fully qualified '
+                   'domain name of the Active Directory domain controller.')
+        c.argument('directory_services_settings_active_directory_domain_net_bios', type=str, help='The Active '
+                   'Directory domain\'s NetBIOS name.')
+        c.argument('directory_services_settings_active_directory_smb_server_name', type=str, help='The name (NetBIOS) '
+                   'used for the HPC Cache to join the Active Directory domain. Length must not be greater than 15 and '
+                   'chars must be from the [-0-9a-zA-Z_] char class.')
+        c.argument('security_settings_access_policies', action=AddSecuritySettingsAccessPolicies, nargs='*', help='NFS '
+                   'access policies defined for this cache.')
+        c.argument('encryption_settings_key_encryption_key_key_url', type=str, help='The URL referencing a key '
+                   'encryption key in Key Vault.')
+        c.argument('encryption_settings_key_encryption_key_source_vault_id', type=str, help='Resource Id.')
+        c.argument('network_settings_mtu', type=int, help='The IPv4 maximum transmission unit configured for the '
+                   'subnet.')
+        c.argument('sku_name', type=str, help='SKU name for this Cache.')
+        c.argument('identity_type', arg_type=get_enum_type(['SystemAssigned', 'None']), help='The type of identity '
+                   'used for the cache')
+
+    # Duplicate of the 'cache create' argument set above (generated); see the
+    # NOTE on the create context.
+    with self.argument_context('storagecache cache update') as c:
+        c.argument('resource_group_name', resource_group_name_type)
+        c.argument('cache_name', type=str, help='Name of Cache. Length of name must not be greater than 80 and chars '
+                   'must be from the [-0-9a-zA-Z_] char class.')
+        c.argument('tags', tags_type)
+        c.argument('location', arg_type=get_location_type(self.cli_ctx),
+                   validator=get_default_location_from_resource_group)
+        c.argument('cache_size_gb', type=int, help='The size of this Cache, in GB.')
+        c.argument('provisioning_state', arg_type=get_enum_type(['Succeeded', 'Failed', 'Cancelled', 'Creating', ''
+                                                                 'Deleting', 'Updating']), help='ARM provisioning '
+                   'state, see https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/Addendum.md#provisi'
+                   'oningstate-property')
+        c.argument('subnet', type=str, help='Subnet used for the Cache.')
+        c.argument('directory_services_settings_username_download_extended_groups_enabled',
+                   arg_type=get_three_state_flag(), help='This indicates if Extended Groups is enabled.')
+        c.argument('directory_services_settings_username_download_username_source', arg_type=get_enum_type(['AD', ''
+                                                                                                            'LDAP', ''
+                                                                                                            'File', ''
+                                                                                                            'None']),
+                   help='This setting determines how the system gets username and group names for clients.')
+        c.argument('directory_services_settings_username_download_group_file_uri', type=str, help='The URI of the file '
+                   'containing the group information (in /etc/group file format). This field must be populated when '
+                   '\'usernameSource\' is set to \'File\'.')
+        c.argument('directory_services_settings_username_download_user_file_uri', type=str, help='The URI of the file '
+                   'containing the user information (in /etc/passwd file format). This field must be populated when '
+                   '\'usernameSource\' is set to \'File\'.')
+        c.argument('directory_services_settings_username_download_ldap_server', type=str, help='The fully qualified '
+                   'domain name or IP address of the LDAP server to use.')
+        c.argument('directory_services_settings_username_download_ldap_base_dn', type=str, help='The base '
+                   'distinguished name for the LDAP domain.')
+        c.argument('directory_services_settings_username_download_encrypt_ldap_connection',
+                   arg_type=get_three_state_flag(), help='This indicates if the LDAP connection should be encrypted.')
+        c.argument('directory_services_settings_username_download_require_valid_certificate',
+                   arg_type=get_three_state_flag(), help='Determines if the certificates should be validated by a '
+                   'certificate authority. When true, caCertificateURI must be provided.')
+        c.argument('directory_services_settings_username_download_auto_download_certificate',
+                   arg_type=get_three_state_flag(), help='Determines if the certificate should be automatically '
+                   'downloaded. This applies to \'caCertificateURI\' when \'requireValidCertificate\' is true, or a '
+                   'self signed certificate otherwise.')
+        c.argument('directory_services_settings_username_download_ca_certificate_uri', type=str, help='The URI of the '
+                   'CA certificate to validate the LDAP secure connection. This field must be populated when '
+                   '\'requireValidCertificate\' is set to true.')
+        c.argument('directory_services_settings_username_download_credentials',
+                   action=AddDirectoryServicesSettingsUsernameDownloadCredentials, nargs='*', help='When present, '
+                   'these are the credentials for the secure LDAP connection.')
+        c.argument('directory_services_settings_active_directory_primary_dns_ip_address', type=str, help='Primary DNS '
+                   'IP address used to resolve the Active Directory domain controller\'s fully qualified domain name.')
+        c.argument('directory_services_settings_active_directory_secondary_dns_ip_address', type=str, help='Secondary '
+                   'DNS IP address used to resolve the Active Directory domain controller\'s fully qualified domain '
+                   'name.')
+        c.argument('directory_services_settings_active_directory_domain_name', type=str, help='The fully qualified '
+                   'domain name of the Active Directory domain controller.')
+        c.argument('directory_services_settings_active_directory_domain_net_bios', type=str, help='The Active '
+                   'Directory domain\'s NetBIOS name.')
+        c.argument('directory_services_settings_active_directory_smb_server_name', type=str, help='The name (NetBIOS) '
+                   'used for the HPC Cache to join the Active Directory domain. Length must not be greater than 15 and '
+                   'chars must be from the [-0-9a-zA-Z_] char class.')
+        c.argument('security_settings_access_policies', action=AddSecuritySettingsAccessPolicies, nargs='*', help='NFS '
+                   'access policies defined for this cache.')
+        c.argument('encryption_settings_key_encryption_key_key_url', type=str, help='The URL referencing a key '
+                   'encryption key in Key Vault.')
+        c.argument('encryption_settings_key_encryption_key_source_vault_id', type=str, help='Resource Id.')
+        c.argument('network_settings_mtu', type=int, help='The IPv4 maximum transmission unit configured for the '
+                   'subnet.')
+        c.argument('sku_name', type=str, help='SKU name for this Cache.')
+        c.argument('identity_type', arg_type=get_enum_type(['SystemAssigned', 'None']), help='The type of identity '
+                   'used for the cache')
+
+    # The remaining cache contexts (delete/flush/start/stop/upgrade-firmware/
+    # wait) all take only the resource group and cache name.
+    with self.argument_context('storagecache cache delete') as c:
+        c.argument('resource_group_name', resource_group_name_type)
+        c.argument('cache_name', type=str, help='Name of Cache. Length of name must not be greater than 80 and chars '
+                   'must be from the [-0-9a-zA-Z_] char class.')
+
+    with self.argument_context('storagecache cache flush') as c:
+        c.argument('resource_group_name', resource_group_name_type)
+        c.argument('cache_name', type=str, help='Name of Cache. Length of name must not be greater than 80 and chars '
+                   'must be from the [-0-9a-zA-Z_] char class.')
+
+    with self.argument_context('storagecache cache start') as c:
+        c.argument('resource_group_name', resource_group_name_type)
+        c.argument('cache_name', type=str, help='Name of Cache. Length of name must not be greater than 80 and chars '
+                   'must be from the [-0-9a-zA-Z_] char class.')
+
+    with self.argument_context('storagecache cache stop') as c:
+        c.argument('resource_group_name', resource_group_name_type)
+        c.argument('cache_name', type=str, help='Name of Cache. Length of name must not be greater than 80 and chars '
+                   'must be from the [-0-9a-zA-Z_] char class.')
+
+    with self.argument_context('storagecache cache upgrade-firmware') as c:
+        c.argument('resource_group_name', resource_group_name_type)
+        c.argument('cache_name', type=str, help='Name of Cache. Length of name must not be greater than 80 and chars '
+                   'must be from the [-0-9a-zA-Z_] char class.')
+
+    with self.argument_context('storagecache cache wait') as c:
+        c.argument('resource_group_name', resource_group_name_type)
+        c.argument('cache_name', type=str, help='Name of Cache. Length of name must not be greater than 80 and chars '
+                   'must be from the [-0-9a-zA-Z_] char class.')
+
+    with self.argument_context('storagecache storage-target list') as c:
+        c.argument('resource_group_name', resource_group_name_type)
+        c.argument('cache_name', type=str, help='Name of Cache. Length of name must not be greater than 80 and chars '
+                   'must be from the [-0-9a-zA-Z_] char class.')
+
+    with self.argument_context('storagecache storage-target show') as c:
+        c.argument('resource_group_name', resource_group_name_type)
+        c.argument('cache_name', type=str, help='Name of Cache. Length of name must not be greater than 80 and chars '
+                   'must be from the [-0-9a-zA-Z_] char class.')
+        c.argument('storage_target_name', options_list=['--name', '-n', '--storage-target-name'], type=str, help='Name '
+                   'of the Storage Target. Length of name must not be greater than 80 and chars must be from the '
+                   '[-0-9a-zA-Z_] char class.')
+
+    # As with cache create/update, the storage-target create and update
+    # contexts carry identical generated parameter sets.
+    with self.argument_context('storagecache storage-target create') as c:
+        c.argument('resource_group_name', resource_group_name_type)
+        c.argument('cache_name', type=str, help='Name of Cache. Length of name must not be greater than 80 and chars '
+                   'must be from the [-0-9a-zA-Z_] char class.')
+        c.argument('storage_target_name', options_list=['--name', '-n', '--storage-target-name'], type=str, help='Name '
+                   'of the Storage Target. Length of name must not be greater than 80 and chars must be from the '
+                   '[-0-9a-zA-Z_] char class.')
+        c.argument('junctions', action=AddJunctions, nargs='*', help='List of Cache namespace junctions to target for '
+                   'namespace associations.')
+        c.argument('target_type', arg_type=get_enum_type(['nfs3', 'clfs', 'unknown']), help='Type of the Storage '
+                   'Target.')
+        c.argument('provisioning_state', arg_type=get_enum_type(['Succeeded', 'Failed', 'Cancelled', 'Creating', ''
+                                                                 'Deleting', 'Updating']), help='ARM provisioning '
+                   'state, see https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/Addendum.md#provisi'
+                   'oningstate-property')
+        c.argument('nfs3', action=AddNfs3, nargs='*', help='Properties when targetType is nfs3.')
+        c.argument('unknown_unknown_map', action=AddUnknownUnknownMap, nargs='*', help='Dictionary of string->string '
+                   'pairs containing information about the Storage Target. Expect value: KEY1=VALUE1 KEY2=VALUE2 ...')
+        c.argument('clfs_target', type=str, help='Resource ID of storage container.')
+
+    with self.argument_context('storagecache storage-target update') as c:
+        c.argument('resource_group_name', resource_group_name_type)
+        c.argument('cache_name', type=str, help='Name of Cache. Length of name must not be greater than 80 and chars '
+                   'must be from the [-0-9a-zA-Z_] char class.')
+        c.argument('storage_target_name', options_list=['--name', '-n', '--storage-target-name'], type=str, help='Name '
+                   'of the Storage Target. Length of name must not be greater than 80 and chars must be from the '
+                   '[-0-9a-zA-Z_] char class.')
+        c.argument('junctions', action=AddJunctions, nargs='*', help='List of Cache namespace junctions to target for '
+                   'namespace associations.')
+        c.argument('target_type', arg_type=get_enum_type(['nfs3', 'clfs', 'unknown']), help='Type of the Storage '
+                   'Target.')
+        c.argument('provisioning_state', arg_type=get_enum_type(['Succeeded', 'Failed', 'Cancelled', 'Creating', ''
+                                                                 'Deleting', 'Updating']), help='ARM provisioning '
+                   'state, see https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/Addendum.md#provisi'
+                   'oningstate-property')
+        c.argument('nfs3', action=AddNfs3, nargs='*', help='Properties when targetType is nfs3.')
+        c.argument('unknown_unknown_map', action=AddUnknownUnknownMap, nargs='*', help='Dictionary of string->string '
+                   'pairs containing information about the Storage Target. Expect value: KEY1=VALUE1 KEY2=VALUE2 ...')
+        c.argument('clfs_target', type=str, help='Resource ID of storage container.')
+
+    with self.argument_context('storagecache storage-target delete') as c:
+        c.argument('resource_group_name', resource_group_name_type)
+        c.argument('cache_name', type=str, help='Name of Cache. Length of name must not be greater than 80 and chars '
+                   'must be from the [-0-9a-zA-Z_] char class.')
+        c.argument('storage_target_name', options_list=['--name', '-n', '--storage-target-name'], type=str, help='Name '
+                   'of Storage Target.')
+
+    with self.argument_context('storagecache storage-target wait') as c:
+        c.argument('resource_group_name', resource_group_name_type)
+        c.argument('cache_name', type=str, help='Name of Cache. Length of name must not be greater than 80 and chars '
+                   'must be from the [-0-9a-zA-Z_] char class.')
+        c.argument('storage_target_name', options_list=['--name', '-n', '--storage-target-name'], type=str, help='Name '
+                   'of the Storage Target. Length of name must not be greater than 80 and chars must be from the '
+                   '[-0-9a-zA-Z_] char class.')
diff --git a/src/storagecache/azext_storagecache/generated/_validators.py b/src/storagecache/azext_storagecache/generated/_validators.py
new file mode 100644
index 00000000000..b33a44c1ebf
--- /dev/null
+++ b/src/storagecache/azext_storagecache/generated/_validators.py
@@ -0,0 +1,9 @@
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
diff --git a/src/storagecache/azext_storagecache/generated/action.py b/src/storagecache/azext_storagecache/generated/action.py
new file mode 100644
index 00000000000..c05e57bd2ef
--- /dev/null
+++ b/src/storagecache/azext_storagecache/generated/action.py
@@ -0,0 +1,134 @@
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+# pylint: disable=protected-access
+
+import argparse
+from collections import defaultdict
+from knack.util import CLIError
+
+
+class AddDirectoryServicesSettingsUsernameDownloadCredentials(argparse.Action):
+    # argparse action: parses repeated KEY=VALUE tokens from the CLI into the
+    # LDAP-credentials dict and stores it directly on the namespace
+    # (single-valued: each invocation overwrites, it does not append).
+    def __call__(self, parser, namespace, values, option_string=None):
+        action = self.get_action(values, option_string)
+        namespace.directory_services_settings_username_download_credentials = action
+
+    def get_action(self, values, option_string):  # pylint: disable=no-self-use
+        try:
+            properties = defaultdict(list)
+            # Split on the first '=' only, so values themselves may contain '='.
+            for (k, v) in (x.split('=', 1) for x in values):
+                properties[k].append(v)
+            properties = dict(properties)
+        except ValueError:
+            # A token without '=' unpacks to one element and raises ValueError.
+            raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
+        d = {}
+        # Only 'bind-dn' and 'bind-password' are recognized.
+        # NOTE(review): unrecognized keys are silently dropped, and when a key
+        # is repeated only the first occurrence (v[0]) is kept.
+        for k in properties:
+            kl = k.lower()
+            v = properties[k]
+            if kl == 'bind-dn':
+                d['bind_dn'] = v[0]
+            elif kl == 'bind-password':
+                d['bind_password'] = v[0]
+        return d
+
+
+class AddSecuritySettingsAccessPolicies(argparse._AppendAction):
+    # argparse action: parses KEY=VALUE tokens into one access-policy dict and
+    # appends it to the namespace list (multi-valued: the flag may repeat).
+    def __call__(self, parser, namespace, values, option_string=None):
+        action = self.get_action(values, option_string)
+        super(AddSecuritySettingsAccessPolicies, self).__call__(parser, namespace, action, option_string)
+
+    def get_action(self, values, option_string):  # pylint: disable=no-self-use
+        try:
+            properties = defaultdict(list)
+            # Split on the first '=' only, so values themselves may contain '='.
+            for (k, v) in (x.split('=', 1) for x in values):
+                properties[k].append(v)
+            properties = dict(properties)
+        except ValueError:
+            raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
+        d = {}
+        # 'name' keeps the first value only; 'access-rules' keeps the full
+        # list (a policy may carry several rules). Unknown keys are dropped.
+        for k in properties:
+            kl = k.lower()
+            v = properties[k]
+            if kl == 'name':
+                d['name'] = v[0]
+            elif kl == 'access-rules':
+                d['access_rules'] = v
+        return d
+
+
+class AddJunctions(argparse._AppendAction):
+    # argparse action: parses KEY=VALUE tokens into one namespace-junction
+    # dict and appends it to the namespace list (the flag may repeat).
+    def __call__(self, parser, namespace, values, option_string=None):
+        action = self.get_action(values, option_string)
+        super(AddJunctions, self).__call__(parser, namespace, action, option_string)
+
+    def get_action(self, values, option_string):  # pylint: disable=no-self-use
+        try:
+            properties = defaultdict(list)
+            # Split on the first '=' only, so values themselves may contain '='.
+            for (k, v) in (x.split('=', 1) for x in values):
+                properties[k].append(v)
+            properties = dict(properties)
+        except ValueError:
+            raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
+        d = {}
+        # Recognized keys map kebab-case CLI names to snake_case SDK fields;
+        # first occurrence wins, unknown keys are silently dropped.
+        for k in properties:
+            kl = k.lower()
+            v = properties[k]
+            if kl == 'namespace-path':
+                d['namespace_path'] = v[0]
+            elif kl == 'target-path':
+                d['target_path'] = v[0]
+            elif kl == 'nfs-export':
+                d['nfs_export'] = v[0]
+            elif kl == 'nfs-access-policy':
+                d['nfs_access_policy'] = v[0]
+        return d
+
+
+class AddNfs3(argparse.Action):
+    # argparse action: parses KEY=VALUE tokens into the nfs3 properties dict
+    # and stores it on the namespace (single-valued: overwrites, not appends).
+    def __call__(self, parser, namespace, values, option_string=None):
+        action = self.get_action(values, option_string)
+        namespace.nfs3 = action
+
+    def get_action(self, values, option_string):  # pylint: disable=no-self-use
+        try:
+            properties = defaultdict(list)
+            # Split on the first '=' only, so values themselves may contain '='.
+            for (k, v) in (x.split('=', 1) for x in values):
+                properties[k].append(v)
+            properties = dict(properties)
+        except ValueError:
+            raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
+        d = {}
+        # Only 'target' and 'usage-model' are recognized (see the command help
+        # text for --nfs3); first occurrence wins, unknown keys are dropped.
+        for k in properties:
+            kl = k.lower()
+            v = properties[k]
+            if kl == 'target':
+                d['target'] = v[0]
+            elif kl == 'usage-model':
+                d['usage_model'] = v[0]
+        return d
+
+
+class AddUnknownUnknownMap(argparse.Action):
+    # argparse action: free-form string->string map. Unlike the other actions
+    # it keeps every key as given (no allow-list, no case folding), taking the
+    # first value when a key repeats.
+    def __call__(self, parser, namespace, values, option_string=None):
+        action = self.get_action(values, option_string)
+        namespace.unknown_unknown_map = action
+
+    def get_action(self, values, option_string):  # pylint: disable=no-self-use
+        try:
+            properties = defaultdict(list)
+            # Split on the first '=' only, so values themselves may contain '='.
+            for (k, v) in (x.split('=', 1) for x in values):
+                properties[k].append(v)
+            properties = dict(properties)
+        except ValueError:
+            raise CLIError('usage error: {} [KEY=VALUE ...]'.format(option_string))
+        d = {}
+        for k in properties:
+            v = properties[k]
+            d[k] = v[0]
+        return d
diff --git a/src/storagecache/azext_storagecache/generated/commands.py b/src/storagecache/azext_storagecache/generated/commands.py
new file mode 100644
index 00000000000..f15bd7b50e0
--- /dev/null
+++ b/src/storagecache/azext_storagecache/generated/commands.py
@@ -0,0 +1,72 @@
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+# pylint: disable=too-many-statements
+# pylint: disable=too-many-locals
+
+from azure.cli.core.commands import CliCommandType
+
+
+def load_command_table(self, _):
+    """Wire each `storagecache` command group to its SDK operations class and
+    custom-command implementation.
+
+    ``self`` is the AzCommandsLoader instance; ``_`` is the raw command string
+    (unused). All groups are marked experimental. Client factories are
+    imported lazily inside this function to keep CLI startup cheap.
+    """
+
+    from azext_storagecache.generated._client_factory import cf_sku
+    storagecache_sku = CliCommandType(
+        operations_tmpl='azext_storagecache.vendored_sdks.storagecache.operations._sku_operations#SkuOperations.{}',
+        client_factory=cf_sku)
+    with self.command_group('storagecache sku', storagecache_sku, client_factory=cf_sku, is_experimental=True) as g:
+        g.custom_command('list', 'storagecache_sku_list')
+
+    from azext_storagecache.generated._client_factory import cf_usage_model
+    storagecache_usage_model = CliCommandType(
+        operations_tmpl='azext_storagecache.vendored_sdks.storagecache.operations._usage_model_operations#UsageModelOpe'
+                        'rations.{}',
+        client_factory=cf_usage_model)
+    with self.command_group('storagecache usage-model', storagecache_usage_model, client_factory=cf_usage_model,
+                            is_experimental=True) as g:
+        g.custom_command('list', 'storagecache_usage_model_list')
+
+    from azext_storagecache.generated._client_factory import cf_ascoperation
+    storagecache_ascoperation = CliCommandType(
+        operations_tmpl='azext_storagecache.vendored_sdks.storagecache.operations._asc_operation_operations#ASCOperatio'
+                        'nOperations.{}',
+        client_factory=cf_ascoperation)
+    with self.command_group('storagecache asc-operation', storagecache_ascoperation, client_factory=cf_ascoperation,
+                            is_experimental=True) as g:
+        g.custom_show_command('show', 'storagecache_asc_operation_show')
+
+    from azext_storagecache.generated._client_factory import cf_cache
+    storagecache_cache = CliCommandType(
+        operations_tmpl='azext_storagecache.vendored_sdks.storagecache.operations._cache_operations#CacheOperations.{}',
+        client_factory=cf_cache)
+    with self.command_group('storagecache cache', storagecache_cache, client_factory=cf_cache,
+                            is_experimental=True) as g:
+        g.custom_command('list', 'storagecache_cache_list')
+        g.custom_show_command('show', 'storagecache_cache_show')
+        g.custom_command('create', 'storagecache_cache_create', supports_no_wait=True)
+        # NOTE(review): 'cache update' does not declare supports_no_wait while
+        # 'storage-target update' below does — confirm against the generator
+        # config whether this asymmetry is intentional.
+        g.custom_command('update', 'storagecache_cache_update')
+        g.custom_command('delete', 'storagecache_cache_delete', supports_no_wait=True, confirmation=True)
+        g.custom_command('flush', 'storagecache_cache_flush', supports_no_wait=True)
+        g.custom_command('start', 'storagecache_cache_start', supports_no_wait=True)
+        g.custom_command('stop', 'storagecache_cache_stop', supports_no_wait=True)
+        g.custom_command('upgrade-firmware', 'storagecache_cache_upgrade_firmware', supports_no_wait=True)
+        # 'wait' polls via the show implementation until the LRO condition holds.
+        g.custom_wait_command('wait', 'storagecache_cache_show')
+
+    from azext_storagecache.generated._client_factory import cf_storage_target
+    storagecache_storage_target = CliCommandType(
+        operations_tmpl='azext_storagecache.vendored_sdks.storagecache.operations._storage_target_operations#StorageTar'
+                        'getOperations.{}',
+        client_factory=cf_storage_target)
+    with self.command_group('storagecache storage-target', storagecache_storage_target,
+                            client_factory=cf_storage_target, is_experimental=True) as g:
+        g.custom_command('list', 'storagecache_storage_target_list')
+        g.custom_show_command('show', 'storagecache_storage_target_show')
+        g.custom_command('create', 'storagecache_storage_target_create', supports_no_wait=True)
+        g.custom_command('update', 'storagecache_storage_target_update', supports_no_wait=True)
+        g.custom_command('delete', 'storagecache_storage_target_delete', supports_no_wait=True, confirmation=True)
+        g.custom_wait_command('wait', 'storagecache_storage_target_show')
diff --git a/src/storagecache/azext_storagecache/generated/custom.py b/src/storagecache/azext_storagecache/generated/custom.py
new file mode 100644
index 00000000000..38da669e86f
--- /dev/null
+++ b/src/storagecache/azext_storagecache/generated/custom.py
@@ -0,0 +1,303 @@
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+# pylint: disable=line-too-long
+# pylint: disable=too-many-lines
+
+from azure.cli.core.util import sdk_no_wait
+
+
+def storagecache_sku_list(client):
+    """List the SKUs available for storage cache resources."""
+    return client.list()
+
+
+def storagecache_usage_model_list(client):
+    """List the usage models available for cache storage targets."""
+    return client.list()
+
+
+def storagecache_asc_operation_show(client,
+                                    location,
+                                    operation_id):
+    """Show the status of an asynchronous storage-cache operation.
+
+    :param location: Azure region in which the operation was started.
+    :param operation_id: Id of the async operation to look up.
+    """
+    return client.get(location=location,
+                      operation_id=operation_id)
+
+
+def storagecache_cache_list(client,
+                            resource_group_name=None):
+    """List caches in a resource group, or subscription-wide when no
+    resource group is given (empty string also falls through)."""
+    if resource_group_name:
+        return client.list_by_resource_group(resource_group_name=resource_group_name)
+    return client.list()
+
+
+def storagecache_cache_show(client,
+                            resource_group_name,
+                            cache_name):
+    """Show the details of a single cache."""
+    return client.get(resource_group_name=resource_group_name,
+                      cache_name=cache_name)
+
+
+def storagecache_cache_create(client,
+                              resource_group_name,
+                              cache_name,
+                              tags=None,
+                              location=None,
+                              cache_size_gb=None,
+                              provisioning_state=None,
+                              subnet=None,
+                              directory_services_settings_username_download_extended_groups_enabled=None,
+                              directory_services_settings_username_download_username_source=None,
+                              directory_services_settings_username_download_group_file_uri=None,
+                              directory_services_settings_username_download_user_file_uri=None,
+                              directory_services_settings_username_download_ldap_server=None,
+                              directory_services_settings_username_download_ldap_base_dn=None,
+                              directory_services_settings_username_download_encrypt_ldap_connection=None,
+                              directory_services_settings_username_download_require_valid_certificate=None,
+                              directory_services_settings_username_download_auto_download_certificate=None,
+                              directory_services_settings_username_download_ca_certificate_uri=None,
+                              directory_services_settings_username_download_credentials=None,
+                              directory_services_settings_active_directory_primary_dns_ip_address=None,
+                              directory_services_settings_active_directory_secondary_dns_ip_address=None,
+                              directory_services_settings_active_directory_domain_name=None,
+                              directory_services_settings_active_directory_domain_net_bios=None,
+                              directory_services_settings_active_directory_smb_server_name=None,
+                              security_settings_access_policies=None,
+                              encryption_settings_key_encryption_key_key_url=None,
+                              encryption_settings_key_encryption_key_source_vault_id=None,
+                              network_settings_mtu=None,
+                              sku_name=None,
+                              identity_type=None,
+                              no_wait=False):
+    """Create (or update) a cache via the long-running create_or_update API.
+
+    The flattened directory-services / security / encryption / network /
+    sku / identity options are forwarded to the SDK under their short
+    (unflattened) keyword names.  Honors --no-wait via sdk_no_wait.
+    """
+    # Client-side defaults when the caller omits these options.
+    if directory_services_settings_username_download_username_source is None:
+        directory_services_settings_username_download_username_source = "None"
+    if network_settings_mtu is None:
+        network_settings_mtu = 1500
+    return sdk_no_wait(no_wait,
+                       client.begin_create_or_update,
+                       resource_group_name=resource_group_name,
+                       cache_name=cache_name,
+                       tags=tags,
+                       location=location,
+                       cache_size_gb=cache_size_gb,
+                       provisioning_state=provisioning_state,
+                       subnet=subnet,
+                       upgrade_status=None,  # NOTE(review): always sent as None — presumably server-managed; confirm
+                       extended_groups_enabled=directory_services_settings_username_download_extended_groups_enabled,
+                       username_source=directory_services_settings_username_download_username_source,
+                       group_file_uri=directory_services_settings_username_download_group_file_uri,
+                       user_file_uri=directory_services_settings_username_download_user_file_uri,
+                       ldap_server=directory_services_settings_username_download_ldap_server,
+                       ldap_base_dn=directory_services_settings_username_download_ldap_base_dn,
+                       encrypt_ldap_connection=directory_services_settings_username_download_encrypt_ldap_connection,
+                       require_valid_certificate=directory_services_settings_username_download_require_valid_certificate,
+                       auto_download_certificate=directory_services_settings_username_download_auto_download_certificate,
+                       ca_certificate_uri=directory_services_settings_username_download_ca_certificate_uri,
+                       credentials=directory_services_settings_username_download_credentials,
+                       primary_dns_ip_address=directory_services_settings_active_directory_primary_dns_ip_address,
+                       secondary_dns_ip_address=directory_services_settings_active_directory_secondary_dns_ip_address,
+                       domain_name=directory_services_settings_active_directory_domain_name,
+                       domain_net_bios=directory_services_settings_active_directory_domain_net_bios,
+                       smb_server_name=directory_services_settings_active_directory_smb_server_name,
+                       access_policies=security_settings_access_policies,
+                       key_url=encryption_settings_key_encryption_key_key_url,
+                       id=encryption_settings_key_encryption_key_source_vault_id,
+                       mtu=network_settings_mtu,
+                       name=sku_name,
+                       type=identity_type)
+
+
+def storagecache_cache_update(client,
+                              resource_group_name,
+                              cache_name,
+                              tags=None,
+                              location=None,
+                              cache_size_gb=None,
+                              provisioning_state=None,
+                              subnet=None,
+                              directory_services_settings_username_download_extended_groups_enabled=None,
+                              directory_services_settings_username_download_username_source=None,
+                              directory_services_settings_username_download_group_file_uri=None,
+                              directory_services_settings_username_download_user_file_uri=None,
+                              directory_services_settings_username_download_ldap_server=None,
+                              directory_services_settings_username_download_ldap_base_dn=None,
+                              directory_services_settings_username_download_encrypt_ldap_connection=None,
+                              directory_services_settings_username_download_require_valid_certificate=None,
+                              directory_services_settings_username_download_auto_download_certificate=None,
+                              directory_services_settings_username_download_ca_certificate_uri=None,
+                              directory_services_settings_username_download_credentials=None,
+                              directory_services_settings_active_directory_primary_dns_ip_address=None,
+                              directory_services_settings_active_directory_secondary_dns_ip_address=None,
+                              directory_services_settings_active_directory_domain_name=None,
+                              directory_services_settings_active_directory_domain_net_bios=None,
+                              directory_services_settings_active_directory_smb_server_name=None,
+                              security_settings_access_policies=None,
+                              encryption_settings_key_encryption_key_key_url=None,
+                              encryption_settings_key_encryption_key_source_vault_id=None,
+                              network_settings_mtu=None,
+                              sku_name=None,
+                              identity_type=None):
+    """Update an existing cache via the synchronous update API.
+
+    Mirrors storagecache_cache_create's flattened options but calls
+    client.update (no long-running poller, no --no-wait support).
+    """
+    # NOTE(review): these defaults are re-applied even on update, so an
+    # omitted option sends "None"/1500 rather than "leave unchanged" —
+    # TODO confirm this matches the service's update semantics.
+    if directory_services_settings_username_download_username_source is None:
+        directory_services_settings_username_download_username_source = "None"
+    if network_settings_mtu is None:
+        network_settings_mtu = 1500
+    return client.update(resource_group_name=resource_group_name,
+                         cache_name=cache_name,
+                         tags=tags,
+                         location=location,
+                         cache_size_gb=cache_size_gb,
+                         provisioning_state=provisioning_state,
+                         subnet=subnet,
+                         upgrade_status=None,  # NOTE(review): always None — presumably server-managed; confirm
+                         extended_groups_enabled=directory_services_settings_username_download_extended_groups_enabled,
+                         username_source=directory_services_settings_username_download_username_source,
+                         group_file_uri=directory_services_settings_username_download_group_file_uri,
+                         user_file_uri=directory_services_settings_username_download_user_file_uri,
+                         ldap_server=directory_services_settings_username_download_ldap_server,
+                         ldap_base_dn=directory_services_settings_username_download_ldap_base_dn,
+                         encrypt_ldap_connection=directory_services_settings_username_download_encrypt_ldap_connection,
+                         require_valid_certificate=directory_services_settings_username_download_require_valid_certificate,
+                         auto_download_certificate=directory_services_settings_username_download_auto_download_certificate,
+                         ca_certificate_uri=directory_services_settings_username_download_ca_certificate_uri,
+                         credentials=directory_services_settings_username_download_credentials,
+                         primary_dns_ip_address=directory_services_settings_active_directory_primary_dns_ip_address,
+                         secondary_dns_ip_address=directory_services_settings_active_directory_secondary_dns_ip_address,
+                         domain_name=directory_services_settings_active_directory_domain_name,
+                         domain_net_bios=directory_services_settings_active_directory_domain_net_bios,
+                         smb_server_name=directory_services_settings_active_directory_smb_server_name,
+                         access_policies=security_settings_access_policies,
+                         key_url=encryption_settings_key_encryption_key_key_url,
+                         id=encryption_settings_key_encryption_key_source_vault_id,
+                         mtu=network_settings_mtu,
+                         name=sku_name,
+                         type=identity_type)
+
+
+def storagecache_cache_delete(client,
+                              resource_group_name,
+                              cache_name,
+                              no_wait=False):
+    """Delete a cache (long-running; honors --no-wait)."""
+    return sdk_no_wait(no_wait,
+                       client.begin_delete,
+                       resource_group_name=resource_group_name,
+                       cache_name=cache_name)
+
+
+def storagecache_cache_flush(client,
+                             resource_group_name,
+                             cache_name,
+                             no_wait=False):
+    """Flush cached data to the storage targets (long-running; honors --no-wait)."""
+    return sdk_no_wait(no_wait,
+                       client.begin_flush,
+                       resource_group_name=resource_group_name,
+                       cache_name=cache_name)
+
+
+def storagecache_cache_start(client,
+                             resource_group_name,
+                             cache_name,
+                             no_wait=False):
+    """Start a stopped cache (long-running; honors --no-wait)."""
+    return sdk_no_wait(no_wait,
+                       client.begin_start,
+                       resource_group_name=resource_group_name,
+                       cache_name=cache_name)
+
+
+def storagecache_cache_stop(client,
+                            resource_group_name,
+                            cache_name,
+                            no_wait=False):
+    """Stop a running cache (long-running; honors --no-wait)."""
+    return sdk_no_wait(no_wait,
+                       client.begin_stop,
+                       resource_group_name=resource_group_name,
+                       cache_name=cache_name)
+
+
+def storagecache_cache_upgrade_firmware(client,
+                                        resource_group_name,
+                                        cache_name,
+                                        no_wait=False):
+    """Upgrade the cache's firmware (long-running; honors --no-wait)."""
+    return sdk_no_wait(no_wait,
+                       client.begin_upgrade_firmware,
+                       resource_group_name=resource_group_name,
+                       cache_name=cache_name)
+
+
+def storagecache_storage_target_list(client,
+                                     resource_group_name,
+                                     cache_name):
+    """List all storage targets attached to a cache."""
+    return client.list_by_cache(resource_group_name=resource_group_name,
+                                cache_name=cache_name)
+
+
+def storagecache_storage_target_show(client,
+                                     resource_group_name,
+                                     cache_name,
+                                     storage_target_name):
+    """Show a single storage target of a cache."""
+    return client.get(resource_group_name=resource_group_name,
+                      cache_name=cache_name,
+                      storage_target_name=storage_target_name)
+
+
+def storagecache_storage_target_create(client,
+                                       resource_group_name,
+                                       cache_name,
+                                       storage_target_name,
+                                       junctions=None,
+                                       target_type=None,
+                                       provisioning_state=None,
+                                       nfs3=None,
+                                       unknown_unknown_map=None,
+                                       clfs_target=None,
+                                       no_wait=False):
+    """Create (or replace) a storage target on a cache.
+
+    Long-running; honors --no-wait.  nfs3 / unknown-map / clfs payloads
+    select the backend depending on target_type.
+    """
+    return sdk_no_wait(no_wait,
+                       client.begin_create_or_update,
+                       resource_group_name=resource_group_name,
+                       cache_name=cache_name,
+                       storage_target_name=storage_target_name,
+                       junctions=junctions,
+                       target_type=target_type,
+                       provisioning_state=provisioning_state,
+                       nfs3=nfs3,
+                       unknown_map=unknown_unknown_map,
+                       target=clfs_target)
+
+
+def storagecache_storage_target_update(client,
+                                       resource_group_name,
+                                       cache_name,
+                                       storage_target_name,
+                                       junctions=None,
+                                       target_type=None,
+                                       provisioning_state=None,
+                                       nfs3=None,
+                                       unknown_unknown_map=None,
+                                       clfs_target=None,
+                                       no_wait=False):
+    """Update a storage target.
+
+    NOTE(review): delegates to begin_create_or_update (PUT), so omitted
+    options are sent as None rather than "leave unchanged" — confirm this
+    full-replace behavior is intended for the update command.
+    """
+    return sdk_no_wait(no_wait,
+                       client.begin_create_or_update,
+                       resource_group_name=resource_group_name,
+                       cache_name=cache_name,
+                       storage_target_name=storage_target_name,
+                       junctions=junctions,
+                       target_type=target_type,
+                       provisioning_state=provisioning_state,
+                       nfs3=nfs3,
+                       unknown_map=unknown_unknown_map,
+                       target=clfs_target)
+
+
+def storagecache_storage_target_delete(client,
+                                       resource_group_name,
+                                       cache_name,
+                                       storage_target_name,
+                                       no_wait=False):
+    """Delete a storage target from a cache (long-running; honors --no-wait)."""
+    return sdk_no_wait(no_wait,
+                       client.begin_delete,
+                       resource_group_name=resource_group_name,
+                       cache_name=cache_name,
+                       storage_target_name=storage_target_name)
diff --git a/src/storagecache/azext_storagecache/manual/__init__.py b/src/storagecache/azext_storagecache/manual/__init__.py
new file mode 100644
index 00000000000..c9cfdc73e77
--- /dev/null
+++ b/src/storagecache/azext_storagecache/manual/__init__.py
@@ -0,0 +1,12 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+__path__ = __import__('pkgutil').extend_path(__path__, __name__)
diff --git a/src/storagecache/azext_storagecache/tests/__init__.py b/src/storagecache/azext_storagecache/tests/__init__.py
new file mode 100644
index 00000000000..50e0627daff
--- /dev/null
+++ b/src/storagecache/azext_storagecache/tests/__init__.py
@@ -0,0 +1,114 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+import inspect
+import logging
+import os
+import sys
+import traceback
+import datetime as dt
+
+from azure.core.exceptions import AzureError
+from azure.cli.testsdk.exceptions import CliTestError, CliExecutionError, JMESPathCheckAssertionError
+
+
+logger = logging.getLogger('azure.cli.testsdk')
+logger.addHandler(logging.StreamHandler())
+__path__ = __import__('pkgutil').extend_path(__path__, __name__)
+exceptions = []
+test_map = dict()
+SUCCESSED = "successed"
+FAILED = "failed"
+
+
+def try_manual(func):
+ def import_manual_function(origin_func):
+ from importlib import import_module
+ decorated_path = inspect.getfile(origin_func)
+ module_path = __path__[0]
+ if not decorated_path.startswith(module_path):
+ raise Exception("Decorator can only be used in submodules!")
+ manual_path = os.path.join(
+ decorated_path[module_path.rfind(os.path.sep) + 1:])
+ manual_file_path, manual_file_name = os.path.split(manual_path)
+ module_name, _ = os.path.splitext(manual_file_name)
+ manual_module = "..manual." + \
+ ".".join(manual_file_path.split(os.path.sep) + [module_name, ])
+ return getattr(import_module(manual_module, package=__name__), origin_func.__name__)
+
+ def get_func_to_call():
+ func_to_call = func
+ try:
+ func_to_call = import_manual_function(func)
+ func_to_call = import_manual_function(func)
+ logger.info("Found manual override for %s(...)", func.__name__)
+ except (ImportError, AttributeError):
+ pass
+ return func_to_call
+
+ def wrapper(*args, **kwargs):
+ func_to_call = get_func_to_call()
+ logger.info("running %s()...", func.__name__)
+ try:
+ test_map[func.__name__] = dict()
+ test_map[func.__name__]["result"] = SUCCESSED
+ test_map[func.__name__]["error_message"] = ""
+ test_map[func.__name__]["error_stack"] = ""
+ test_map[func.__name__]["error_normalized"] = ""
+ test_map[func.__name__]["start_dt"] = dt.datetime.utcnow()
+ ret = func_to_call(*args, **kwargs)
+ except (AssertionError, AzureError, CliTestError, CliExecutionError, SystemExit,
+ JMESPathCheckAssertionError) as e:
+ test_map[func.__name__]["end_dt"] = dt.datetime.utcnow()
+ test_map[func.__name__]["result"] = FAILED
+ test_map[func.__name__]["error_message"] = str(e).replace("\r\n", " ").replace("\n", " ")[:500]
+ test_map[func.__name__]["error_stack"] = traceback.format_exc().replace(
+ "\r\n", " ").replace("\n", " ")[:500]
+ logger.info("--------------------------------------")
+ logger.info("step exception: %s", e)
+ logger.error("--------------------------------------")
+ logger.error("step exception in %s: %s", func.__name__, e)
+ logger.info(traceback.format_exc())
+ exceptions.append((func.__name__, sys.exc_info()))
+ else:
+ test_map[func.__name__]["end_dt"] = dt.datetime.utcnow()
+ return ret
+
+ if inspect.isclass(func):
+ return get_func_to_call()
+ return wrapper
+
+
+def calc_coverage(filename):
+    """Write a markdown coverage table for the recorded test steps.
+
+    Produces ``<filename>_coverage.md`` next to the test file; only
+    ``step_``-prefixed entries of ``test_map`` count toward the total.
+    Note: the "successed" spelling comes from the SUCCESSED constant.
+    """
+    filename = filename.split(".")[0]
+    coverage_name = filename + "_coverage.md"
+    with open(coverage_name, "w") as f:
+        f.write("|Scenario|Result|ErrorMessage|ErrorStack|ErrorNormalized|StartDt|EndDt|\n")
+        total = len(test_map)
+        covered = 0
+        for k, v in test_map.items():
+            # Helper entries (setup/cleanup/call_scenario) are excluded.
+            if not k.startswith("step_"):
+                total -= 1
+                continue
+            if v["result"] == SUCCESSED:
+                covered += 1
+            f.write("|{step_name}|{result}|{error_message}|{error_stack}|{error_normalized}|{start_dt}|"
+                    "{end_dt}|\n".format(step_name=k, **v))
+        f.write("Coverage: {}/{}\n".format(covered, total))
+    print("Create coverage\n", file=sys.stderr)
+
+
+def raise_if():
+    """Re-raise the first recorded step exception, if any.
+
+    When several steps failed, the remaining failures are appended to the
+    first exception's message and the first traceback is preserved.
+    """
+    if exceptions:
+        if len(exceptions) <= 1:
+            raise exceptions[0][1][1]
+        message = "{}\nFollowed with exceptions in other steps:\n".format(str(exceptions[0][1][1]))
+        message += "\n".join(["{}: {}".format(h[0], h[1][1]) for h in exceptions[1:]])
+        raise exceptions[0][1][0](message).with_traceback(exceptions[0][1][2])
diff --git a/src/storagecache/azext_storagecache/tests/latest/__init__.py b/src/storagecache/azext_storagecache/tests/latest/__init__.py
new file mode 100644
index 00000000000..c9cfdc73e77
--- /dev/null
+++ b/src/storagecache/azext_storagecache/tests/latest/__init__.py
@@ -0,0 +1,12 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+__path__ = __import__('pkgutil').extend_path(__path__, __name__)
diff --git a/src/storagecache/azext_storagecache/tests/latest/preparers.py b/src/storagecache/azext_storagecache/tests/latest/preparers.py
new file mode 100644
index 00000000000..0879e51945a
--- /dev/null
+++ b/src/storagecache/azext_storagecache/tests/latest/preparers.py
@@ -0,0 +1,159 @@
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+import os
+from datetime import datetime
+from azure_devtools.scenario_tests import SingleValueReplacer
+from azure.cli.testsdk.preparers import NoTrafficRecordingPreparer
+from azure.cli.testsdk.exceptions import CliTestError
+from azure.cli.testsdk.reverse_dependency import get_dummy_cli
+
+
+KEY_RESOURCE_GROUP = 'rg'
+KEY_VIRTUAL_NETWORK = 'vnet'
+KEY_VNET_SUBNET = 'subnet'
+KEY_VNET_NIC = 'nic'
+
+
+class VirtualNetworkPreparer(NoTrafficRecordingPreparer, SingleValueReplacer):
+    """Test preparer that provisions a virtual network (with a 'default'
+    subnet) in the test's resource group, or reuses a dev-supplied one via
+    the AZURE_CLI_TEST_DEV_VIRTUAL_NETWORK_NAME environment variable."""
+
+    def __init__(self, name_prefix='clitest.vn',
+                 parameter_name='virtual_network',
+                 resource_group_name=None,
+                 resource_group_key=KEY_RESOURCE_GROUP,
+                 dev_setting_name='AZURE_CLI_TEST_DEV_VIRTUAL_NETWORK_NAME',
+                 random_name_length=24, key=KEY_VIRTUAL_NETWORK):
+        if ' ' in name_prefix:
+            raise CliTestError(
+                'Error: Space character in name prefix \'%s\'' % name_prefix)
+        super(VirtualNetworkPreparer, self).__init__(
+            name_prefix, random_name_length)
+        self.cli_ctx = get_dummy_cli()
+        self.parameter_name = parameter_name
+        self.key = key
+        self.resource_group_name = resource_group_name
+        self.resource_group_key = resource_group_key
+        self.dev_setting_name = os.environ.get(dev_setting_name, None)
+
+    def create_resource(self, name, **_):
+        # Dev override: skip provisioning entirely and hand back the name.
+        if self.dev_setting_name:
+            return {self.parameter_name: self.dev_setting_name, }
+
+        if not self.resource_group_name:
+            self.resource_group_name = self.test_class_instance.kwargs.get(
+                self.resource_group_key)
+            if not self.resource_group_name:
+                raise CliTestError("Error: No resource group configured!")
+
+        # Tag the vnet so leaked automation resources can be identified.
+        tags = {'product': 'azurecli', 'cause': 'automation',
+                'date': datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')}
+        if 'ENV_JOB_NAME' in os.environ:
+            tags['job'] = os.environ['ENV_JOB_NAME']
+        tags = ' '.join(['{}={}'.format(key, value)
+                         for key, value in tags.items()])
+        template = 'az network vnet create --resource-group {} --name {} --subnet-name default --tag ' + tags
+        self.live_only_execute(self.cli_ctx, template.format(
+            self.resource_group_name, name))
+
+        self.test_class_instance.kwargs[self.key] = name
+        return {self.parameter_name: name}
+
+    def remove_resource(self, name, **_):
+        # delete vnet if test is being recorded and if the vnet is not a dev rg
+        if not self.dev_setting_name:
+            self.live_only_execute(
+                self.cli_ctx,
+                'az network vnet delete --name {} --resource-group {}'.format(name, self.resource_group_name))
+
+
+class VnetSubnetPreparer(NoTrafficRecordingPreparer, SingleValueReplacer):
+    """Test preparer that exposes the 'default' subnet created by
+    VirtualNetworkPreparer; it does not provision anything itself.
+
+    NOTE(review): address_prefixes is stored but never used, and the test
+    kwargs entry is set to 'default' while the generated *name* is what is
+    returned as the parameter value — confirm callers rely on the kwargs
+    entry, not the returned name.
+    """
+
+    def __init__(self, name_prefix='clitest.vn',
+                 parameter_name='subnet',
+                 resource_group_key=KEY_RESOURCE_GROUP,
+                 vnet_key=KEY_VIRTUAL_NETWORK,
+                 address_prefixes="11.0.0.0/24",
+                 dev_setting_name='AZURE_CLI_TEST_DEV_VNET_SUBNET_NAME',
+                 key=KEY_VNET_SUBNET):
+        if ' ' in name_prefix:
+            raise CliTestError(
+                'Error: Space character in name prefix \'%s\'' % name_prefix)
+        super(VnetSubnetPreparer, self).__init__(name_prefix, 15)
+        self.cli_ctx = get_dummy_cli()
+        self.parameter_name = parameter_name
+        self.key = key
+        # [kwargs key, resolved value] pairs, resolved lazily in create_resource.
+        self.resource_group = [resource_group_key, None]
+        self.vnet = [vnet_key, None]
+        self.address_prefixes = address_prefixes
+        self.dev_setting_name = os.environ.get(dev_setting_name, None)
+
+    def create_resource(self, name, **_):
+        if self.dev_setting_name:
+            return {self.parameter_name: self.dev_setting_name, }
+
+        if not self.resource_group[1]:
+            self.resource_group[1] = self.test_class_instance.kwargs.get(
+                self.resource_group[0])
+            if not self.resource_group[1]:
+                raise CliTestError("Error: No resource group configured!")
+        if not self.vnet[1]:
+            self.vnet[1] = self.test_class_instance.kwargs.get(self.vnet[0])
+            if not self.vnet[1]:
+                raise CliTestError("Error: No vnet configured!")
+
+        # Reuse the 'default' subnet created alongside the vnet.
+        self.test_class_instance.kwargs[self.key] = 'default'
+        return {self.parameter_name: name}
+
+    def remove_resource(self, name, **_):
+        # Nothing to clean up: the subnet is removed with its vnet.
+        pass
+
+
+class VnetNicPreparer(NoTrafficRecordingPreparer, SingleValueReplacer):
+    """Test preparer that creates a NIC on the vnet's 'default' subnet.
+
+    NOTE(review): parameter_name defaults to 'subnet', which looks
+    copy-pasted from VnetSubnetPreparer — presumably it should be a
+    nic-specific name; confirm before relying on the injected parameter.
+    """
+
+    def __init__(self, name_prefix='clitest.nic',
+                 parameter_name='subnet',
+                 resource_group_key=KEY_RESOURCE_GROUP,
+                 vnet_key=KEY_VIRTUAL_NETWORK,
+                 dev_setting_name='AZURE_CLI_TEST_DEV_VNET_NIC_NAME',
+                 key=KEY_VNET_NIC):
+        if ' ' in name_prefix:
+            raise CliTestError(
+                'Error: Space character in name prefix \'%s\'' % name_prefix)
+        super(VnetNicPreparer, self).__init__(name_prefix, 15)
+        self.cli_ctx = get_dummy_cli()
+        self.parameter_name = parameter_name
+        self.key = key
+        # [kwargs key, resolved value] pairs, resolved lazily in create_resource.
+        self.resource_group = [resource_group_key, None]
+        self.vnet = [vnet_key, None]
+        self.dev_setting_name = os.environ.get(dev_setting_name, None)
+
+    def create_resource(self, name, **_):
+        if self.dev_setting_name:
+            return {self.parameter_name: self.dev_setting_name, }
+
+        if not self.resource_group[1]:
+            self.resource_group[1] = self.test_class_instance.kwargs.get(
+                self.resource_group[0])
+            if not self.resource_group[1]:
+                raise CliTestError("Error: No resource group configured!")
+        if not self.vnet[1]:
+            self.vnet[1] = self.test_class_instance.kwargs.get(self.vnet[0])
+            if not self.vnet[1]:
+                raise CliTestError("Error: No vnet configured!")
+
+        template = 'az network nic create --resource-group {} --name {} --vnet-name {} --subnet default '
+        self.live_only_execute(self.cli_ctx, template.format(
+            self.resource_group[1], name, self.vnet[1]))
+
+        self.test_class_instance.kwargs[self.key] = name
+        return {self.parameter_name: name}
+
+    def remove_resource(self, name, **_):
+        # Only clean up resources this preparer actually created.
+        if not self.dev_setting_name:
+            self.live_only_execute(
+                self.cli_ctx,
+                'az network nic delete --name {} --resource-group {}'.format(name, self.resource_group[1]))
diff --git a/src/storagecache/azext_storagecache/tests/latest/test_storagecache_scenario.py b/src/storagecache/azext_storagecache/tests/latest/test_storagecache_scenario.py
new file mode 100644
index 00000000000..74786a9739d
--- /dev/null
+++ b/src/storagecache/azext_storagecache/tests/latest/test_storagecache_scenario.py
@@ -0,0 +1,310 @@
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+import os
+from azure.cli.testsdk import ScenarioTest
+from .. import try_manual, raise_if, calc_coverage
+from azure.cli.testsdk import ResourceGroupPreparer
+from .preparers import VirtualNetworkPreparer
+
+
+TEST_DIR = os.path.abspath(os.path.join(os.path.abspath(__file__), '..'))
+
+
+# Env setup
+@try_manual
+def setup(test, rg):
+    # Placeholder; a manual override (azext_storagecache.manual) may replace it.
+    pass
+
+
+# EXAMPLE: /AscOperations/get/AscOperations_Get
+@try_manual
+def step__ascoperations_get_ascoperations_get(test, rg):
+    test.cmd('az storagecache asc-operation show '
+             '--operation-id "testoperationid" '
+             '--location "westus"',
+             checks=[])
+
+
+# EXAMPLE: /Caches/put/Caches_CreateOrUpdate
+@try_manual
+def step__caches_put_caches_createorupdate(test, rg):
+    test.cmd('az storagecache cache create '
+             '--location "westus" '
+             '--cache-size-gb 3072 '
+             '--subnet "/subscriptions/{subscription_id}/resourceGroups/{rg}/providers/Microsoft.Network/virtualNetwork'
+             's/{vn}/subnets/default" '
+             '--sku-name "Standard_2G" '
+             '--tags "{{\\"Dept\\":\\"ContosoAds\\"}}" '
+             '--cache-name "sc1" '
+             '--resource-group "{rg}"',
+             checks=[])
+
+
+# EXAMPLE: /Caches/put/Caches_CreateOrUpdate_ldap_only
+# NOTE(review): identical to the base create step — the generated example
+# appears to have dropped its ldap-specific options; confirm.
+@try_manual
+def step__caches_put_caches_createorupdate_ldap_only(test, rg):
+    test.cmd('az storagecache cache create '
+             '--location "westus" '
+             '--cache-size-gb 3072 '
+             '--subnet "/subscriptions/{subscription_id}/resourceGroups/{rg}/providers/Microsoft.Network/virtualNetwork'
+             's/{vn}/subnets/default" '
+             '--sku-name "Standard_2G" '
+             '--tags "{{\\"Dept\\":\\"ContosoAds\\"}}" '
+             '--cache-name "sc1" '
+             '--resource-group "{rg}"',
+             checks=[])
+
+
+# EXAMPLE: /Caches/get/Caches_Get
+@try_manual
+def step__caches_get_caches_get(test, rg):
+    test.cmd('az storagecache cache show '
+             '--cache-name "sc1" '
+             '--resource-group "{rg}"',
+             checks=[])
+
+
+# EXAMPLE: /Caches/get/Caches_List
+@try_manual
+def step__caches_get_caches_list(test, rg):
+    # -g "" is falsy in the custom command, exercising the
+    # subscription-wide list path.
+    test.cmd('az storagecache cache list '
+             '-g ""',
+             checks=[])
+
+
+# EXAMPLE: /Caches/get/Caches_ListByResourceGroup
+@try_manual
+def step__caches_get_caches_listbyresourcegroup(test, rg):
+    test.cmd('az storagecache cache list '
+             '--resource-group "{rg}"',
+             checks=[])
+
+
+# EXAMPLE: /Caches/patch/Caches_Update
+@try_manual
+def step__caches_patch_caches_update(test, rg):
+    test.cmd('az storagecache cache update '
+             '--location "westus" '
+             '--cache-size-gb 3072 '
+             '--subnet "/subscriptions/{subscription_id}/resourceGroups/{rg}/providers/Microsoft.Network/virtualNetwork'
+             's/{vn}/subnets/default" '
+             '--sku-name "Standard_2G" '
+             '--tags "{{\\"Dept\\":\\"ContosoAds\\"}}" '
+             '--cache-name "sc1" '
+             '--resource-group "{rg}"',
+             checks=[])
+
+
+# EXAMPLE: /Caches/patch/Caches_Update_ldap_only
+# NOTE(review): identical to the base update step — likely lost its
+# ldap-specific options during generation; confirm.
+@try_manual
+def step__caches_patch_caches_update_ldap_only(test, rg):
+    test.cmd('az storagecache cache update '
+             '--location "westus" '
+             '--cache-size-gb 3072 '
+             '--subnet "/subscriptions/{subscription_id}/resourceGroups/{rg}/providers/Microsoft.Network/virtualNetwork'
+             's/{vn}/subnets/default" '
+             '--sku-name "Standard_2G" '
+             '--tags "{{\\"Dept\\":\\"ContosoAds\\"}}" '
+             '--cache-name "sc1" '
+             '--resource-group "{rg}"',
+             checks=[])
+
+
+# EXAMPLE: /Caches/post/Caches_Flush
+# NOTE(review): these post/delete examples target cache "sc" while the
+# create steps use "sc1" — copied from service examples; confirm intent.
+@try_manual
+def step__caches_post_caches_flush(test, rg):
+    test.cmd('az storagecache cache flush '
+             '--cache-name "sc" '
+             '--resource-group "{rg}"',
+             checks=[])
+
+
+# EXAMPLE: /Caches/post/Caches_Start
+@try_manual
+def step__caches_post_caches_start(test, rg):
+    test.cmd('az storagecache cache start '
+             '--cache-name "sc" '
+             '--resource-group "{rg}"',
+             checks=[])
+
+
+# EXAMPLE: /Caches/post/Caches_Stop
+@try_manual
+def step__caches_post_caches_stop(test, rg):
+    test.cmd('az storagecache cache stop '
+             '--cache-name "sc" '
+             '--resource-group "{rg}"',
+             checks=[])
+
+
+# EXAMPLE: /Caches/post/Caches_UpgradeFirmware
+@try_manual
+def step__caches_post_caches_upgradefirmware(test, rg):
+    test.cmd('az storagecache cache upgrade-firmware '
+             '--cache-name "sc1" '
+             '--resource-group "{rg}"',
+             checks=[])
+
+
+# EXAMPLE: /Caches/delete/Caches_Delete
+@try_manual
+def step__caches_delete_caches_delete(test, rg):
+    # -y skips the confirmation prompt added by confirmation=True.
+    test.cmd('az storagecache cache delete -y '
+             '--cache-name "sc" '
+             '--resource-group "{rg}"',
+             checks=[])
+
+
+# EXAMPLE: /Skus/get/Skus_List
+@try_manual
+def step__skus_get_skus_list(test, rg):
+    test.cmd('az storagecache sku list',
+             checks=[])
+
+
+# EXAMPLE: /StorageTargets/put/StorageTargets_CreateOrUpdate
+# NOTE(review): --junctions is passed twice — whether both junctions are
+# kept depends on the argument's action (append vs overwrite); confirm.
+@try_manual
+def step__storagetargets_put(test, rg):
+    test.cmd('az storagecache storage-target create '
+             '--cache-name "sc1" '
+             '--resource-group "{rg}" '
+             '--name "{myStorageTarget}" '
+             '--junctions namespace-path="/path/on/cache" nfs-access-policy="default" nfs-export="exp1" '
+             'target-path="/path/on/exp1" '
+             '--junctions namespace-path="/path2/on/cache" nfs-access-policy="rootSquash" nfs-export="exp2" '
+             'target-path="/path2/on/exp2" '
+             '--nfs3 target="10.0.44.44" usage-model="READ_HEAVY_INFREQ" '
+             '--target-type "nfs3"',
+             checks=[
+                 test.check("name", "{myStorageTarget}", case_sensitive=False),
+                 test.check("nfs3.target", "10.0.44.44", case_sensitive=False),
+                 test.check("nfs3.usageModel", "READ_HEAVY_INFREQ", case_sensitive=False),
+                 test.check("targetType", "nfs3", case_sensitive=False),
+             ])
+    # Block until provisioning completes before dependent steps run.
+    test.cmd('az storagecache storage-target wait --created '
+             '--resource-group "{rg}" '
+             '--name "{myStorageTarget}"',
+             checks=[])
+
+
+# EXAMPLE: /StorageTargets/put/StorageTargets_CreateOrUpdate_NoJunctions
+@try_manual
+def step__storagetargets_put2(test, rg):
+    test.cmd('az storagecache storage-target create '
+             '--cache-name "sc1" '
+             '--resource-group "{rg}" '
+             '--name "{myStorageTarget}" '
+             '--nfs3 target="10.0.44.44" usage-model="READ_HEAVY_INFREQ" '
+             '--target-type "nfs3"',
+             checks=[
+                 test.check("name", "{myStorageTarget}", case_sensitive=False),
+                 test.check("nfs3.target", "10.0.44.44", case_sensitive=False),
+                 test.check("nfs3.usageModel", "READ_HEAVY_INFREQ", case_sensitive=False),
+                 test.check("targetType", "nfs3", case_sensitive=False),
+             ])
+    test.cmd('az storagecache storage-target wait --created '
+             '--resource-group "{rg}" '
+             '--name "{myStorageTarget}"',
+             checks=[])
+
+
+# EXAMPLE: /StorageTargets/get/StorageTargets_Get
+@try_manual
+def step__storagetargets_get_storagetargets_get(test, rg):
+    test.cmd('az storagecache storage-target show '
+             '--cache-name "sc1" '
+             '--resource-group "{rg}" '
+             '--name "{myStorageTarget}"',
+             checks=[
+                 test.check("name", "{myStorageTarget}", case_sensitive=False),
+                 test.check("targetType", "nfs3", case_sensitive=False),
+             ])
+
+
+# EXAMPLE: /StorageTargets/get/StorageTargets_List
+@try_manual
+def step__storagetargets_get_storagetargets_list(test, rg):
+    # Both put steps reuse the same name, so exactly one target is expected.
+    test.cmd('az storagecache storage-target list '
+             '--cache-name "sc1" '
+             '--resource-group "{rg}"',
+             checks=[
+                 test.check('length(@)', 1),
+             ])
+
+
+# EXAMPLE: /StorageTargets/delete/StorageTargets_Delete
+@try_manual
+def step__storagetargets_delete_storagetargets_delete(test, rg):
+    test.cmd('az storagecache storage-target delete -y '
+             '--cache-name "sc1" '
+             '--resource-group "{rg}" '
+             '--name "{myStorageTarget}"',
+             checks=[])
+
+
+# EXAMPLE: /UsageModels/get/UsageModels_List
+@try_manual
+def step__usagemodels_get_usagemodels_list(test, rg):
+    test.cmd('az storagecache usage-model list',
+             checks=[])
+
+
+# Env cleanup
+@try_manual
+def cleanup(test, rg):
+    # Placeholder; resource cleanup is handled by the preparers.
+    pass
+
+
+# Testcase
+@try_manual
+def call_scenario(test, rg):
+    """Run every generated step in service-example order: create/read/update
+    the cache, exercise its lifecycle actions, then the storage targets."""
+    setup(test, rg)
+    step__ascoperations_get_ascoperations_get(test, rg)
+    step__caches_put_caches_createorupdate(test, rg)
+    step__caches_put_caches_createorupdate_ldap_only(test, rg)
+    step__caches_get_caches_get(test, rg)
+    step__caches_get_caches_list(test, rg)
+    step__caches_get_caches_listbyresourcegroup(test, rg)
+    step__caches_patch_caches_update(test, rg)
+    step__caches_patch_caches_update_ldap_only(test, rg)
+    step__caches_post_caches_flush(test, rg)
+    step__caches_post_caches_start(test, rg)
+    step__caches_post_caches_stop(test, rg)
+    step__caches_post_caches_upgradefirmware(test, rg)
+    step__caches_delete_caches_delete(test, rg)
+    step__skus_get_skus_list(test, rg)
+    step__storagetargets_put(test, rg)
+    step__storagetargets_put2(test, rg)
+    step__storagetargets_get_storagetargets_get(test, rg)
+    step__storagetargets_get_storagetargets_list(test, rg)
+    step__storagetargets_delete_storagetargets_delete(test, rg)
+    step__usagemodels_get_usagemodels_list(test, rg)
+    cleanup(test, rg)
+
+
+@try_manual
+class StorageCacheManagementClientScenarioTest(ScenarioTest):
+    """Generated end-to-end scenario test for the storagecache extension."""
+
+    # [:7] truncates both prefixes to 'clitest' (name-length limits).
+    @ResourceGroupPreparer(name_prefix='cliteststoragecache_scgroup'[:7], key='rg', parameter_name='rg')
+    @VirtualNetworkPreparer(name_prefix='cliteststoragecache_scvnet'[:7], key='vn', resource_group_key='rg')
+    def test_storagecache(self, rg):
+
+        self.kwargs.update({
+            'subscription_id': self.get_subscription_id()
+        })
+
+        self.kwargs.update({
+            'myStorageTarget': 'st1',
+        })
+
+        call_scenario(self, rg)
+        calc_coverage(__file__)
+        raise_if()
diff --git a/src/storagecache/azext_storagecache/vendored_sdks/__init__.py b/src/storagecache/azext_storagecache/vendored_sdks/__init__.py
new file mode 100644
index 00000000000..c9cfdc73e77
--- /dev/null
+++ b/src/storagecache/azext_storagecache/vendored_sdks/__init__.py
@@ -0,0 +1,12 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+
+# Declare this directory as a pkgutil-style namespace package so multiple
+# distributions can contribute modules under the same package path.
+__path__ = __import__('pkgutil').extend_path(__path__, __name__)
diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/__init__.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/__init__.py
new file mode 100644
index 00000000000..1d1e4ecf4e2
--- /dev/null
+++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/__init__.py
@@ -0,0 +1,16 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from ._storage_cache_management_client import StorageCacheManagementClient
+__all__ = ['StorageCacheManagementClient']
+
+# Apply hand-written customizations when an optional _patch module ships with
+# the package; silently skip when it is absent.
+try:
+    from ._patch import patch_sdk  # type: ignore
+    patch_sdk()
+except ImportError:
+    pass
diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/_configuration.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/_configuration.py
new file mode 100644
index 00000000000..6d37443dd7d
--- /dev/null
+++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/_configuration.py
@@ -0,0 +1,70 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from typing import TYPE_CHECKING
+
+from azure.core.configuration import Configuration
+from azure.core.pipeline import policies
+from azure.mgmt.core.policies import ARMHttpLoggingPolicy
+
+if TYPE_CHECKING:
+ # pylint: disable=unused-import,ungrouped-imports
+ from typing import Any
+
+ from azure.core.credentials import TokenCredential
+
+VERSION = "unknown"
+
+class StorageCacheManagementClientConfiguration(Configuration):
+    """Configuration for StorageCacheManagementClient.
+
+    Note that all parameters used to create this instance are saved as instance
+    attributes.
+
+    :param credential: Credential needed for the client to connect to Azure.
+    :type credential: ~azure.core.credentials.TokenCredential
+    :param subscription_id: Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms part of the URI for every service call.
+    :type subscription_id: str
+    """
+
+    def __init__(
+        self,
+        credential,  # type: "TokenCredential"
+        subscription_id,  # type: str
+        **kwargs  # type: Any
+    ):
+        # type: (...) -> None
+        # Fail fast on the two mandatory parameters.
+        if credential is None:
+            raise ValueError("Parameter 'credential' must not be None.")
+        if subscription_id is None:
+            raise ValueError("Parameter 'subscription_id' must not be None.")
+        super(StorageCacheManagementClientConfiguration, self).__init__(**kwargs)
+
+        self.credential = credential
+        self.subscription_id = subscription_id
+        # Service API version sent with every request built by this client.
+        self.api_version = "2020-10-01"
+        # AAD scope(s) requested for the bearer token; overridable via kwargs.
+        self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
+        kwargs.setdefault('sdk_moniker', 'storagecachemanagementclient/{}'.format(VERSION))
+        self._configure(**kwargs)
+
+    def _configure(
+        self,
+        **kwargs  # type: Any
+    ):
+        # type: (...) -> None
+        # Each pipeline policy may be injected through kwargs; otherwise the
+        # default azure-core policy is constructed.
+        self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
+        self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
+        self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
+        self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
+        self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
+        self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
+        self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
+        self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
+        self.authentication_policy = kwargs.get('authentication_policy')
+        if self.credential and not self.authentication_policy:
+            # Default to bearer-token authentication with the configured scopes.
+            self.authentication_policy = policies.BearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/_storage_cache_management_client.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/_storage_cache_management_client.py
new file mode 100644
index 00000000000..f0f1db4bf8d
--- /dev/null
+++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/_storage_cache_management_client.py
@@ -0,0 +1,94 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from typing import TYPE_CHECKING
+
+from azure.mgmt.core import ARMPipelineClient
+from msrest import Deserializer, Serializer
+
+if TYPE_CHECKING:
+ # pylint: disable=unused-import,ungrouped-imports
+ from typing import Any, Optional
+
+ from azure.core.credentials import TokenCredential
+
+from ._configuration import StorageCacheManagementClientConfiguration
+from .operations import OperationOperations
+from .operations import SkuOperations
+from .operations import UsageModelOperations
+from .operations import ASCOperationOperations
+from .operations import CacheOperations
+from .operations import StorageTargetOperations
+from . import models
+
+
+class StorageCacheManagementClient(object):
+    """A Storage Cache provides scalable caching service for NAS clients, serving data from either NFSv3 or Blob at-rest storage (referred to as "Storage Targets"). These operations allow you to manage Caches.
+
+    :ivar operation: OperationOperations operations
+    :vartype operation: storage_cache_management_client.operations.OperationOperations
+    :ivar sku: SkuOperations operations
+    :vartype sku: storage_cache_management_client.operations.SkuOperations
+    :ivar usage_model: UsageModelOperations operations
+    :vartype usage_model: storage_cache_management_client.operations.UsageModelOperations
+    :ivar asc_operation: ASCOperationOperations operations
+    :vartype asc_operation: storage_cache_management_client.operations.ASCOperationOperations
+    :ivar cache: CacheOperations operations
+    :vartype cache: storage_cache_management_client.operations.CacheOperations
+    :ivar storage_target: StorageTargetOperations operations
+    :vartype storage_target: storage_cache_management_client.operations.StorageTargetOperations
+    :param credential: Credential needed for the client to connect to Azure.
+    :type credential: ~azure.core.credentials.TokenCredential
+    :param subscription_id: Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms part of the URI for every service call.
+    :type subscription_id: str
+    :param str base_url: Service URL
+    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
+    """
+
+    def __init__(
+        self,
+        credential,  # type: "TokenCredential"
+        subscription_id,  # type: str
+        base_url=None,  # type: Optional[str]
+        **kwargs  # type: Any
+    ):
+        # type: (...) -> None
+        # Default to the public Azure Resource Manager endpoint.
+        if not base_url:
+            base_url = 'https://management.azure.com'
+        self._config = StorageCacheManagementClientConfiguration(credential, subscription_id, **kwargs)
+        self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
+
+        # Register every model class from the models module, by name, with
+        # the msrest serializer/deserializer pair.
+        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
+        self._serialize = Serializer(client_models)
+        self._deserialize = Deserializer(client_models)
+
+        # One operation-group instance per service area, all sharing the same
+        # pipeline client, configuration and (de)serializers.
+        self.operation = OperationOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+        self.sku = SkuOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+        self.usage_model = UsageModelOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+        self.asc_operation = ASCOperationOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+        self.cache = CacheOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+        self.storage_target = StorageTargetOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+
+    def close(self):
+        # type: () -> None
+        """Close the underlying pipeline client."""
+        self._client.close()
+
+    def __enter__(self):
+        # type: () -> StorageCacheManagementClient
+        # Delegate context management to the pipeline client.
+        self._client.__enter__()
+        return self
+
+    def __exit__(self, *exc_details):
+        # type: (Any) -> None
+        self._client.__exit__(*exc_details)
diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/__init__.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/__init__.py
new file mode 100644
index 00000000000..b1121365385
--- /dev/null
+++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/__init__.py
@@ -0,0 +1,10 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from ._storage_cache_management_client import StorageCacheManagementClient
+__all__ = ['StorageCacheManagementClient']
diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/_configuration.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/_configuration.py
new file mode 100644
index 00000000000..126896402d4
--- /dev/null
+++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/_configuration.py
@@ -0,0 +1,66 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from typing import Any, TYPE_CHECKING
+
+from azure.core.configuration import Configuration
+from azure.core.pipeline import policies
+from azure.mgmt.core.policies import ARMHttpLoggingPolicy
+
+if TYPE_CHECKING:
+ # pylint: disable=unused-import,ungrouped-imports
+ from azure.core.credentials_async import AsyncTokenCredential
+
+VERSION = "unknown"
+
+class StorageCacheManagementClientConfiguration(Configuration):
+    """Configuration for StorageCacheManagementClient.
+
+    Note that all parameters used to create this instance are saved as instance
+    attributes.
+
+    :param credential: Credential needed for the client to connect to Azure.
+    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
+    :param subscription_id: Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms part of the URI for every service call.
+    :type subscription_id: str
+    """
+
+    def __init__(
+        self,
+        credential: "AsyncTokenCredential",
+        subscription_id: str,
+        **kwargs: Any
+    ) -> None:
+        # Fail fast on the two mandatory parameters.
+        if credential is None:
+            raise ValueError("Parameter 'credential' must not be None.")
+        if subscription_id is None:
+            raise ValueError("Parameter 'subscription_id' must not be None.")
+        super(StorageCacheManagementClientConfiguration, self).__init__(**kwargs)
+
+        self.credential = credential
+        self.subscription_id = subscription_id
+        # Service API version sent with every request built by this client.
+        self.api_version = "2020-10-01"
+        # AAD scope(s) requested for the bearer token; overridable via kwargs.
+        self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
+        kwargs.setdefault('sdk_moniker', 'storagecachemanagementclient/{}'.format(VERSION))
+        self._configure(**kwargs)
+
+    def _configure(
+        self,
+        **kwargs: Any
+    ) -> None:
+        # Each policy may be overridden via kwargs; the async variants of the
+        # retry, redirect and bearer-token policies are used here.
+        self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
+        self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
+        self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
+        self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
+        self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
+        self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
+        self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
+        self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
+        self.authentication_policy = kwargs.get('authentication_policy')
+        if self.credential and not self.authentication_policy:
+            # Default to async bearer-token auth with the configured scopes.
+            self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/_storage_cache_management_client.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/_storage_cache_management_client.py
new file mode 100644
index 00000000000..2276716af3d
--- /dev/null
+++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/_storage_cache_management_client.py
@@ -0,0 +1,88 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from typing import Any, Optional, TYPE_CHECKING
+
+from azure.mgmt.core import AsyncARMPipelineClient
+from msrest import Deserializer, Serializer
+
+if TYPE_CHECKING:
+ # pylint: disable=unused-import,ungrouped-imports
+ from azure.core.credentials_async import AsyncTokenCredential
+
+from ._configuration import StorageCacheManagementClientConfiguration
+from .operations import OperationOperations
+from .operations import SkuOperations
+from .operations import UsageModelOperations
+from .operations import ASCOperationOperations
+from .operations import CacheOperations
+from .operations import StorageTargetOperations
+from .. import models
+
+
+class StorageCacheManagementClient(object):
+    """A Storage Cache provides scalable caching service for NAS clients, serving data from either NFSv3 or Blob at-rest storage (referred to as "Storage Targets"). These operations allow you to manage Caches.
+
+    :ivar operation: OperationOperations operations
+    :vartype operation: storage_cache_management_client.aio.operations.OperationOperations
+    :ivar sku: SkuOperations operations
+    :vartype sku: storage_cache_management_client.aio.operations.SkuOperations
+    :ivar usage_model: UsageModelOperations operations
+    :vartype usage_model: storage_cache_management_client.aio.operations.UsageModelOperations
+    :ivar asc_operation: ASCOperationOperations operations
+    :vartype asc_operation: storage_cache_management_client.aio.operations.ASCOperationOperations
+    :ivar cache: CacheOperations operations
+    :vartype cache: storage_cache_management_client.aio.operations.CacheOperations
+    :ivar storage_target: StorageTargetOperations operations
+    :vartype storage_target: storage_cache_management_client.aio.operations.StorageTargetOperations
+    :param credential: Credential needed for the client to connect to Azure.
+    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
+    :param subscription_id: Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms part of the URI for every service call.
+    :type subscription_id: str
+    :param str base_url: Service URL
+    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
+    """
+
+    def __init__(
+        self,
+        credential: "AsyncTokenCredential",
+        subscription_id: str,
+        base_url: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        # Default to the public Azure Resource Manager endpoint.
+        if not base_url:
+            base_url = 'https://management.azure.com'
+        self._config = StorageCacheManagementClientConfiguration(credential, subscription_id, **kwargs)
+        self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
+
+        # Register every model class from the shared models module, by name,
+        # with the msrest serializer/deserializer pair.
+        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
+        self._serialize = Serializer(client_models)
+        self._deserialize = Deserializer(client_models)
+
+        # One async operation-group instance per service area, all sharing
+        # the same pipeline client, configuration and (de)serializers.
+        self.operation = OperationOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+        self.sku = SkuOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+        self.usage_model = UsageModelOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+        self.asc_operation = ASCOperationOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+        self.cache = CacheOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+        self.storage_target = StorageTargetOperations(
+            self._client, self._config, self._serialize, self._deserialize)
+
+    async def close(self) -> None:
+        """Close the underlying pipeline client."""
+        await self._client.close()
+
+    async def __aenter__(self) -> "StorageCacheManagementClient":
+        # Delegate async context management to the pipeline client.
+        await self._client.__aenter__()
+        return self
+
+    async def __aexit__(self, *exc_details) -> None:
+        await self._client.__aexit__(*exc_details)
diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations/__init__.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations/__init__.py
new file mode 100644
index 00000000000..52d521bf575
--- /dev/null
+++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations/__init__.py
@@ -0,0 +1,23 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from ._operation_operations import OperationOperations
+from ._sku_operations import SkuOperations
+from ._usage_model_operations import UsageModelOperations
+from ._asc_operation_operations import ASCOperationOperations
+from ._cache_operations import CacheOperations
+from ._storage_target_operations import StorageTargetOperations
+
+__all__ = [
+ 'OperationOperations',
+ 'SkuOperations',
+ 'UsageModelOperations',
+ 'ASCOperationOperations',
+ 'CacheOperations',
+ 'StorageTargetOperations',
+]
diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations/_asc_operation_operations.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations/_asc_operation_operations.py
new file mode 100644
index 00000000000..85965cfbe6e
--- /dev/null
+++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations/_asc_operation_operations.py
@@ -0,0 +1,99 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from typing import Any, Callable, Dict, Generic, Optional, TypeVar
+import warnings
+
+from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
+from azure.core.pipeline import PipelineResponse
+from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
+from azure.mgmt.core.exceptions import ARMErrorFormat
+
+from ... import models
+
+T = TypeVar('T')
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
+
+class ASCOperationOperations:
+    """ASCOperationOperations async operations.
+
+    You should not instantiate this class directly. Instead, you should create a Client instance that
+    instantiates it for you and attaches it as an attribute.
+
+    :ivar models: Alias to model classes used in this operation group.
+    :type models: ~storage_cache_management_client.models
+    :param client: Client for service requests.
+    :param config: Configuration of service client.
+    :param serializer: An object model serializer.
+    :param deserializer: An object model deserializer.
+    """
+
+    models = models
+
+    def __init__(self, client, config, serializer, deserializer) -> None:
+        # All collaborators are supplied by the service client that builds
+        # this operation group.
+        self._client = client
+        self._serialize = serializer
+        self._deserialize = deserializer
+        self._config = config
+
+    async def get(
+        self,
+        location: str,
+        operation_id: str,
+        **kwargs
+    ) -> "models.ASCOperation":
+        """Gets the status of an asynchronous operation for the Azure HPC Cache.
+
+        :param location: The name of the region used to look up the operation.
+        :type location: str
+        :param operation_id: The operation id which uniquely identifies the asynchronous operation.
+        :type operation_id: str
+        :keyword callable cls: A custom type or function that will be passed the direct response
+        :return: ASCOperation, or the result of cls(response)
+        :rtype: ~storage_cache_management_client.models.ASCOperation
+        :raises: ~azure.core.exceptions.HttpResponseError
+        """
+        cls = kwargs.pop('cls', None)  # type: ClsType["models.ASCOperation"]
+        # Map well-known HTTP failures to typed azure-core exceptions; callers
+        # may extend/override the mapping via the 'error_map' kwarg.
+        error_map = {
+            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+        }
+        error_map.update(kwargs.pop('error_map', {}))
+        api_version = "2020-10-01"
+        accept = "application/json"
+
+        # Construct URL from the operation's metadata template.
+        url = self.get.metadata['url']  # type: ignore
+        path_format_arguments = {
+            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
+            'location': self._serialize.url("location", location, 'str'),
+            'operationId': self._serialize.url("operation_id", operation_id, 'str'),
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}  # type: Dict[str, Any]
+        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
+
+        # Construct headers
+        header_parameters = {}  # type: Dict[str, Any]
+        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+        request = self._client.get(url, query_parameters, header_parameters)
+        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        # 200 is the only success status handled for this operation.
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
+
+        deserialized = self._deserialize('ASCOperation', pipeline_response)
+
+        if cls:
+            # Hand the raw pipeline response to a caller-provided callback.
+            return cls(pipeline_response, deserialized, {})
+
+        return deserialized
+    get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.StorageCache/locations/{location}/ascOperations/{operationId}'}  # type: ignore
diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations/_cache_operations.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations/_cache_operations.py
new file mode 100644
index 00000000000..9b1d292217f
--- /dev/null
+++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations/_cache_operations.py
@@ -0,0 +1,1231 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from typing import Any, AsyncIterable, Callable, Dict, Generic, List, Optional, TypeVar, Union
+import warnings
+
+from azure.core.async_paging import AsyncItemPaged, AsyncList
+from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
+from azure.core.pipeline import PipelineResponse
+from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
+from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
+from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
+
+from ... import models
+
+T = TypeVar('T')
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
+
+class CacheOperations:
+ """CacheOperations async operations.
+
+ You should not instantiate this class directly. Instead, you should create a Client instance that
+ instantiates it for you and attaches it as an attribute.
+
+ :ivar models: Alias to model classes used in this operation group.
+ :type models: ~storage_cache_management_client.models
+ :param client: Client for service requests.
+ :param config: Configuration of service client.
+ :param serializer: An object model serializer.
+ :param deserializer: An object model deserializer.
+ """
+
+ models = models
+
+    def __init__(self, client, config, serializer, deserializer) -> None:
+        # All collaborators are supplied by the service client that builds
+        # this operation group.
+        self._client = client
+        self._serialize = serializer
+        self._deserialize = deserializer
+        self._config = config
+
+    def list(
+        self,
+        **kwargs
+    ) -> AsyncIterable["models.CachesListResult"]:
+        """Returns all Caches the user has access to under a subscription.
+
+        :keyword callable cls: A custom type or function that will be passed the direct response
+        :return: An iterator like instance of either CachesListResult or the result of cls(response)
+        :rtype: ~azure.core.async_paging.AsyncItemPaged[~storage_cache_management_client.models.CachesListResult]
+        :raises: ~azure.core.exceptions.HttpResponseError
+        """
+        cls = kwargs.pop('cls', None)  # type: ClsType["models.CachesListResult"]
+        # Map well-known HTTP failures to typed azure-core exceptions.
+        error_map = {
+            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+        }
+        error_map.update(kwargs.pop('error_map', {}))
+        api_version = "2020-10-01"
+        accept = "application/json"
+
+        def prepare_request(next_link=None):
+            # Construct headers
+            header_parameters = {}  # type: Dict[str, Any]
+            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+            if not next_link:
+                # First page: construct URL from the operation metadata.
+                url = self.list.metadata['url']  # type: ignore
+                path_format_arguments = {
+                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
+                }
+                url = self._client.format_url(url, **path_format_arguments)
+                # Construct parameters
+                query_parameters = {}  # type: Dict[str, Any]
+                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
+
+                request = self._client.get(url, query_parameters, header_parameters)
+            else:
+                # Subsequent pages: the service returns a complete next link.
+                url = next_link
+                query_parameters = {}  # type: Dict[str, Any]
+                request = self._client.get(url, query_parameters, header_parameters)
+            return request
+
+        async def extract_data(pipeline_response):
+            # Deserialize one page and return (next-page link, items).
+            deserialized = self._deserialize('CachesListResult', pipeline_response)
+            list_of_elem = deserialized.value
+            if cls:
+                list_of_elem = cls(list_of_elem)
+            return deserialized.next_link or None, AsyncList(list_of_elem)
+
+        async def get_next(next_link=None):
+            request = prepare_request(next_link)
+
+            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+            response = pipeline_response.http_response
+
+            if response.status_code not in [200]:
+                map_error(status_code=response.status_code, response=response, error_map=error_map)
+                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
+
+            return pipeline_response
+
+        # Lazily fetch pages as the caller iterates.
+        return AsyncItemPaged(
+            get_next, extract_data
+        )
+    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.StorageCache/caches'}  # type: ignore
+
+    def list_by_resource_group(
+        self,
+        resource_group_name: str,
+        **kwargs
+    ) -> AsyncIterable["models.CachesListResult"]:
+        """Returns all Caches the user has access to under a resource group.
+
+        :param resource_group_name: Target resource group.
+        :type resource_group_name: str
+        :keyword callable cls: A custom type or function that will be passed the direct response
+        :return: An iterator like instance of either CachesListResult or the result of cls(response)
+        :rtype: ~azure.core.async_paging.AsyncItemPaged[~storage_cache_management_client.models.CachesListResult]
+        :raises: ~azure.core.exceptions.HttpResponseError
+        """
+        cls = kwargs.pop('cls', None)  # type: ClsType["models.CachesListResult"]
+        # Map well-known HTTP failures to typed azure-core exceptions.
+        error_map = {
+            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+        }
+        error_map.update(kwargs.pop('error_map', {}))
+        api_version = "2020-10-01"
+        accept = "application/json"
+
+        def prepare_request(next_link=None):
+            # Construct headers
+            header_parameters = {}  # type: Dict[str, Any]
+            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+            if not next_link:
+                # First page: construct URL from the operation metadata.
+                url = self.list_by_resource_group.metadata['url']  # type: ignore
+                path_format_arguments = {
+                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
+                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
+                }
+                url = self._client.format_url(url, **path_format_arguments)
+                # Construct parameters
+                query_parameters = {}  # type: Dict[str, Any]
+                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
+
+                request = self._client.get(url, query_parameters, header_parameters)
+            else:
+                # Subsequent pages: the service returns a complete next link.
+                url = next_link
+                query_parameters = {}  # type: Dict[str, Any]
+                request = self._client.get(url, query_parameters, header_parameters)
+            return request
+
+        async def extract_data(pipeline_response):
+            # Deserialize one page and return (next-page link, items).
+            deserialized = self._deserialize('CachesListResult', pipeline_response)
+            list_of_elem = deserialized.value
+            if cls:
+                list_of_elem = cls(list_of_elem)
+            return deserialized.next_link or None, AsyncList(list_of_elem)
+
+        async def get_next(next_link=None):
+            request = prepare_request(next_link)
+
+            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+            response = pipeline_response.http_response
+
+            if response.status_code not in [200]:
+                map_error(status_code=response.status_code, response=response, error_map=error_map)
+                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
+
+            return pipeline_response
+
+        # Lazily fetch pages as the caller iterates.
+        return AsyncItemPaged(
+            get_next, extract_data
+        )
+    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches'}  # type: ignore
+
    async def _delete_initial(
        self,
        resource_group_name: str,
        cache_name: str,
        **kwargs
    ) -> None:
        """Send the initial DELETE request for a Cache.

        Internal helper for :meth:`begin_delete`; it performs a single DELETE
        call and does not poll the resulting long-running operation.

        :param resource_group_name: Target resource group.
        :type resource_group_name: str
        :param cache_name: Name of Cache. Length of name must not be greater than 80 and chars must be
         from the [-0-9a-zA-Z_] char class.
        :type cache_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-10-01"
        accept = "application/json"

        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200/202/204 are all acceptable initial responses for this LRO delete.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}'}  # type: ignore
+
    async def begin_delete(
        self,
        resource_group_name: str,
        cache_name: str,
        **kwargs
    ) -> AsyncLROPoller[None]:
        """Schedules a Cache for deletion.

        :param resource_group_name: Target resource group.
        :type resource_group_name: str
        :param cache_name: Name of Cache. Length of name must not be greater than 80 and chars must be
         from the [-0-9a-zA-Z_] char class.
        :type cache_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial DELETE when not resuming a saved poller state.
        if cont_token is None:
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                cache_name=cache_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # These kwargs applied only to the initial request; drop them before polling.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Delete has no response body; only invoke the custom cls hook, if any.
            if cls:
                return cls(pipeline_response, None, {})

        # Passed to AsyncARMPolling so it can format operation/polling URLs.
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}'}  # type: ignore
+
+ async def get(
+ self,
+ resource_group_name: str,
+ cache_name: str,
+ **kwargs
+ ) -> "models.Cache":
+ """Returns a Cache.
+
+ :param resource_group_name: Target resource group.
+ :type resource_group_name: str
+ :param cache_name: Name of Cache. Length of name must not be greater than 80 and chars must be
+ from the [-0-9a-zA-Z_] char class.
+ :type cache_name: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: Cache, or the result of cls(response)
+ :rtype: ~storage_cache_management_client.models.Cache
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType["models.Cache"]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+ api_version = "2020-10-01"
+ accept = "application/json"
+
+ # Construct URL
+ url = self.get.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
+ 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
+ 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ request = self._client.get(url, query_parameters, header_parameters)
+ pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise HttpResponseError(response=response, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize('Cache', pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+
+ return deserialized
+ get.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}'} # type: ignore
+
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        cache_name: str,
        tags: Optional[object] = None,
        location: Optional[str] = None,
        cache_size_gb: Optional[int] = None,
        provisioning_state: Optional[Union[str, "models.ProvisioningStateType"]] = None,
        subnet: Optional[str] = None,
        upgrade_status: Optional["models.CacheUpgradeStatus"] = None,
        extended_groups_enabled: Optional[bool] = None,
        username_source: Optional[Union[str, "models.UsernameSource"]] = "None",
        group_file_uri: Optional[str] = None,
        user_file_uri: Optional[str] = None,
        ldap_server: Optional[str] = None,
        ldap_base_dn: Optional[str] = None,
        encrypt_ldap_connection: Optional[bool] = None,
        require_valid_certificate: Optional[bool] = None,
        auto_download_certificate: Optional[bool] = None,
        ca_certificate_uri: Optional[str] = None,
        credentials: Optional["models.CacheUsernameDownloadSettingsCredentials"] = None,
        primary_dns_ip_address: Optional[str] = None,
        secondary_dns_ip_address: Optional[str] = None,
        domain_name: Optional[str] = None,
        domain_net_bios: Optional[str] = None,
        smb_server_name: Optional[str] = None,
        access_policies: Optional[List["models.NfsAccessPolicy"]] = None,
        key_url: Optional[str] = None,
        id: Optional[str] = None,
        mtu: Optional[int] = 1500,
        name: Optional[str] = None,
        type: Optional[Union[str, "models.CacheIdentityType"]] = None,
        **kwargs
    ) -> Optional["models.Cache"]:
        """Send the initial PUT request to create or update a Cache.

        Internal helper for :meth:`begin_create_or_update`; the flattened
        keyword parameters are re-assembled into a
        ~storage_cache_management_client.models.Cache request body. Returns the
        deserialized Cache for 200/201 responses and ``None`` for 202.

        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["models.Cache"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        # Re-assemble the flattened method parameters into the Cache request body.
        cache = models.Cache(tags=tags, location=location, cache_size_gb=cache_size_gb, provisioning_state=provisioning_state, subnet=subnet, upgrade_status=upgrade_status, extended_groups_enabled=extended_groups_enabled, username_source=username_source, group_file_uri=group_file_uri, user_file_uri=user_file_uri, ldap_server=ldap_server, ldap_base_dn=ldap_base_dn, encrypt_ldap_connection=encrypt_ldap_connection, require_valid_certificate=require_valid_certificate, auto_download_certificate=auto_download_certificate, ca_certificate_uri=ca_certificate_uri, credentials_directory_services_settings_username_download_credentials=credentials, primary_dns_ip_address=primary_dns_ip_address, secondary_dns_ip_address=secondary_dns_ip_address, domain_name=domain_name, domain_net_bios=domain_net_bios, smb_server_name=smb_server_name, access_policies=access_policies, key_url=key_url, id_encryption_settings_key_encryption_key_source_vault_id=id, mtu=mtu, name_sku_name=name, type_identity_type=type)
        api_version = "2020-10-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        # NOTE: 'cache' is always constructed above, so this guard is always true;
        # it follows the generated optional-body pattern.
        if cache is not None:
            body_content = self._serialize.body(cache, 'Cache')
        else:
            body_content = None
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 201, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # 200/201 carry a Cache body; a 202 (accepted) response leaves 'deserialized' as None.
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('Cache', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('Cache', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}'}  # type: ignore
+
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        cache_name: str,
        tags: Optional[object] = None,
        location: Optional[str] = None,
        cache_size_gb: Optional[int] = None,
        provisioning_state: Optional[Union[str, "models.ProvisioningStateType"]] = None,
        subnet: Optional[str] = None,
        upgrade_status: Optional["models.CacheUpgradeStatus"] = None,
        extended_groups_enabled: Optional[bool] = None,
        username_source: Optional[Union[str, "models.UsernameSource"]] = "None",
        group_file_uri: Optional[str] = None,
        user_file_uri: Optional[str] = None,
        ldap_server: Optional[str] = None,
        ldap_base_dn: Optional[str] = None,
        encrypt_ldap_connection: Optional[bool] = None,
        require_valid_certificate: Optional[bool] = None,
        auto_download_certificate: Optional[bool] = None,
        ca_certificate_uri: Optional[str] = None,
        credentials: Optional["models.CacheUsernameDownloadSettingsCredentials"] = None,
        primary_dns_ip_address: Optional[str] = None,
        secondary_dns_ip_address: Optional[str] = None,
        domain_name: Optional[str] = None,
        domain_net_bios: Optional[str] = None,
        smb_server_name: Optional[str] = None,
        access_policies: Optional[List["models.NfsAccessPolicy"]] = None,
        key_url: Optional[str] = None,
        id: Optional[str] = None,
        mtu: Optional[int] = 1500,
        name: Optional[str] = None,
        type: Optional[Union[str, "models.CacheIdentityType"]] = None,
        **kwargs
    ) -> AsyncLROPoller["models.Cache"]:
        """Create or update a Cache.

        :param resource_group_name: Target resource group.
        :type resource_group_name: str
        :param cache_name: Name of Cache. Length of name must not be greater than 80 and chars must be
         from the [-0-9a-zA-Z_] char class.
        :type cache_name: str
        :param tags: ARM tags as name/value pairs.
        :type tags: object
        :param location: Region name string.
        :type location: str
        :param cache_size_gb: The size of this Cache, in GB.
        :type cache_size_gb: int
        :param provisioning_state: ARM provisioning state, see https://github.com/Azure/azure-resource-
         manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property.
        :type provisioning_state: str or ~storage_cache_management_client.models.ProvisioningStateType
        :param subnet: Subnet used for the Cache.
        :type subnet: str
        :param upgrade_status: Upgrade status of the Cache.
        :type upgrade_status: ~storage_cache_management_client.models.CacheUpgradeStatus
        :param extended_groups_enabled: This indicates if Extended Groups is enabled.
        :type extended_groups_enabled: bool
        :param username_source: This setting determines how the system gets username and group names
         for clients.
        :type username_source: str or ~storage_cache_management_client.models.UsernameSource
        :param group_file_uri: The URI of the file containing the group information (in /etc/group file
         format). This field must be populated when 'usernameSource' is set to 'File'.
        :type group_file_uri: str
        :param user_file_uri: The URI of the file containing the user information (in /etc/passwd file
         format). This field must be populated when 'usernameSource' is set to 'File'.
        :type user_file_uri: str
        :param ldap_server: The fully qualified domain name or IP address of the LDAP server to use.
        :type ldap_server: str
        :param ldap_base_dn: The base distinguished name for the LDAP domain.
        :type ldap_base_dn: str
        :param encrypt_ldap_connection: This indicates if the LDAP connection should be encrypted.
        :type encrypt_ldap_connection: bool
        :param require_valid_certificate: Determines if the certificates should be validated by a
         certificate authority. When true, caCertificateURI must be provided.
        :type require_valid_certificate: bool
        :param auto_download_certificate: Determines if the certificate should be automatically
         downloaded. This applies to 'caCertificateURI' when 'requireValidCertificate' is true, or a
         self signed certificate otherwise.
        :type auto_download_certificate: bool
        :param ca_certificate_uri: The URI of the CA certificate to validate the LDAP secure
         connection. This field must be populated when 'requireValidCertificate' is set to true.
        :type ca_certificate_uri: str
        :param credentials: When present, these are the credentials for the secure LDAP connection.
        :type credentials: ~storage_cache_management_client.models.CacheUsernameDownloadSettingsCredentials
        :param primary_dns_ip_address: Primary DNS IP address used to resolve the Active Directory
         domain controller's fully qualified domain name.
        :type primary_dns_ip_address: str
        :param secondary_dns_ip_address: Secondary DNS IP address used to resolve the Active Directory
         domain controller's fully qualified domain name.
        :type secondary_dns_ip_address: str
        :param domain_name: The fully qualified domain name of the Active Directory domain controller.
        :type domain_name: str
        :param domain_net_bios: The Active Directory domain's NetBIOS name.
        :type domain_net_bios: str
        :param smb_server_name: The name (NetBIOS) used for the HPC Cache to join the Active Directory
         domain. Length must not be greater than 15 and chars must be from the [-0-9a-zA-Z_] char class.
        :type smb_server_name: str
        :param access_policies: NFS access policies defined for this cache.
        :type access_policies: list[~storage_cache_management_client.models.NfsAccessPolicy]
        :param key_url: The URL referencing a key encryption key in Key Vault.
        :type key_url: str
        :param id: Resource Id.
        :type id: str
        :param mtu: The IPv4 maximum transmission unit configured for the subnet.
        :type mtu: int
        :param name: SKU name for this Cache.
        :type name: str
        :param type: The type of identity used for the cache.
        :type type: str or ~storage_cache_management_client.models.CacheIdentityType
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either Cache or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~storage_cache_management_client.models.Cache]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["models.Cache"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial PUT when not resuming a saved poller state.
        if cont_token is None:
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                cache_name=cache_name,
                tags=tags,
                location=location,
                cache_size_gb=cache_size_gb,
                provisioning_state=provisioning_state,
                subnet=subnet,
                upgrade_status=upgrade_status,
                extended_groups_enabled=extended_groups_enabled,
                username_source=username_source,
                group_file_uri=group_file_uri,
                user_file_uri=user_file_uri,
                ldap_server=ldap_server,
                ldap_base_dn=ldap_base_dn,
                encrypt_ldap_connection=encrypt_ldap_connection,
                require_valid_certificate=require_valid_certificate,
                auto_download_certificate=auto_download_certificate,
                ca_certificate_uri=ca_certificate_uri,
                credentials=credentials,
                primary_dns_ip_address=primary_dns_ip_address,
                secondary_dns_ip_address=secondary_dns_ip_address,
                domain_name=domain_name,
                domain_net_bios=domain_net_bios,
                smb_server_name=smb_server_name,
                access_policies=access_policies,
                key_url=key_url,
                id=id,
                mtu=mtu,
                name=name,
                type=type,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # These kwargs applied only to the initial request; drop them before polling.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # The final LRO response carries a Cache body.
            deserialized = self._deserialize('Cache', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        # Passed to AsyncARMPolling so it can format operation/polling URLs.
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
        }

        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}'}  # type: ignore
+
    async def update(
        self,
        resource_group_name: str,
        cache_name: str,
        tags: Optional[object] = None,
        location: Optional[str] = None,
        cache_size_gb: Optional[int] = None,
        provisioning_state: Optional[Union[str, "models.ProvisioningStateType"]] = None,
        subnet: Optional[str] = None,
        upgrade_status: Optional["models.CacheUpgradeStatus"] = None,
        extended_groups_enabled: Optional[bool] = None,
        username_source: Optional[Union[str, "models.UsernameSource"]] = "None",
        group_file_uri: Optional[str] = None,
        user_file_uri: Optional[str] = None,
        ldap_server: Optional[str] = None,
        ldap_base_dn: Optional[str] = None,
        encrypt_ldap_connection: Optional[bool] = None,
        require_valid_certificate: Optional[bool] = None,
        auto_download_certificate: Optional[bool] = None,
        ca_certificate_uri: Optional[str] = None,
        credentials: Optional["models.CacheUsernameDownloadSettingsCredentials"] = None,
        primary_dns_ip_address: Optional[str] = None,
        secondary_dns_ip_address: Optional[str] = None,
        domain_name: Optional[str] = None,
        domain_net_bios: Optional[str] = None,
        smb_server_name: Optional[str] = None,
        access_policies: Optional[List["models.NfsAccessPolicy"]] = None,
        key_url: Optional[str] = None,
        id: Optional[str] = None,
        mtu: Optional[int] = 1500,
        name: Optional[str] = None,
        type: Optional[Union[str, "models.CacheIdentityType"]] = None,
        **kwargs
    ) -> "models.Cache":
        """Update a Cache instance.

        :param resource_group_name: Target resource group.
        :type resource_group_name: str
        :param cache_name: Name of Cache. Length of name must not be greater than 80 and chars must be
         from the [-0-9a-zA-Z_] char class.
        :type cache_name: str
        :param tags: ARM tags as name/value pairs.
        :type tags: object
        :param location: Region name string.
        :type location: str
        :param cache_size_gb: The size of this Cache, in GB.
        :type cache_size_gb: int
        :param provisioning_state: ARM provisioning state, see https://github.com/Azure/azure-resource-
         manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property.
        :type provisioning_state: str or ~storage_cache_management_client.models.ProvisioningStateType
        :param subnet: Subnet used for the Cache.
        :type subnet: str
        :param upgrade_status: Upgrade status of the Cache.
        :type upgrade_status: ~storage_cache_management_client.models.CacheUpgradeStatus
        :param extended_groups_enabled: This indicates if Extended Groups is enabled.
        :type extended_groups_enabled: bool
        :param username_source: This setting determines how the system gets username and group names
         for clients.
        :type username_source: str or ~storage_cache_management_client.models.UsernameSource
        :param group_file_uri: The URI of the file containing the group information (in /etc/group file
         format). This field must be populated when 'usernameSource' is set to 'File'.
        :type group_file_uri: str
        :param user_file_uri: The URI of the file containing the user information (in /etc/passwd file
         format). This field must be populated when 'usernameSource' is set to 'File'.
        :type user_file_uri: str
        :param ldap_server: The fully qualified domain name or IP address of the LDAP server to use.
        :type ldap_server: str
        :param ldap_base_dn: The base distinguished name for the LDAP domain.
        :type ldap_base_dn: str
        :param encrypt_ldap_connection: This indicates if the LDAP connection should be encrypted.
        :type encrypt_ldap_connection: bool
        :param require_valid_certificate: Determines if the certificates should be validated by a
         certificate authority. When true, caCertificateURI must be provided.
        :type require_valid_certificate: bool
        :param auto_download_certificate: Determines if the certificate should be automatically
         downloaded. This applies to 'caCertificateURI' when 'requireValidCertificate' is true, or a
         self signed certificate otherwise.
        :type auto_download_certificate: bool
        :param ca_certificate_uri: The URI of the CA certificate to validate the LDAP secure
         connection. This field must be populated when 'requireValidCertificate' is set to true.
        :type ca_certificate_uri: str
        :param credentials: When present, these are the credentials for the secure LDAP connection.
        :type credentials: ~storage_cache_management_client.models.CacheUsernameDownloadSettingsCredentials
        :param primary_dns_ip_address: Primary DNS IP address used to resolve the Active Directory
         domain controller's fully qualified domain name.
        :type primary_dns_ip_address: str
        :param secondary_dns_ip_address: Secondary DNS IP address used to resolve the Active Directory
         domain controller's fully qualified domain name.
        :type secondary_dns_ip_address: str
        :param domain_name: The fully qualified domain name of the Active Directory domain controller.
        :type domain_name: str
        :param domain_net_bios: The Active Directory domain's NetBIOS name.
        :type domain_net_bios: str
        :param smb_server_name: The name (NetBIOS) used for the HPC Cache to join the Active Directory
         domain. Length must not be greater than 15 and chars must be from the [-0-9a-zA-Z_] char class.
        :type smb_server_name: str
        :param access_policies: NFS access policies defined for this cache.
        :type access_policies: list[~storage_cache_management_client.models.NfsAccessPolicy]
        :param key_url: The URL referencing a key encryption key in Key Vault.
        :type key_url: str
        :param id: Resource Id.
        :type id: str
        :param mtu: The IPv4 maximum transmission unit configured for the subnet.
        :type mtu: int
        :param name: SKU name for this Cache.
        :type name: str
        :param type: The type of identity used for the cache.
        :type type: str or ~storage_cache_management_client.models.CacheIdentityType
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Cache, or the result of cls(response)
        :rtype: ~storage_cache_management_client.models.Cache
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.Cache"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        # Re-assemble the flattened method parameters into the Cache PATCH body.
        cache = models.Cache(tags=tags, location=location, cache_size_gb=cache_size_gb, provisioning_state=provisioning_state, subnet=subnet, upgrade_status=upgrade_status, extended_groups_enabled=extended_groups_enabled, username_source=username_source, group_file_uri=group_file_uri, user_file_uri=user_file_uri, ldap_server=ldap_server, ldap_base_dn=ldap_base_dn, encrypt_ldap_connection=encrypt_ldap_connection, require_valid_certificate=require_valid_certificate, auto_download_certificate=auto_download_certificate, ca_certificate_uri=ca_certificate_uri, credentials_directory_services_settings_username_download_credentials=credentials, primary_dns_ip_address=primary_dns_ip_address, secondary_dns_ip_address=secondary_dns_ip_address, domain_name=domain_name, domain_net_bios=domain_net_bios, smb_server_name=smb_server_name, access_policies=access_policies, key_url=key_url, id_encryption_settings_key_encryption_key_source_vault_id=id, mtu=mtu, name_sku_name=name, type_identity_type=type)
        api_version = "2020-10-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.update.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        # NOTE: 'cache' is always constructed above, so this guard is always true;
        # it follows the generated optional-body pattern.
        if cache is not None:
            body_content = self._serialize.body(cache, 'Cache')
        else:
            body_content = None
        body_content_kwargs['content'] = body_content
        # Update is a direct PATCH (no LRO polling), unlike create_or_update.
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('Cache', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    update.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}'}  # type: ignore
+
+    async def _flush_initial(
+        self,
+        resource_group_name: str,
+        cache_name: str,
+        **kwargs
+    ) -> None:
+        """Send the initial POST for the long-running flush operation.
+
+        ``begin_flush`` wraps this call in an LRO poller; on its own this method
+        only issues the request and validates the immediate service response
+        (200, 202 or 204 are accepted as successful starts).
+        """
+        cls = kwargs.pop('cls', None)  # type: ClsType[None]
+        error_map = {
+            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+        }
+        # Caller-supplied error_map entries take precedence over the defaults above.
+        error_map.update(kwargs.pop('error_map', {}))
+        api_version = "2020-10-01"
+        accept = "application/json"
+
+        # Construct URL
+        url = self._flush_initial.metadata['url']  # type: ignore
+        path_format_arguments = {
+            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
+            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
+            'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}  # type: Dict[str, Any]
+        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
+
+        # Construct headers
+        header_parameters = {}  # type: Dict[str, Any]
+        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+        request = self._client.post(url, query_parameters, header_parameters)
+        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200, 202, 204]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
+
+        # No body is deserialized; a custom ``cls`` may transform the raw response.
+        if cls:
+            return cls(pipeline_response, None, {})
+
+    _flush_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/flush'}  # type: ignore
+
+    async def begin_flush(
+        self,
+        resource_group_name: str,
+        cache_name: str,
+        **kwargs
+    ) -> AsyncLROPoller[None]:
+        """Tells a Cache to write all dirty data to the Storage Target(s). During the flush, clients will
+        see errors returned until the flush is complete.
+
+        :param resource_group_name: Target resource group.
+        :type resource_group_name: str
+        :param cache_name: Name of Cache. Length of name must not be greater than 80 and chars must be
+         from the [-0-9a-zA-Z_] char class.
+        :type cache_name: str
+        :keyword callable cls: A custom type or function that will be passed the direct response
+        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+        :keyword polling: True for ARMPolling, False for no polling, or a
+         polling object for personal polling strategy
+        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
+        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+        :rtype: ~azure.core.polling.AsyncLROPoller[None]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
+        cls = kwargs.pop('cls', None)  # type: ClsType[None]
+        lro_delay = kwargs.pop(
+            'polling_interval',
+            self._config.polling_interval
+        )
+        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
+        if cont_token is None:
+            # cls=lambda returns the raw pipeline response so the poller can
+            # read the LRO headers from the initial reply.
+            raw_result = await self._flush_initial(
+                resource_group_name=resource_group_name,
+                cache_name=cache_name,
+                cls=lambda x,y,z: x,
+                **kwargs
+            )
+
+        # Already consumed by the initial call; drop so they are not re-sent while polling.
+        kwargs.pop('error_map', None)
+        kwargs.pop('content_type', None)
+
+        def get_long_running_output(pipeline_response):
+            # Final deserialization hook: returns None unless a custom ``cls`` was given.
+            if cls:
+                return cls(pipeline_response, None, {})
+
+        path_format_arguments = {
+            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
+            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
+            'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
+        }
+
+        # Select the polling strategy: default ARM polling, disabled, or caller-provided.
+        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
+        elif polling is False: polling_method = AsyncNoPolling()
+        else: polling_method = polling
+        if cont_token:
+            return AsyncLROPoller.from_continuation_token(
+                polling_method=polling_method,
+                continuation_token=cont_token,
+                client=self._client,
+                deserialization_callback=get_long_running_output
+            )
+        else:
+            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
+    begin_flush.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/flush'}  # type: ignore
+
+    async def _start_initial(
+        self,
+        resource_group_name: str,
+        cache_name: str,
+        **kwargs
+    ) -> None:
+        """Send the initial POST for the long-running start operation.
+
+        ``begin_start`` wraps this call in an LRO poller; on its own this method
+        only issues the request and validates the immediate service response
+        (200, 202 or 204 are accepted as successful starts).
+        """
+        cls = kwargs.pop('cls', None)  # type: ClsType[None]
+        error_map = {
+            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+        }
+        # Caller-supplied error_map entries take precedence over the defaults above.
+        error_map.update(kwargs.pop('error_map', {}))
+        api_version = "2020-10-01"
+        accept = "application/json"
+
+        # Construct URL
+        url = self._start_initial.metadata['url']  # type: ignore
+        path_format_arguments = {
+            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
+            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
+            'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}  # type: Dict[str, Any]
+        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
+
+        # Construct headers
+        header_parameters = {}  # type: Dict[str, Any]
+        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+        request = self._client.post(url, query_parameters, header_parameters)
+        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200, 202, 204]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
+
+        # No body is deserialized; a custom ``cls`` may transform the raw response.
+        if cls:
+            return cls(pipeline_response, None, {})
+
+    _start_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/start'}  # type: ignore
+
+    async def begin_start(
+        self,
+        resource_group_name: str,
+        cache_name: str,
+        **kwargs
+    ) -> AsyncLROPoller[None]:
+        """Tells a Stopped state Cache to transition to Active state.
+
+        :param resource_group_name: Target resource group.
+        :type resource_group_name: str
+        :param cache_name: Name of Cache. Length of name must not be greater than 80 and chars must be
+         from the [-0-9a-zA-Z_] char class.
+        :type cache_name: str
+        :keyword callable cls: A custom type or function that will be passed the direct response
+        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+        :keyword polling: True for ARMPolling, False for no polling, or a
+         polling object for personal polling strategy
+        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
+        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+        :rtype: ~azure.core.polling.AsyncLROPoller[None]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
+        cls = kwargs.pop('cls', None)  # type: ClsType[None]
+        lro_delay = kwargs.pop(
+            'polling_interval',
+            self._config.polling_interval
+        )
+        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
+        if cont_token is None:
+            # cls=lambda returns the raw pipeline response so the poller can
+            # read the LRO headers from the initial reply.
+            raw_result = await self._start_initial(
+                resource_group_name=resource_group_name,
+                cache_name=cache_name,
+                cls=lambda x,y,z: x,
+                **kwargs
+            )
+
+        # Already consumed by the initial call; drop so they are not re-sent while polling.
+        kwargs.pop('error_map', None)
+        kwargs.pop('content_type', None)
+
+        def get_long_running_output(pipeline_response):
+            # Final deserialization hook: returns None unless a custom ``cls`` was given.
+            if cls:
+                return cls(pipeline_response, None, {})
+
+        path_format_arguments = {
+            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
+            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
+            'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
+        }
+
+        # Select the polling strategy: default ARM polling, disabled, or caller-provided.
+        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
+        elif polling is False: polling_method = AsyncNoPolling()
+        else: polling_method = polling
+        if cont_token:
+            return AsyncLROPoller.from_continuation_token(
+                polling_method=polling_method,
+                continuation_token=cont_token,
+                client=self._client,
+                deserialization_callback=get_long_running_output
+            )
+        else:
+            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
+    begin_start.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/start'}  # type: ignore
+
+    async def _stop_initial(
+        self,
+        resource_group_name: str,
+        cache_name: str,
+        **kwargs
+    ) -> None:
+        """Send the initial POST for the long-running stop operation.
+
+        ``begin_stop`` wraps this call in an LRO poller; on its own this method
+        only issues the request and validates the immediate service response
+        (200, 202 or 204 are accepted as successful starts).
+        """
+        cls = kwargs.pop('cls', None)  # type: ClsType[None]
+        error_map = {
+            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+        }
+        # Caller-supplied error_map entries take precedence over the defaults above.
+        error_map.update(kwargs.pop('error_map', {}))
+        api_version = "2020-10-01"
+        accept = "application/json"
+
+        # Construct URL
+        url = self._stop_initial.metadata['url']  # type: ignore
+        path_format_arguments = {
+            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
+            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
+            'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}  # type: Dict[str, Any]
+        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
+
+        # Construct headers
+        header_parameters = {}  # type: Dict[str, Any]
+        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+        request = self._client.post(url, query_parameters, header_parameters)
+        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200, 202, 204]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
+
+        # No body is deserialized; a custom ``cls`` may transform the raw response.
+        if cls:
+            return cls(pipeline_response, None, {})
+
+    _stop_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/stop'}  # type: ignore
+
+    async def begin_stop(
+        self,
+        resource_group_name: str,
+        cache_name: str,
+        **kwargs
+    ) -> AsyncLROPoller[None]:
+        """Tells an Active Cache to transition to Stopped state.
+
+        :param resource_group_name: Target resource group.
+        :type resource_group_name: str
+        :param cache_name: Name of Cache. Length of name must not be greater than 80 and chars must be
+         from the [-0-9a-zA-Z_] char class.
+        :type cache_name: str
+        :keyword callable cls: A custom type or function that will be passed the direct response
+        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+        :keyword polling: True for ARMPolling, False for no polling, or a
+         polling object for personal polling strategy
+        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
+        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+        :rtype: ~azure.core.polling.AsyncLROPoller[None]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
+        cls = kwargs.pop('cls', None)  # type: ClsType[None]
+        lro_delay = kwargs.pop(
+            'polling_interval',
+            self._config.polling_interval
+        )
+        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
+        if cont_token is None:
+            # cls=lambda returns the raw pipeline response so the poller can
+            # read the LRO headers from the initial reply.
+            raw_result = await self._stop_initial(
+                resource_group_name=resource_group_name,
+                cache_name=cache_name,
+                cls=lambda x,y,z: x,
+                **kwargs
+            )
+
+        # Already consumed by the initial call; drop so they are not re-sent while polling.
+        kwargs.pop('error_map', None)
+        kwargs.pop('content_type', None)
+
+        def get_long_running_output(pipeline_response):
+            # Final deserialization hook: returns None unless a custom ``cls`` was given.
+            if cls:
+                return cls(pipeline_response, None, {})
+
+        path_format_arguments = {
+            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
+            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
+            'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
+        }
+
+        # Select the polling strategy: default ARM polling, disabled, or caller-provided.
+        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
+        elif polling is False: polling_method = AsyncNoPolling()
+        else: polling_method = polling
+        if cont_token:
+            return AsyncLROPoller.from_continuation_token(
+                polling_method=polling_method,
+                continuation_token=cont_token,
+                client=self._client,
+                deserialization_callback=get_long_running_output
+            )
+        else:
+            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
+    begin_stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/stop'}  # type: ignore
+
+    async def _upgrade_firmware_initial(
+        self,
+        resource_group_name: str,
+        cache_name: str,
+        **kwargs
+    ) -> None:
+        """Send the initial POST for the long-running firmware-upgrade operation.
+
+        ``begin_upgrade_firmware`` wraps this call in an LRO poller; on its own
+        this method only issues the request and validates the immediate service
+        response (200, 202 or 204 are accepted as successful starts).
+        """
+        cls = kwargs.pop('cls', None)  # type: ClsType[None]
+        error_map = {
+            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+        }
+        # Caller-supplied error_map entries take precedence over the defaults above.
+        error_map.update(kwargs.pop('error_map', {}))
+        api_version = "2020-10-01"
+        accept = "application/json"
+
+        # Construct URL
+        url = self._upgrade_firmware_initial.metadata['url']  # type: ignore
+        path_format_arguments = {
+            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
+            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
+            'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}  # type: Dict[str, Any]
+        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
+
+        # Construct headers
+        header_parameters = {}  # type: Dict[str, Any]
+        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+        request = self._client.post(url, query_parameters, header_parameters)
+        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200, 202, 204]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
+
+        # No body is deserialized; a custom ``cls`` may transform the raw response.
+        if cls:
+            return cls(pipeline_response, None, {})
+
+    _upgrade_firmware_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/upgrade'}  # type: ignore
+
+    async def begin_upgrade_firmware(
+        self,
+        resource_group_name: str,
+        cache_name: str,
+        **kwargs
+    ) -> AsyncLROPoller[None]:
+        """Upgrade a Cache's firmware if a new version is available. Otherwise, this operation has no
+        effect.
+
+        :param resource_group_name: Target resource group.
+        :type resource_group_name: str
+        :param cache_name: Name of Cache. Length of name must not be greater than 80 and chars must be
+         from the [-0-9a-zA-Z_] char class.
+        :type cache_name: str
+        :keyword callable cls: A custom type or function that will be passed the direct response
+        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+        :keyword polling: True for ARMPolling, False for no polling, or a
+         polling object for personal polling strategy
+        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
+        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+        :rtype: ~azure.core.polling.AsyncLROPoller[None]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
+        cls = kwargs.pop('cls', None)  # type: ClsType[None]
+        lro_delay = kwargs.pop(
+            'polling_interval',
+            self._config.polling_interval
+        )
+        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
+        if cont_token is None:
+            # cls=lambda returns the raw pipeline response so the poller can
+            # read the LRO headers from the initial reply.
+            raw_result = await self._upgrade_firmware_initial(
+                resource_group_name=resource_group_name,
+                cache_name=cache_name,
+                cls=lambda x,y,z: x,
+                **kwargs
+            )
+
+        # Already consumed by the initial call; drop so they are not re-sent while polling.
+        kwargs.pop('error_map', None)
+        kwargs.pop('content_type', None)
+
+        def get_long_running_output(pipeline_response):
+            # Final deserialization hook: returns None unless a custom ``cls`` was given.
+            if cls:
+                return cls(pipeline_response, None, {})
+
+        path_format_arguments = {
+            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
+            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
+            'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
+        }
+
+        # Select the polling strategy: default ARM polling, disabled, or caller-provided.
+        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
+        elif polling is False: polling_method = AsyncNoPolling()
+        else: polling_method = polling
+        if cont_token:
+            return AsyncLROPoller.from_continuation_token(
+                polling_method=polling_method,
+                continuation_token=cont_token,
+                client=self._client,
+                deserialization_callback=get_long_running_output
+            )
+        else:
+            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
+    begin_upgrade_firmware.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/upgrade'}  # type: ignore
diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations/_operation_operations.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations/_operation_operations.py
new file mode 100644
index 00000000000..28addb27d83
--- /dev/null
+++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations/_operation_operations.py
@@ -0,0 +1,104 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
+import warnings
+
+from azure.core.async_paging import AsyncItemPaged, AsyncList
+from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
+from azure.core.pipeline import PipelineResponse
+from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
+from azure.mgmt.core.exceptions import ARMErrorFormat
+
+from ... import models
+
+T = TypeVar('T')
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
+
+class OperationOperations:
+    """OperationOperations async operations.
+
+    You should not instantiate this class directly. Instead, you should create a Client instance that
+    instantiates it for you and attaches it as an attribute.
+
+    :ivar models: Alias to model classes used in this operation group.
+    :type models: ~storage_cache_management_client.models
+    :param client: Client for service requests.
+    :param config: Configuration of service client.
+    :param serializer: An object model serializer.
+    :param deserializer: An object model deserializer.
+    """
+
+    models = models
+
+    def __init__(self, client, config, serializer, deserializer) -> None:
+        # Pipeline client used to build and send requests.
+        self._client = client
+        self._serialize = serializer
+        self._deserialize = deserializer
+        self._config = config
+
+    def list(
+        self,
+        **kwargs
+    ) -> AsyncIterable["models.ApiOperationListResult"]:
+        """Lists all of the available Resource Provider operations.
+
+        :keyword callable cls: A custom type or function that will be passed the direct response
+        :return: An iterator like instance of either ApiOperationListResult or the result of cls(response)
+        :rtype: ~azure.core.async_paging.AsyncItemPaged[~storage_cache_management_client.models.ApiOperationListResult]
+        :raises: ~azure.core.exceptions.HttpResponseError
+        """
+        cls = kwargs.pop('cls', None)  # type: ClsType["models.ApiOperationListResult"]
+        error_map = {
+            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+        }
+        # Caller-supplied error_map entries take precedence over the defaults above.
+        error_map.update(kwargs.pop('error_map', {}))
+        api_version = "2020-10-01"
+        accept = "application/json"
+
+        def prepare_request(next_link=None):
+            # Build the GET request for the first page (metadata URL) or a
+            # continuation page (next_link already contains the query string).
+            # Construct headers
+            header_parameters = {}  # type: Dict[str, Any]
+            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+            if not next_link:
+                # Construct URL
+                url = self.list.metadata['url']  # type: ignore
+                # Construct parameters
+                query_parameters = {}  # type: Dict[str, Any]
+                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
+
+                request = self._client.get(url, query_parameters, header_parameters)
+            else:
+                url = next_link
+                query_parameters = {}  # type: Dict[str, Any]
+                request = self._client.get(url, query_parameters, header_parameters)
+            return request
+
+        async def extract_data(pipeline_response):
+            # Deserialize one page and return (next-page link, items) for the pager.
+            deserialized = self._deserialize('ApiOperationListResult', pipeline_response)
+            list_of_elem = deserialized.value
+            if cls:
+                list_of_elem = cls(list_of_elem)
+            return deserialized.next_link or None, AsyncList(list_of_elem)
+
+        async def get_next(next_link=None):
+            # Fetch one page and validate the HTTP status before paging continues.
+            request = prepare_request(next_link)
+
+            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+            response = pipeline_response.http_response
+
+            if response.status_code not in [200]:
+                map_error(status_code=response.status_code, response=response, error_map=error_map)
+                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
+
+            return pipeline_response
+
+        return AsyncItemPaged(
+            get_next, extract_data
+        )
+    list.metadata = {'url': '/providers/Microsoft.StorageCache/operations'}  # type: ignore
diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations/_sku_operations.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations/_sku_operations.py
new file mode 100644
index 00000000000..b09e61724d1
--- /dev/null
+++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations/_sku_operations.py
@@ -0,0 +1,108 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
+import warnings
+
+from azure.core.async_paging import AsyncItemPaged, AsyncList
+from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
+from azure.core.pipeline import PipelineResponse
+from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
+from azure.mgmt.core.exceptions import ARMErrorFormat
+
+from ... import models
+
+T = TypeVar('T')
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
+
+class SkuOperations:
+    """SkuOperations async operations.
+
+    You should not instantiate this class directly. Instead, you should create a Client instance that
+    instantiates it for you and attaches it as an attribute.
+
+    :ivar models: Alias to model classes used in this operation group.
+    :type models: ~storage_cache_management_client.models
+    :param client: Client for service requests.
+    :param config: Configuration of service client.
+    :param serializer: An object model serializer.
+    :param deserializer: An object model deserializer.
+    """
+
+    models = models
+
+    def __init__(self, client, config, serializer, deserializer) -> None:
+        # Pipeline client used to build and send requests.
+        self._client = client
+        self._serialize = serializer
+        self._deserialize = deserializer
+        self._config = config
+
+    def list(
+        self,
+        **kwargs
+    ) -> AsyncIterable["models.ResourceSkusResult"]:
+        """Get the list of StorageCache.Cache SKUs available to this subscription.
+
+        :keyword callable cls: A custom type or function that will be passed the direct response
+        :return: An iterator like instance of either ResourceSkusResult or the result of cls(response)
+        :rtype: ~azure.core.async_paging.AsyncItemPaged[~storage_cache_management_client.models.ResourceSkusResult]
+        :raises: ~azure.core.exceptions.HttpResponseError
+        """
+        cls = kwargs.pop('cls', None)  # type: ClsType["models.ResourceSkusResult"]
+        error_map = {
+            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+        }
+        # Caller-supplied error_map entries take precedence over the defaults above.
+        error_map.update(kwargs.pop('error_map', {}))
+        api_version = "2020-10-01"
+        accept = "application/json"
+
+        def prepare_request(next_link=None):
+            # Build the GET request for the first page (subscription-scoped URL)
+            # or a continuation page (next_link already contains the query string).
+            # Construct headers
+            header_parameters = {}  # type: Dict[str, Any]
+            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+            if not next_link:
+                # Construct URL
+                url = self.list.metadata['url']  # type: ignore
+                path_format_arguments = {
+                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
+                }
+                url = self._client.format_url(url, **path_format_arguments)
+                # Construct parameters
+                query_parameters = {}  # type: Dict[str, Any]
+                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
+
+                request = self._client.get(url, query_parameters, header_parameters)
+            else:
+                url = next_link
+                query_parameters = {}  # type: Dict[str, Any]
+                request = self._client.get(url, query_parameters, header_parameters)
+            return request
+
+        async def extract_data(pipeline_response):
+            # Deserialize one page and return (next-page link, items) for the pager.
+            deserialized = self._deserialize('ResourceSkusResult', pipeline_response)
+            list_of_elem = deserialized.value
+            if cls:
+                list_of_elem = cls(list_of_elem)
+            return deserialized.next_link or None, AsyncList(list_of_elem)
+
+        async def get_next(next_link=None):
+            # Fetch one page and validate the HTTP status before paging continues.
+            request = prepare_request(next_link)
+
+            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+            response = pipeline_response.http_response
+
+            if response.status_code not in [200]:
+                map_error(status_code=response.status_code, response=response, error_map=error_map)
+                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
+
+            return pipeline_response
+
+        return AsyncItemPaged(
+            get_next, extract_data
+        )
+    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.StorageCache/skus'}  # type: ignore
diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations/_storage_target_operations.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations/_storage_target_operations.py
new file mode 100644
index 00000000000..317f3598b8b
--- /dev/null
+++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations/_storage_target_operations.py
@@ -0,0 +1,474 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from typing import Any, AsyncIterable, Callable, Dict, Generic, List, Optional, TypeVar, Union
+import warnings
+
+from azure.core.async_paging import AsyncItemPaged, AsyncList
+from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
+from azure.core.pipeline import PipelineResponse
+from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
+from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
+from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
+
+from ... import models
+
+T = TypeVar('T')
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
+
+class StorageTargetOperations:
+    """StorageTargetOperations async operations.
+
+    You should not instantiate this class directly. Instead, you should create a Client instance that
+    instantiates it for you and attaches it as an attribute.
+
+    :ivar models: Alias to model classes used in this operation group.
+    :type models: ~storage_cache_management_client.models
+    :param client: Client for service requests.
+    :param config: Configuration of service client.
+    :param serializer: An object model serializer.
+    :param deserializer: An object model deserializer.
+    """
+
+    models = models
+
+    def __init__(self, client, config, serializer, deserializer) -> None:
+        self._client = client
+        self._serialize = serializer
+        self._deserialize = deserializer
+        self._config = config
+
+    def list_by_cache(
+        self,
+        resource_group_name: str,
+        cache_name: str,
+        **kwargs
+    ) -> AsyncIterable["models.StorageTargetsResult"]:
+        """Returns a list of Storage Targets for the specified Cache.
+
+        :param resource_group_name: Target resource group.
+        :type resource_group_name: str
+        :param cache_name: Name of Cache. Length of name must not be greater than 80 and chars must be
+         from the [-0-9a-zA-Z_] char class.
+        :type cache_name: str
+        :keyword callable cls: A custom type or function that will be passed the direct response
+        :return: An iterator like instance of either StorageTargetsResult or the result of cls(response)
+        :rtype: ~azure.core.async_paging.AsyncItemPaged[~storage_cache_management_client.models.StorageTargetsResult]
+        :raises: ~azure.core.exceptions.HttpResponseError
+        """
+        cls = kwargs.pop('cls', None)  # type: ClsType["models.StorageTargetsResult"]
+        error_map = {
+            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+        }
+        error_map.update(kwargs.pop('error_map', {}))  # callers may extend the status -> exception mapping
+        api_version = "2020-10-01"
+        accept = "application/json"
+
+        def prepare_request(next_link=None):
+            # Construct headers
+            header_parameters = {}  # type: Dict[str, Any]
+            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+            if not next_link:
+                # Construct URL
+                url = self.list_by_cache.metadata['url']  # type: ignore
+                path_format_arguments = {
+                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
+                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
+                    'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
+                }
+                url = self._client.format_url(url, **path_format_arguments)
+                # Construct parameters
+                query_parameters = {}  # type: Dict[str, Any]
+                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
+
+                request = self._client.get(url, query_parameters, header_parameters)
+            else:
+                url = next_link  # absolute URL supplied by the service; used verbatim
+                query_parameters = {}  # type: Dict[str, Any]
+                request = self._client.get(url, query_parameters, header_parameters)
+            return request
+
+        async def extract_data(pipeline_response):
+            deserialized = self._deserialize('StorageTargetsResult', pipeline_response)
+            list_of_elem = deserialized.value
+            if cls:
+                list_of_elem = cls(list_of_elem)  # optional caller hook transforms each page
+            return deserialized.next_link or None, AsyncList(list_of_elem)
+
+        async def get_next(next_link=None):
+            request = prepare_request(next_link)
+
+            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+            response = pipeline_response.http_response
+
+            if response.status_code not in [200]:
+                map_error(status_code=response.status_code, response=response, error_map=error_map)
+                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
+
+            return pipeline_response
+
+        return AsyncItemPaged(
+            get_next, extract_data
+        )
+    list_by_cache.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/storageTargets'}  # type: ignore
+
+    async def _delete_initial(
+        self,
+        resource_group_name: str,
+        cache_name: str,
+        storage_target_name: str,
+        **kwargs
+    ) -> None:
+        cls = kwargs.pop('cls', None)  # type: ClsType[None]
+        error_map = {
+            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+        }
+        error_map.update(kwargs.pop('error_map', {}))
+        api_version = "2020-10-01"
+        accept = "application/json"
+
+        # Construct URL
+        url = self._delete_initial.metadata['url']  # type: ignore
+        path_format_arguments = {
+            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
+            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
+            'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
+            'storageTargetName': self._serialize.url("storage_target_name", storage_target_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}  # type: Dict[str, Any]
+        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
+
+        # Construct headers
+        header_parameters = {}  # type: Dict[str, Any]
+        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+        request = self._client.delete(url, query_parameters, header_parameters)
+        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200, 202, 204]:  # 202 = delete accepted, LRO in progress
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
+
+        if cls:
+            return cls(pipeline_response, None, {})
+
+    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/storageTargets/{storageTargetName}'}  # type: ignore
+
+    async def begin_delete(
+        self,
+        resource_group_name: str,
+        cache_name: str,
+        storage_target_name: str,
+        **kwargs
+    ) -> AsyncLROPoller[None]:
+        """Removes a Storage Target from a Cache. This operation is allowed at any time, but if the Cache
+        is down or unhealthy, the actual removal of the Storage Target may be delayed until the Cache
+        is healthy again. Note that if the Cache has data to flush to the Storage Target, the data will
+        be flushed before the Storage Target will be deleted.
+
+        :param resource_group_name: Target resource group.
+        :type resource_group_name: str
+        :param cache_name: Name of Cache. Length of name must not be greater than 80 and chars must be
+         from the [-0-9a-zA-Z_] char class.
+        :type cache_name: str
+        :param storage_target_name: Name of Storage Target.
+        :type storage_target_name: str
+        :keyword callable cls: A custom type or function that will be passed the direct response
+        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+        :keyword polling: True for ARMPolling, False for no polling, or a
+         polling object for personal polling strategy
+        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
+        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
+        :rtype: ~azure.core.polling.AsyncLROPoller[None]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
+        cls = kwargs.pop('cls', None)  # type: ClsType[None]
+        lro_delay = kwargs.pop(
+            'polling_interval',
+            self._config.polling_interval
+        )
+        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
+        if cont_token is None:
+            raw_result = await self._delete_initial(
+                resource_group_name=resource_group_name,
+                cache_name=cache_name,
+                storage_target_name=storage_target_name,
+                cls=lambda x,y,z: x,  # keep the raw pipeline response for the poller
+                **kwargs
+            )
+
+        kwargs.pop('error_map', None)  # consumed by the initial call; not for the poller
+        kwargs.pop('content_type', None)
+
+        def get_long_running_output(pipeline_response):
+            if cls:
+                return cls(pipeline_response, None, {})
+
+        path_format_arguments = {
+            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
+            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
+            'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
+            'storageTargetName': self._serialize.url("storage_target_name", storage_target_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
+        }
+
+        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
+        elif polling is False: polling_method = AsyncNoPolling()
+        else: polling_method = polling
+        if cont_token:
+            return AsyncLROPoller.from_continuation_token(
+                polling_method=polling_method,
+                continuation_token=cont_token,
+                client=self._client,
+                deserialization_callback=get_long_running_output
+            )
+        else:
+            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
+    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/storageTargets/{storageTargetName}'}  # type: ignore
+
+    async def get(
+        self,
+        resource_group_name: str,
+        cache_name: str,
+        storage_target_name: str,
+        **kwargs
+    ) -> "models.StorageTarget":
+        """Returns a Storage Target from a Cache.
+
+        :param resource_group_name: Target resource group.
+        :type resource_group_name: str
+        :param cache_name: Name of Cache. Length of name must not be greater than 80 and chars must be
+         from the [-0-9a-zA-Z_] char class.
+        :type cache_name: str
+        :param storage_target_name: Name of the Storage Target. Length of name must not be greater than
+         80 and chars must be from the [-0-9a-zA-Z_] char class.
+        :type storage_target_name: str
+        :keyword callable cls: A custom type or function that will be passed the direct response
+        :return: StorageTarget, or the result of cls(response)
+        :rtype: ~storage_cache_management_client.models.StorageTarget
+        :raises: ~azure.core.exceptions.HttpResponseError
+        """
+        cls = kwargs.pop('cls', None)  # type: ClsType["models.StorageTarget"]
+        error_map = {
+            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+        }
+        error_map.update(kwargs.pop('error_map', {}))
+        api_version = "2020-10-01"
+        accept = "application/json"
+
+        # Construct URL
+        url = self.get.metadata['url']  # type: ignore
+        path_format_arguments = {
+            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
+            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
+            'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
+            'storageTargetName': self._serialize.url("storage_target_name", storage_target_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}  # type: Dict[str, Any]
+        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
+
+        # Construct headers
+        header_parameters = {}  # type: Dict[str, Any]
+        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+        request = self._client.get(url, query_parameters, header_parameters)
+        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
+
+        deserialized = self._deserialize('StorageTarget', pipeline_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, {})
+
+        return deserialized
+    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/storageTargets/{storageTargetName}'}  # type: ignore
+
+    async def _create_or_update_initial(
+        self,
+        resource_group_name: str,
+        cache_name: str,
+        storage_target_name: str,
+        junctions: Optional[List["models.NamespaceJunction"]] = None,
+        target_type: Optional[Union[str, "models.StorageTargetType"]] = None,
+        provisioning_state: Optional[Union[str, "models.ProvisioningStateType"]] = None,
+        nfs3: Optional["models.Nfs3Target"] = None,
+        unknown_map: Optional[Dict[str, str]] = None,
+        target: Optional[str] = None,
+        **kwargs
+    ) -> Optional["models.StorageTarget"]:
+        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["models.StorageTarget"]]
+        error_map = {
+            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+        }
+        error_map.update(kwargs.pop('error_map', {}))
+
+        storagetarget = models.StorageTarget(junctions=junctions, target_type=target_type, provisioning_state=provisioning_state, nfs3=nfs3, unknown_map=unknown_map, target=target)  # assemble request body from flattened keyword arguments
+        api_version = "2020-10-01"
+        content_type = kwargs.pop("content_type", "application/json")
+        accept = "application/json"
+
+        # Construct URL
+        url = self._create_or_update_initial.metadata['url']  # type: ignore
+        path_format_arguments = {
+            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
+            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
+            'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
+            'storageTargetName': self._serialize.url("storage_target_name", storage_target_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}  # type: Dict[str, Any]
+        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
+
+        # Construct headers
+        header_parameters = {}  # type: Dict[str, Any]
+        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
+        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+        body_content_kwargs = {}  # type: Dict[str, Any]
+        if storagetarget is not None:
+            body_content = self._serialize.body(storagetarget, 'StorageTarget')
+        else:
+            body_content = None
+        body_content_kwargs['content'] = body_content
+        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
+        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200, 201, 202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
+
+        deserialized = None  # a 202 Accepted response carries no body to deserialize
+        if response.status_code == 200:
+            deserialized = self._deserialize('StorageTarget', pipeline_response)
+
+        if response.status_code == 201:
+            deserialized = self._deserialize('StorageTarget', pipeline_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, {})
+
+        return deserialized
+    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/storageTargets/{storageTargetName}'}  # type: ignore
+
+    async def begin_create_or_update(
+        self,
+        resource_group_name: str,
+        cache_name: str,
+        storage_target_name: str,
+        junctions: Optional[List["models.NamespaceJunction"]] = None,
+        target_type: Optional[Union[str, "models.StorageTargetType"]] = None,
+        provisioning_state: Optional[Union[str, "models.ProvisioningStateType"]] = None,
+        nfs3: Optional["models.Nfs3Target"] = None,
+        unknown_map: Optional[Dict[str, str]] = None,
+        target: Optional[str] = None,
+        **kwargs
+    ) -> AsyncLROPoller["models.StorageTarget"]:
+        """Create or update a Storage Target. This operation is allowed at any time, but if the Cache is
+        down or unhealthy, the actual creation/modification of the Storage Target may be delayed until
+        the Cache is healthy again.
+
+        :param resource_group_name: Target resource group.
+        :type resource_group_name: str
+        :param cache_name: Name of Cache. Length of name must not be greater than 80 and chars must be
+         from the [-0-9a-zA-Z_] char class.
+        :type cache_name: str
+        :param storage_target_name: Name of the Storage Target. Length of name must not be greater than
+         80 and chars must be from the [-0-9a-zA-Z_] char class.
+        :type storage_target_name: str
+        :param junctions: List of Cache namespace junctions to target for namespace associations.
+        :type junctions: list[~storage_cache_management_client.models.NamespaceJunction]
+        :param target_type: Type of the Storage Target.
+        :type target_type: str or ~storage_cache_management_client.models.StorageTargetType
+        :param provisioning_state: ARM provisioning state, see https://github.com/Azure/azure-resource-
+         manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property.
+        :type provisioning_state: str or ~storage_cache_management_client.models.ProvisioningStateType
+        :param nfs3: Properties when targetType is nfs3.
+        :type nfs3: ~storage_cache_management_client.models.Nfs3Target
+        :param unknown_map: Dictionary of string->string pairs containing information about the Storage
+         Target.
+        :type unknown_map: dict[str, str]
+        :param target: Resource ID of storage container.
+        :type target: str
+        :keyword callable cls: A custom type or function that will be passed the direct response
+        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+        :keyword polling: True for ARMPolling, False for no polling, or a
+         polling object for personal polling strategy
+        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
+        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
+        :return: An instance of AsyncLROPoller that returns either StorageTarget or the result of cls(response)
+        :rtype: ~azure.core.polling.AsyncLROPoller[~storage_cache_management_client.models.StorageTarget]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
+        cls = kwargs.pop('cls', None)  # type: ClsType["models.StorageTarget"]
+        lro_delay = kwargs.pop(
+            'polling_interval',
+            self._config.polling_interval
+        )
+        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
+        if cont_token is None:
+            raw_result = await self._create_or_update_initial(
+                resource_group_name=resource_group_name,
+                cache_name=cache_name,
+                storage_target_name=storage_target_name,
+                junctions=junctions,
+                target_type=target_type,
+                provisioning_state=provisioning_state,
+                nfs3=nfs3,
+                unknown_map=unknown_map,
+                target=target,
+                cls=lambda x,y,z: x,  # keep the raw pipeline response for the poller
+                **kwargs
+            )
+
+        kwargs.pop('error_map', None)  # consumed by the initial call; not for the poller
+        kwargs.pop('content_type', None)
+
+        def get_long_running_output(pipeline_response):
+            deserialized = self._deserialize('StorageTarget', pipeline_response)
+
+            if cls:
+                return cls(pipeline_response, deserialized, {})
+            return deserialized
+
+        path_format_arguments = {
+            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
+            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
+            'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
+            'storageTargetName': self._serialize.url("storage_target_name", storage_target_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
+        }
+
+        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
+        elif polling is False: polling_method = AsyncNoPolling()
+        else: polling_method = polling
+        if cont_token:
+            return AsyncLROPoller.from_continuation_token(
+                polling_method=polling_method,
+                continuation_token=cont_token,
+                client=self._client,
+                deserialization_callback=get_long_running_output
+            )
+        else:
+            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
+    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/storageTargets/{storageTargetName}'}  # type: ignore
diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations/_usage_model_operations.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations/_usage_model_operations.py
new file mode 100644
index 00000000000..0496ca26bc6
--- /dev/null
+++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/aio/operations/_usage_model_operations.py
@@ -0,0 +1,108 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
+import warnings
+
+from azure.core.async_paging import AsyncItemPaged, AsyncList
+from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
+from azure.core.pipeline import PipelineResponse
+from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
+from azure.mgmt.core.exceptions import ARMErrorFormat
+
+from ... import models
+
+T = TypeVar('T')
+ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
+
+class UsageModelOperations:
+    """UsageModelOperations async operations.
+
+    You should not instantiate this class directly. Instead, you should create a Client instance that
+    instantiates it for you and attaches it as an attribute.
+
+    :ivar models: Alias to model classes used in this operation group.
+    :type models: ~storage_cache_management_client.models
+    :param client: Client for service requests.
+    :param config: Configuration of service client.
+    :param serializer: An object model serializer.
+    :param deserializer: An object model deserializer.
+    """
+
+    models = models
+
+    def __init__(self, client, config, serializer, deserializer) -> None:
+        self._client = client
+        self._serialize = serializer
+        self._deserialize = deserializer
+        self._config = config
+
+    def list(
+        self,
+        **kwargs
+    ) -> AsyncIterable["models.UsageModelsResult"]:
+        """Get the list of Cache Usage Models available to this subscription.
+
+        :keyword callable cls: A custom type or function that will be passed the direct response
+        :return: An iterator like instance of either UsageModelsResult or the result of cls(response)
+        :rtype: ~azure.core.async_paging.AsyncItemPaged[~storage_cache_management_client.models.UsageModelsResult]
+        :raises: ~azure.core.exceptions.HttpResponseError
+        """
+        cls = kwargs.pop('cls', None)  # type: ClsType["models.UsageModelsResult"]
+        error_map = {
+            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+        }
+        error_map.update(kwargs.pop('error_map', {}))  # callers may extend the status -> exception mapping
+        api_version = "2020-10-01"
+        accept = "application/json"
+
+        def prepare_request(next_link=None):
+            # Construct headers
+            header_parameters = {}  # type: Dict[str, Any]
+            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+            if not next_link:
+                # Construct URL
+                url = self.list.metadata['url']  # type: ignore
+                path_format_arguments = {
+                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
+                }
+                url = self._client.format_url(url, **path_format_arguments)
+                # Construct parameters
+                query_parameters = {}  # type: Dict[str, Any]
+                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
+
+                request = self._client.get(url, query_parameters, header_parameters)
+            else:
+                url = next_link  # absolute URL supplied by the service; used verbatim
+                query_parameters = {}  # type: Dict[str, Any]
+                request = self._client.get(url, query_parameters, header_parameters)
+            return request
+
+        async def extract_data(pipeline_response):
+            deserialized = self._deserialize('UsageModelsResult', pipeline_response)
+            list_of_elem = deserialized.value
+            if cls:
+                list_of_elem = cls(list_of_elem)  # optional caller hook transforms each page
+            return deserialized.next_link or None, AsyncList(list_of_elem)
+
+        async def get_next(next_link=None):
+            request = prepare_request(next_link)
+
+            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
+            response = pipeline_response.http_response
+
+            if response.status_code not in [200]:
+                map_error(status_code=response.status_code, response=response, error_map=error_map)
+                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
+
+            return pipeline_response
+
+        return AsyncItemPaged(
+            get_next, extract_data
+        )
+    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.StorageCache/usageModels'}  # type: ignore
diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/models/__init__.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/models/__init__.py
new file mode 100644
index 00000000000..1448a04c21d
--- /dev/null
+++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/models/__init__.py
@@ -0,0 +1,135 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+try:  # prefer the Python 3 (syntax-annotated) model definitions
+    from ._models_py3 import ASCOperation
+    from ._models_py3 import ApiOperation
+    from ._models_py3 import ApiOperationDisplay
+    from ._models_py3 import ApiOperationListResult
+    from ._models_py3 import ApiOperationPropertiesServiceSpecification
+    from ._models_py3 import Cache
+    from ._models_py3 import CacheActiveDirectorySettingsCredentials
+    from ._models_py3 import CacheHealth
+    from ._models_py3 import CacheUpgradeStatus
+    from ._models_py3 import CacheUsernameDownloadSettingsCredentials
+    from ._models_py3 import CachesListResult
+    from ._models_py3 import CloudErrorBody
+    from ._models_py3 import ErrorResponse
+    from ._models_py3 import MetricDimension
+    from ._models_py3 import MetricSpecification
+    from ._models_py3 import NamespaceJunction
+    from ._models_py3 import Nfs3Target
+    from ._models_py3 import NfsAccessPolicy
+    from ._models_py3 import NfsAccessRule
+    from ._models_py3 import ResourceSku
+    from ._models_py3 import ResourceSkuCapabilities
+    from ._models_py3 import ResourceSkuLocationInfo
+    from ._models_py3 import ResourceSkusResult
+    from ._models_py3 import Restriction
+    from ._models_py3 import StorageTarget
+    from ._models_py3 import StorageTargetResource
+    from ._models_py3 import StorageTargetsResult
+    from ._models_py3 import SystemData
+    from ._models_py3 import UsageModel
+    from ._models_py3 import UsageModelDisplay
+    from ._models_py3 import UsageModelsResult
+except (SyntaxError, ImportError):  # fall back to the py2-compatible models
+    from ._models import ASCOperation  # type: ignore
+    from ._models import ApiOperation  # type: ignore
+    from ._models import ApiOperationDisplay  # type: ignore
+    from ._models import ApiOperationListResult  # type: ignore
+    from ._models import ApiOperationPropertiesServiceSpecification  # type: ignore
+    from ._models import Cache  # type: ignore
+    from ._models import CacheActiveDirectorySettingsCredentials  # type: ignore
+    from ._models import CacheHealth  # type: ignore
+    from ._models import CacheUpgradeStatus  # type: ignore
+    from ._models import CacheUsernameDownloadSettingsCredentials  # type: ignore
+    from ._models import CachesListResult  # type: ignore
+    from ._models import CloudErrorBody  # type: ignore
+    from ._models import ErrorResponse  # type: ignore
+    from ._models import MetricDimension  # type: ignore
+    from ._models import MetricSpecification  # type: ignore
+    from ._models import NamespaceJunction  # type: ignore
+    from ._models import Nfs3Target  # type: ignore
+    from ._models import NfsAccessPolicy  # type: ignore
+    from ._models import NfsAccessRule  # type: ignore
+    from ._models import ResourceSku  # type: ignore
+    from ._models import ResourceSkuCapabilities  # type: ignore
+    from ._models import ResourceSkuLocationInfo  # type: ignore
+    from ._models import ResourceSkusResult  # type: ignore
+    from ._models import Restriction  # type: ignore
+    from ._models import StorageTarget  # type: ignore
+    from ._models import StorageTargetResource  # type: ignore
+    from ._models import StorageTargetsResult  # type: ignore
+    from ._models import SystemData  # type: ignore
+    from ._models import UsageModel  # type: ignore
+    from ._models import UsageModelDisplay  # type: ignore
+    from ._models import UsageModelsResult  # type: ignore
+
+from ._storage_cache_management_client_enums import (  # enums shared by both model flavors
+    CacheIdentityType,
+    CreatedByType,
+    DomainJoinedType,
+    FirmwareStatusType,
+    HealthStateType,
+    MetricAggregationType,
+    NfsAccessRuleAccess,
+    NfsAccessRuleScope,
+    ProvisioningStateType,
+    ReasonCode,
+    StorageTargetType,
+    UsernameDownloadedType,
+    UsernameSource,
+)
+
+__all__ = [  # public surface of the models package (models first, then enums)
+    'ASCOperation',
+    'ApiOperation',
+    'ApiOperationDisplay',
+    'ApiOperationListResult',
+    'ApiOperationPropertiesServiceSpecification',
+    'Cache',
+    'CacheActiveDirectorySettingsCredentials',
+    'CacheHealth',
+    'CacheUpgradeStatus',
+    'CacheUsernameDownloadSettingsCredentials',
+    'CachesListResult',
+    'CloudErrorBody',
+    'ErrorResponse',
+    'MetricDimension',
+    'MetricSpecification',
+    'NamespaceJunction',
+    'Nfs3Target',
+    'NfsAccessPolicy',
+    'NfsAccessRule',
+    'ResourceSku',
+    'ResourceSkuCapabilities',
+    'ResourceSkuLocationInfo',
+    'ResourceSkusResult',
+    'Restriction',
+    'StorageTarget',
+    'StorageTargetResource',
+    'StorageTargetsResult',
+    'SystemData',
+    'UsageModel',
+    'UsageModelDisplay',
+    'UsageModelsResult',
+    'CacheIdentityType',
+    'CreatedByType',
+    'DomainJoinedType',
+    'FirmwareStatusType',
+    'HealthStateType',
+    'MetricAggregationType',
+    'NfsAccessRuleAccess',
+    'NfsAccessRuleScope',
+    'ProvisioningStateType',
+    'ReasonCode',
+    'StorageTargetType',
+    'UsernameDownloadedType',
+    'UsernameSource',
+]
diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/models/_models.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/models/_models.py
new file mode 100644
index 00000000000..31d9708dcdb
--- /dev/null
+++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/models/_models.py
@@ -0,0 +1,1205 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+import msrest.serialization
+
+
+class ApiOperation(msrest.serialization.Model):  # auto-generated msrest model; hand edits are lost on regeneration
+    """REST API operation description: see https://github.com/Azure/azure-rest-api-specs/blob/master/documentation/openapi-authoring-automated-guidelines.md#r3023-operationsapiimplementation.
+
+    :param display: The object that represents the operation.
+    :type display: ~storage_cache_management_client.models.ApiOperationDisplay
+    :param origin: Origin of the operation.
+    :type origin: str
+    :param is_data_action: The flag that indicates whether the operation applies to data plane.
+    :type is_data_action: bool
+    :param name: Operation name: {provider}/{resource}/{operation}.
+    :type name: str
+    :param service_specification: Specification of the all the metrics provided for a resource
+    type.
+    :type service_specification:
+    ~storage_cache_management_client.models.ApiOperationPropertiesServiceSpecification
+    """
+
+    _attribute_map = {  # attr name -> JSON wire key/type; a dotted key flattens a nested JSON object
+        'display': {'key': 'display', 'type': 'ApiOperationDisplay'},
+        'origin': {'key': 'origin', 'type': 'str'},
+        'is_data_action': {'key': 'isDataAction', 'type': 'bool'},
+        'name': {'key': 'name', 'type': 'str'},
+        'service_specification': {'key': 'properties.serviceSpecification', 'type': 'ApiOperationPropertiesServiceSpecification'},
+    }
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(ApiOperation, self).__init__(**kwargs)
+        self.display = kwargs.get('display', None)  # every field is an optional keyword argument
+        self.origin = kwargs.get('origin', None)
+        self.is_data_action = kwargs.get('is_data_action', None)
+        self.name = kwargs.get('name', None)
+        self.service_specification = kwargs.get('service_specification', None)
+
+
+class ApiOperationDisplay(msrest.serialization.Model):  # auto-generated msrest model; all fields optional
+    """The object that represents the operation.
+
+    :param operation: Operation type: Read, write, delete, etc.
+    :type operation: str
+    :param provider: Service provider: Microsoft.StorageCache.
+    :type provider: str
+    :param resource: Resource on which the operation is performed: Cache, etc.
+    :type resource: str
+    :param description: The description of the operation.
+    :type description: str
+    """
+
+    _attribute_map = {  # attr name -> JSON wire key/type used by msrest (de)serialization
+        'operation': {'key': 'operation', 'type': 'str'},
+        'provider': {'key': 'provider', 'type': 'str'},
+        'resource': {'key': 'resource', 'type': 'str'},
+        'description': {'key': 'description', 'type': 'str'},
+    }
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(ApiOperationDisplay, self).__init__(**kwargs)
+        self.operation = kwargs.get('operation', None)
+        self.provider = kwargs.get('provider', None)
+        self.resource = kwargs.get('resource', None)
+        self.description = kwargs.get('description', None)
+
+
+class ApiOperationListResult(msrest.serialization.Model):  # one page of a paged operations listing
+    """Result of the request to list Resource Provider operations. It contains a list of operations and a URL link to get the next set of results.
+
+    :param next_link: URL to get the next set of operation list results if there are any.
+    :type next_link: str
+    :param value: List of Resource Provider operations supported by the Microsoft.StorageCache
+    resource provider.
+    :type value: list[~storage_cache_management_client.models.ApiOperation]
+    """
+
+    _attribute_map = {
+        'next_link': {'key': 'nextLink', 'type': 'str'},
+        'value': {'key': 'value', 'type': '[ApiOperation]'},  # '[X]' denotes a JSON array of model X
+    }
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(ApiOperationListResult, self).__init__(**kwargs)
+        self.next_link = kwargs.get('next_link', None)  # None means no further pages
+        self.value = kwargs.get('value', None)
+
+
+class ApiOperationPropertiesServiceSpecification(msrest.serialization.Model):  # wrapper model for the metrics list
+    """Specification of the all the metrics provided for a resource type.
+
+    :param metric_specifications: Details about operations related to metrics.
+    :type metric_specifications: list[~storage_cache_management_client.models.MetricSpecification]
+    """
+
+    _attribute_map = {
+        'metric_specifications': {'key': 'metricSpecifications', 'type': '[MetricSpecification]'},
+    }
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(ApiOperationPropertiesServiceSpecification, self).__init__(**kwargs)
+        self.metric_specifications = kwargs.get('metric_specifications', None)
+
+
+class ASCOperation(msrest.serialization.Model):  # async operation status returned by asc-operation show
+    """The status of operation.
+
+    :param id: The operation Id.
+    :type id: str
+    :param name: The operation name.
+    :type name: str
+    :param start_time: The start time of the operation.
+    :type start_time: str
+    :param end_time: The end time of the operation.
+    :type end_time: str
+    :param status: The status of the operation.
+    :type status: str
+    :param error: The error detail of the operation if any.
+    :type error: ~storage_cache_management_client.models.ErrorResponse
+    """
+
+    _attribute_map = {
+        'id': {'key': 'id', 'type': 'str'},  # 'id' shadows the builtin; kept to mirror the wire contract
+        'name': {'key': 'name', 'type': 'str'},
+        'start_time': {'key': 'startTime', 'type': 'str'},  # timestamps are kept as raw strings, not datetimes
+        'end_time': {'key': 'endTime', 'type': 'str'},
+        'status': {'key': 'status', 'type': 'str'},
+        'error': {'key': 'error', 'type': 'ErrorResponse'},
+    }
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(ASCOperation, self).__init__(**kwargs)
+        self.id = kwargs.get('id', None)
+        self.name = kwargs.get('name', None)
+        self.start_time = kwargs.get('start_time', None)
+        self.end_time = kwargs.get('end_time', None)
+        self.status = kwargs.get('status', None)
+        self.error = kwargs.get('error', None)  # only set when the operation failed
+
+
+class Cache(msrest.serialization.Model):  # flattened model: nested JSON sections (properties, directoryServicesSettings, securitySettings, encryptionSettings, networkSettings, sku, identity) surface as top-level attrs
+    """A Cache instance. Follows Azure Resource Manager standards: https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/resource-api-reference.md.
+
+    Variables are only populated by the server, and will be ignored when sending a request.
+
+    :param tags: A set of tags. ARM tags as name/value pairs.
+    :type tags: object
+    :ivar id: Resource ID of the Cache.
+    :vartype id: str
+    :param location: Region name string.
+    :type location: str
+    :ivar name: Name of Cache.
+    :vartype name: str
+    :ivar type: Type of the Cache; Microsoft.StorageCache/Cache.
+    :vartype type: str
+    :ivar system_data: The system meta data relating to this resource.
+    :vartype system_data: ~storage_cache_management_client.models.SystemData
+    :param cache_size_gb: The size of this Cache, in GB.
+    :type cache_size_gb: int
+    :ivar health: Health of the Cache.
+    :vartype health: ~storage_cache_management_client.models.CacheHealth
+    :ivar mount_addresses: Array of IP addresses that can be used by clients mounting this Cache.
+    :vartype mount_addresses: list[str]
+    :param provisioning_state: ARM provisioning state, see https://github.com/Azure/azure-resource-
+    manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property. Possible values include:
+    "Succeeded", "Failed", "Cancelled", "Creating", "Deleting", "Updating".
+    :type provisioning_state: str or ~storage_cache_management_client.models.ProvisioningStateType
+    :param subnet: Subnet used for the Cache.
+    :type subnet: str
+    :param upgrade_status: Upgrade status of the Cache.
+    :type upgrade_status: ~storage_cache_management_client.models.CacheUpgradeStatus
+    :param extended_groups_enabled: This indicates if Extended Groups is enabled.
+    :type extended_groups_enabled: bool
+    :param username_source: This setting determines how the system gets username and group names
+    for clients. Possible values include: "AD", "LDAP", "File", "None". Default value: "None".
+    :type username_source: str or ~storage_cache_management_client.models.UsernameSource
+    :param group_file_uri: The URI of the file containing the group information (in /etc/group file
+    format). This field must be populated when 'usernameSource' is set to 'File'.
+    :type group_file_uri: str
+    :param user_file_uri: The URI of the file containing the user information (in /etc/passwd file
+    format). This field must be populated when 'usernameSource' is set to 'File'.
+    :type user_file_uri: str
+    :param ldap_server: The fully qualified domain name or IP address of the LDAP server to use.
+    :type ldap_server: str
+    :param ldap_base_dn: The base distinguished name for the LDAP domain.
+    :type ldap_base_dn: str
+    :param encrypt_ldap_connection: This indicates if the LDAP connection should be encrypted.
+    :type encrypt_ldap_connection: bool
+    :param require_valid_certificate: Determines if the certificates should be validated by a
+    certificate authority. When true, caCertificateURI must be provided.
+    :type require_valid_certificate: bool
+    :param auto_download_certificate: Determines if the certificate should be automatically
+    downloaded. This applies to 'caCertificateURI' when 'requireValidCertificate' is true, or a
+    self signed certificate otherwise.
+    :type auto_download_certificate: bool
+    :param ca_certificate_uri: The URI of the CA certificate to validate the LDAP secure
+    connection. This field must be populated when 'requireValidCertificate' is set to true.
+    :type ca_certificate_uri: str
+    :ivar username_downloaded: Indicates if the HPC Cache has performed the username download
+    successfully. Possible values include: "Yes", "No", "Error".
+    :vartype username_downloaded: str or
+    ~storage_cache_management_client.models.UsernameDownloadedType
+    :param credentials_directory_services_settings_username_download_credentials: When present,
+    these are the credentials for the secure LDAP connection.
+    :type credentials_directory_services_settings_username_download_credentials:
+    ~storage_cache_management_client.models.CacheUsernameDownloadSettingsCredentials
+    :param primary_dns_ip_address: Primary DNS IP address used to resolve the Active Directory
+    domain controller's fully qualified domain name.
+    :type primary_dns_ip_address: str
+    :param secondary_dns_ip_address: Secondary DNS IP address used to resolve the Active Directory
+    domain controller's fully qualified domain name.
+    :type secondary_dns_ip_address: str
+    :param domain_name: The fully qualified domain name of the Active Directory domain controller.
+    :type domain_name: str
+    :param domain_net_bios: The Active Directory domain's NetBIOS name.
+    :type domain_net_bios: str
+    :param smb_server_name: The name (NetBIOS) used for the HPC Cache to join the Active Directory
+    domain. Length must not be greater than 15 and chars must be from the [-0-9a-zA-Z_] char class.
+    :type smb_server_name: str
+    :ivar domain_joined: This field indicates if the HPC Cache is joined to the Active Directory
+    domain. Possible values include: "Yes", "No", "Error".
+    :vartype domain_joined: str or ~storage_cache_management_client.models.DomainJoinedType
+    :param credentials_directory_services_settings_active_directory_credentials: Active Directory
+    admin or user credentials used to join the HPC Cache to a domain.
+    :type credentials_directory_services_settings_active_directory_credentials:
+    ~storage_cache_management_client.models.CacheActiveDirectorySettingsCredentials
+    :param access_policies: NFS access policies defined for this cache.
+    :type access_policies: list[~storage_cache_management_client.models.NfsAccessPolicy]
+    :param key_url: The URL referencing a key encryption key in Key Vault.
+    :type key_url: str
+    :param id_encryption_settings_key_encryption_key_source_vault_id: Resource Id.
+    :type id_encryption_settings_key_encryption_key_source_vault_id: str
+    :param mtu: The IPv4 maximum transmission unit configured for the subnet.
+    :type mtu: int
+    :ivar utility_addresses: Array of additional IP addresses used by this Cache.
+    :vartype utility_addresses: list[str]
+    :param name_sku_name: SKU name for this Cache.
+    :type name_sku_name: str
+    :ivar principal_id: The principal id of the cache.
+    :vartype principal_id: str
+    :ivar tenant_id: The tenant id associated with the cache.
+    :vartype tenant_id: str
+    :param type_identity_type: The type of identity used for the cache. Possible values include:
+    "SystemAssigned", "None".
+    :type type_identity_type: str or ~storage_cache_management_client.models.CacheIdentityType
+    """
+
+    _validation = {  # readonly fields are rejected on send; patterns/ranges are checked client-side
+        'id': {'readonly': True},
+        'name': {'readonly': True, 'pattern': r'^[-0-9a-zA-Z_]{1,80}$'},
+        'type': {'readonly': True},
+        'system_data': {'readonly': True},
+        'health': {'readonly': True},
+        'mount_addresses': {'readonly': True},
+        'username_downloaded': {'readonly': True},
+        'smb_server_name': {'pattern': r'^[-0-9a-zA-Z]{1,15}$'},
+        'domain_joined': {'readonly': True},
+        'mtu': {'maximum': 1500, 'minimum': 576},
+        'utility_addresses': {'readonly': True},
+        'principal_id': {'readonly': True},
+        'tenant_id': {'readonly': True},
+    }
+
+    _attribute_map = {  # dotted keys flatten nested JSON sections into top-level attributes
+        'tags': {'key': 'tags', 'type': 'object'},
+        'id': {'key': 'id', 'type': 'str'},
+        'location': {'key': 'location', 'type': 'str'},
+        'name': {'key': 'name', 'type': 'str'},
+        'type': {'key': 'type', 'type': 'str'},
+        'system_data': {'key': 'systemData', 'type': 'SystemData'},
+        'cache_size_gb': {'key': 'properties.cacheSizeGB', 'type': 'int'},
+        'health': {'key': 'properties.health', 'type': 'CacheHealth'},
+        'mount_addresses': {'key': 'properties.mountAddresses', 'type': '[str]'},
+        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
+        'subnet': {'key': 'properties.subnet', 'type': 'str'},
+        'upgrade_status': {'key': 'properties.upgradeStatus', 'type': 'CacheUpgradeStatus'},
+        'extended_groups_enabled': {'key': 'directoryServicesSettings.usernameDownload.extendedGroupsEnabled', 'type': 'bool'},
+        'username_source': {'key': 'directoryServicesSettings.usernameDownload.usernameSource', 'type': 'str'},
+        'group_file_uri': {'key': 'directoryServicesSettings.usernameDownload.groupFileURI', 'type': 'str'},
+        'user_file_uri': {'key': 'directoryServicesSettings.usernameDownload.userFileURI', 'type': 'str'},
+        'ldap_server': {'key': 'directoryServicesSettings.usernameDownload.ldapServer', 'type': 'str'},
+        'ldap_base_dn': {'key': 'directoryServicesSettings.usernameDownload.ldapBaseDn', 'type': 'str'},
+        'encrypt_ldap_connection': {'key': 'directoryServicesSettings.usernameDownload.encryptLdapConnection', 'type': 'bool'},
+        'require_valid_certificate': {'key': 'directoryServicesSettings.usernameDownload.requireValidCertificate', 'type': 'bool'},
+        'auto_download_certificate': {'key': 'directoryServicesSettings.usernameDownload.autoDownloadCertificate', 'type': 'bool'},
+        'ca_certificate_uri': {'key': 'directoryServicesSettings.usernameDownload.caCertificateURI', 'type': 'str'},
+        'username_downloaded': {'key': 'directoryServicesSettings.usernameDownload.usernameDownloaded', 'type': 'str'},
+        'credentials_directory_services_settings_username_download_credentials': {'key': 'directoryServicesSettings.usernameDownload.credentials', 'type': 'CacheUsernameDownloadSettingsCredentials'},
+        'primary_dns_ip_address': {'key': 'directoryServicesSettings.activeDirectory.primaryDnsIpAddress', 'type': 'str'},
+        'secondary_dns_ip_address': {'key': 'directoryServicesSettings.activeDirectory.secondaryDnsIpAddress', 'type': 'str'},
+        'domain_name': {'key': 'directoryServicesSettings.activeDirectory.domainName', 'type': 'str'},
+        'domain_net_bios': {'key': 'directoryServicesSettings.activeDirectory.domainNetBios', 'type': 'str'},
+        'smb_server_name': {'key': 'directoryServicesSettings.activeDirectory.smbServerName', 'type': 'str'},
+        'domain_joined': {'key': 'directoryServicesSettings.activeDirectory.domainJoined', 'type': 'str'},
+        'credentials_directory_services_settings_active_directory_credentials': {'key': 'directoryServicesSettings.activeDirectory.credentials', 'type': 'CacheActiveDirectorySettingsCredentials'},
+        'access_policies': {'key': 'securitySettings.accessPolicies', 'type': '[NfsAccessPolicy]'},
+        'key_url': {'key': 'encryptionSettings.keyEncryptionKey.keyUrl', 'type': 'str'},
+        'id_encryption_settings_key_encryption_key_source_vault_id': {'key': 'encryptionSettings.keyEncryptionKey.sourceVault.id', 'type': 'str'},
+        'mtu': {'key': 'networkSettings.mtu', 'type': 'int'},
+        'utility_addresses': {'key': 'networkSettings.utilityAddresses', 'type': '[str]'},
+        'name_sku_name': {'key': 'sku.name', 'type': 'str'},
+        'principal_id': {'key': 'identity.principalId', 'type': 'str'},
+        'tenant_id': {'key': 'identity.tenantId', 'type': 'str'},
+        'type_identity_type': {'key': 'identity.type', 'type': 'str'},
+    }
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(Cache, self).__init__(**kwargs)
+        self.tags = kwargs.get('tags', None)
+        self.id = None  # readonly attrs (here and below) start as None and are filled by the server on deserialization
+        self.location = kwargs.get('location', None)
+        self.name = None
+        self.type = None
+        self.system_data = None
+        self.cache_size_gb = kwargs.get('cache_size_gb', None)
+        self.health = None
+        self.mount_addresses = None
+        self.provisioning_state = kwargs.get('provisioning_state', None)
+        self.subnet = kwargs.get('subnet', None)
+        self.upgrade_status = kwargs.get('upgrade_status', None)
+        self.extended_groups_enabled = kwargs.get('extended_groups_enabled', None)
+        self.username_source = kwargs.get('username_source', "None")  # default is the literal string "None", not Python None
+        self.group_file_uri = kwargs.get('group_file_uri', None)
+        self.user_file_uri = kwargs.get('user_file_uri', None)
+        self.ldap_server = kwargs.get('ldap_server', None)
+        self.ldap_base_dn = kwargs.get('ldap_base_dn', None)
+        self.encrypt_ldap_connection = kwargs.get('encrypt_ldap_connection', None)
+        self.require_valid_certificate = kwargs.get('require_valid_certificate', None)
+        self.auto_download_certificate = kwargs.get('auto_download_certificate', None)
+        self.ca_certificate_uri = kwargs.get('ca_certificate_uri', None)
+        self.username_downloaded = None
+        self.credentials_directory_services_settings_username_download_credentials = kwargs.get('credentials_directory_services_settings_username_download_credentials', None)
+        self.primary_dns_ip_address = kwargs.get('primary_dns_ip_address', None)
+        self.secondary_dns_ip_address = kwargs.get('secondary_dns_ip_address', None)
+        self.domain_name = kwargs.get('domain_name', None)
+        self.domain_net_bios = kwargs.get('domain_net_bios', None)
+        self.smb_server_name = kwargs.get('smb_server_name', None)
+        self.domain_joined = None
+        self.credentials_directory_services_settings_active_directory_credentials = kwargs.get('credentials_directory_services_settings_active_directory_credentials', None)
+        self.access_policies = kwargs.get('access_policies', None)
+        self.key_url = kwargs.get('key_url', None)
+        self.id_encryption_settings_key_encryption_key_source_vault_id = kwargs.get('id_encryption_settings_key_encryption_key_source_vault_id', None)
+        self.mtu = kwargs.get('mtu', 1500)  # non-None default; validated range is 576-1500 per _validation
+        self.utility_addresses = None
+        self.name_sku_name = kwargs.get('name_sku_name', None)
+        self.principal_id = None
+        self.tenant_id = None
+        self.type_identity_type = kwargs.get('type_identity_type', None)
+
+
+class CacheActiveDirectorySettingsCredentials(msrest.serialization.Model):  # both fields are required
+    """Active Directory admin or user credentials used to join the HPC Cache to a domain.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param username: Required. User name of the Active Directory domain administrator. This value
+    is stored encrypted and not returned on response.
+    :type username: str
+    :param password: Required. Plain text password of the Active Directory domain administrator.
+    This value is stored encrypted and not returned on response.
+    :type password: str
+    """
+
+    _validation = {
+        'username': {'required': True},
+        'password': {'required': True},
+    }
+
+    _attribute_map = {
+        'username': {'key': 'username', 'type': 'str'},
+        'password': {'key': 'password', 'type': 'str'},
+    }
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(CacheActiveDirectorySettingsCredentials, self).__init__(**kwargs)
+        self.username = kwargs['username']  # required: subscript access raises KeyError if omitted
+        self.password = kwargs['password']
+
+
+class CacheHealth(msrest.serialization.Model):  # health state + human-readable explanation
+    """An indication of Cache health. Gives more information about health than just that related to provisioning.
+
+    :param state: List of Cache health states. Possible values include: "Unknown", "Healthy",
+    "Degraded", "Down", "Transitioning", "Stopping", "Stopped", "Upgrading", "Flushing".
+    :type state: str or ~storage_cache_management_client.models.HealthStateType
+    :param status_description: Describes explanation of state.
+    :type status_description: str
+    """
+
+    _attribute_map = {
+        'state': {'key': 'state', 'type': 'str'},  # serialized as a plain string, not the enum type
+        'status_description': {'key': 'statusDescription', 'type': 'str'},
+    }
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(CacheHealth, self).__init__(**kwargs)
+        self.state = kwargs.get('state', None)
+        self.status_description = kwargs.get('status_description', None)
+
+
+class CachesListResult(msrest.serialization.Model):  # one page of a paged Cache listing
+    """Result of the request to list Caches. It contains a list of Caches and a URL link to get the next set of results.
+
+    :param next_link: URL to get the next set of Cache list results, if there are any.
+    :type next_link: str
+    :param value: List of Caches.
+    :type value: list[~storage_cache_management_client.models.Cache]
+    """
+
+    _attribute_map = {
+        'next_link': {'key': 'nextLink', 'type': 'str'},
+        'value': {'key': 'value', 'type': '[Cache]'},
+    }
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(CachesListResult, self).__init__(**kwargs)
+        self.next_link = kwargs.get('next_link', None)  # None means no further pages
+        self.value = kwargs.get('value', None)
+
+
+class CacheUpgradeStatus(msrest.serialization.Model):  # fully server-populated: every field is readonly
+    """Properties describing the software upgrade state of the Cache.
+
+    Variables are only populated by the server, and will be ignored when sending a request.
+
+    :ivar current_firmware_version: Version string of the firmware currently installed on this
+    Cache.
+    :vartype current_firmware_version: str
+    :ivar firmware_update_status: True if there is a firmware update ready to install on this
+    Cache. The firmware will automatically be installed after firmwareUpdateDeadline if not
+    triggered earlier via the upgrade operation. Possible values include: "available",
+    "unavailable".
+    :vartype firmware_update_status: str or
+    ~storage_cache_management_client.models.FirmwareStatusType
+    :ivar firmware_update_deadline: Time at which the pending firmware update will automatically be
+    installed on the Cache.
+    :vartype firmware_update_deadline: ~datetime.datetime
+    :ivar last_firmware_update: Time of the last successful firmware update.
+    :vartype last_firmware_update: ~datetime.datetime
+    :ivar pending_firmware_version: When firmwareUpdateAvailable is true, this field holds the
+    version string for the update.
+    :vartype pending_firmware_version: str
+    """
+
+    _validation = {
+        'current_firmware_version': {'readonly': True},
+        'firmware_update_status': {'readonly': True},
+        'firmware_update_deadline': {'readonly': True},
+        'last_firmware_update': {'readonly': True},
+        'pending_firmware_version': {'readonly': True},
+    }
+
+    _attribute_map = {
+        'current_firmware_version': {'key': 'currentFirmwareVersion', 'type': 'str'},
+        'firmware_update_status': {'key': 'firmwareUpdateStatus', 'type': 'str'},
+        'firmware_update_deadline': {'key': 'firmwareUpdateDeadline', 'type': 'iso-8601'},  # deserialized to datetime
+        'last_firmware_update': {'key': 'lastFirmwareUpdate', 'type': 'iso-8601'},
+        'pending_firmware_version': {'key': 'pendingFirmwareVersion', 'type': 'str'},
+    }
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(CacheUpgradeStatus, self).__init__(**kwargs)
+        self.current_firmware_version = None  # all attrs populated only by server responses
+        self.firmware_update_status = None
+        self.firmware_update_deadline = None
+        self.last_firmware_update = None
+        self.pending_firmware_version = None
+
+
+class CacheUsernameDownloadSettingsCredentials(msrest.serialization.Model):  # optional LDAP bind credentials
+    """When present, these are the credentials for the secure LDAP connection.
+
+    :param bind_dn: The Bind distinguished name identity to be used in the secure LDAP connection.
+    This value is stored encrypted and not returned on response.
+    :type bind_dn: str
+    :param bind_password: The Bind password to be used in the secure LDAP connection. This value is
+    stored encrypted and not returned on response.
+    :type bind_password: str
+    """
+
+    _attribute_map = {
+        'bind_dn': {'key': 'bindDn', 'type': 'str'},
+        'bind_password': {'key': 'bindPassword', 'type': 'str'},
+    }
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(CacheUsernameDownloadSettingsCredentials, self).__init__(**kwargs)
+        self.bind_dn = kwargs.get('bind_dn', None)
+        self.bind_password = kwargs.get('bind_password', None)
+
+
+class CloudErrorBody(msrest.serialization.Model):  # ARM error body; 'details' nests further CloudErrorBody items
+    """An error response.
+
+    :param code: An identifier for the error. Codes are invariant and are intended to be consumed
+    programmatically.
+    :type code: str
+    :param details: A list of additional details about the error.
+    :type details: list[~storage_cache_management_client.models.CloudErrorBody]
+    :param message: A message describing the error, intended to be suitable for display in a user
+    interface.
+    :type message: str
+    :param target: The target of the particular error. For example, the name of the property in
+    error.
+    :type target: str
+    """
+
+    _attribute_map = {
+        'code': {'key': 'code', 'type': 'str'},
+        'details': {'key': 'details', 'type': '[CloudErrorBody]'},  # recursive: errors can carry sub-errors
+        'message': {'key': 'message', 'type': 'str'},
+        'target': {'key': 'target', 'type': 'str'},
+    }
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(CloudErrorBody, self).__init__(**kwargs)
+        self.code = kwargs.get('code', None)
+        self.details = kwargs.get('details', None)
+        self.message = kwargs.get('message', None)
+        self.target = kwargs.get('target', None)
+
+
+class ErrorResponse(msrest.serialization.Model):  # minimal code/message error shape used by ASCOperation.error
+    """Describes the format of Error response.
+
+    :param code: Error code.
+    :type code: str
+    :param message: Error message indicating why the operation failed.
+    :type message: str
+    """
+
+    _attribute_map = {
+        'code': {'key': 'code', 'type': 'str'},
+        'message': {'key': 'message', 'type': 'str'},
+    }
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(ErrorResponse, self).__init__(**kwargs)
+        self.code = kwargs.get('code', None)
+        self.message = kwargs.get('message', None)
+
+
+class MetricDimension(msrest.serialization.Model):  # describes one dimension of a monitoring metric
+    """Specifications of the Dimension of metrics.
+
+    :param name: Name of the dimension.
+    :type name: str
+    :param display_name: Localized friendly display name of the dimension.
+    :type display_name: str
+    :param internal_name: Internal name of the dimension.
+    :type internal_name: str
+    :param to_be_exported_for_shoebox: To be exported to shoe box.
+    :type to_be_exported_for_shoebox: bool
+    """
+
+    _attribute_map = {
+        'name': {'key': 'name', 'type': 'str'},
+        'display_name': {'key': 'displayName', 'type': 'str'},
+        'internal_name': {'key': 'internalName', 'type': 'str'},
+        'to_be_exported_for_shoebox': {'key': 'toBeExportedForShoebox', 'type': 'bool'},
+    }
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(MetricDimension, self).__init__(**kwargs)
+        self.name = kwargs.get('name', None)
+        self.display_name = kwargs.get('display_name', None)
+        self.internal_name = kwargs.get('internal_name', None)
+        self.to_be_exported_for_shoebox = kwargs.get('to_be_exported_for_shoebox', None)
+
+
+class MetricSpecification(msrest.serialization.Model):  # metric metadata exposed by the operations API
+    """Details about operation related to metrics.
+
+    :param name: The name of the metric.
+    :type name: str
+    :param display_name: Localized display name of the metric.
+    :type display_name: str
+    :param display_description: The description of the metric.
+    :type display_description: str
+    :param unit: The unit that the metric is measured in.
+    :type unit: str
+    :param aggregation_type: The type of metric aggregation.
+    :type aggregation_type: str
+    :param supported_aggregation_types: Support metric aggregation type.
+    :type supported_aggregation_types: list[str or
+    ~storage_cache_management_client.models.MetricAggregationType]
+    :param metric_class: Type of metrics.
+    :type metric_class: str
+    :param dimensions: Dimensions of the metric.
+    :type dimensions: list[~storage_cache_management_client.models.MetricDimension]
+    """
+
+    _attribute_map = {
+        'name': {'key': 'name', 'type': 'str'},
+        'display_name': {'key': 'displayName', 'type': 'str'},
+        'display_description': {'key': 'displayDescription', 'type': 'str'},
+        'unit': {'key': 'unit', 'type': 'str'},
+        'aggregation_type': {'key': 'aggregationType', 'type': 'str'},
+        'supported_aggregation_types': {'key': 'supportedAggregationTypes', 'type': '[str]'},  # enum values serialized as plain strings
+        'metric_class': {'key': 'metricClass', 'type': 'str'},
+        'dimensions': {'key': 'dimensions', 'type': '[MetricDimension]'},
+    }
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(MetricSpecification, self).__init__(**kwargs)
+        self.name = kwargs.get('name', None)
+        self.display_name = kwargs.get('display_name', None)
+        self.display_description = kwargs.get('display_description', None)
+        self.unit = kwargs.get('unit', None)
+        self.aggregation_type = kwargs.get('aggregation_type', None)
+        self.supported_aggregation_types = kwargs.get('supported_aggregation_types', None)
+        self.metric_class = kwargs.get('metric_class', None)
+        self.dimensions = kwargs.get('dimensions', None)
+
+
+class NamespaceJunction(msrest.serialization.Model):  # maps a cache namespace path to a storage-target path
+    """A namespace junction.
+
+    :param namespace_path: Namespace path on a Cache for a Storage Target.
+    :type namespace_path: str
+    :param target_path: Path in Storage Target to which namespacePath points.
+    :type target_path: str
+    :param nfs_export: NFS export where targetPath exists.
+    :type nfs_export: str
+    :param nfs_access_policy: Name of the access policy applied to this junction.
+    :type nfs_access_policy: str
+    """
+
+    _attribute_map = {
+        'namespace_path': {'key': 'namespacePath', 'type': 'str'},
+        'target_path': {'key': 'targetPath', 'type': 'str'},
+        'nfs_export': {'key': 'nfsExport', 'type': 'str'},
+        'nfs_access_policy': {'key': 'nfsAccessPolicy', 'type': 'str'},  # references an NfsAccessPolicy by name
+    }
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(NamespaceJunction, self).__init__(**kwargs)
+        self.namespace_path = kwargs.get('namespace_path', None)
+        self.target_path = kwargs.get('target_path', None)
+        self.nfs_export = kwargs.get('nfs_export', None)
+        self.nfs_access_policy = kwargs.get('nfs_access_policy', None)
+
+
+class Nfs3Target(msrest.serialization.Model):  # NFSv3 backing-store settings for a storage target
+    """Properties pertaining to the Nfs3Target.
+
+    :param target: IP address or host name of an NFSv3 host (e.g., 10.0.44.44).
+    :type target: str
+    :param usage_model: Identifies the usage model to be used for this Storage Target. Get choices
+    from .../usageModels.
+    :type usage_model: str
+    """
+
+    _validation = {
+        'target': {'pattern': r'^[-.0-9a-zA-Z]+$'},  # hostname/IP characters only
+    }
+
+    _attribute_map = {
+        'target': {'key': 'target', 'type': 'str'},
+        'usage_model': {'key': 'usageModel', 'type': 'str'},
+    }
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(Nfs3Target, self).__init__(**kwargs)
+        self.target = kwargs.get('target', None)
+        self.usage_model = kwargs.get('usage_model', None)
+
+
+class NfsAccessPolicy(msrest.serialization.Model):  # named bundle of NfsAccessRule entries
+    """A set of rules describing access policies applied to NFSv3 clients of the cache.
+
+    :param name: Name identifying this policy. Access Policy names are not case sensitive.
+    :type name: str
+    :param access_rules: The set of rules describing client accesses allowed under this policy.
+    :type access_rules: list[~storage_cache_management_client.models.NfsAccessRule]
+    """
+
+    _attribute_map = {
+        'name': {'key': 'name', 'type': 'str'},
+        'access_rules': {'key': 'accessRules', 'type': '[NfsAccessRule]'},
+    }
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(NfsAccessPolicy, self).__init__(**kwargs)
+        self.name = kwargs.get('name', None)
+        self.access_rules = kwargs.get('access_rules', None)
+
+
+class NfsAccessRule(msrest.serialization.Model):  # single NFS export rule; 'filter' shadows the builtin by design
+    """Rule to place restrictions on portions of the NFS namespace being presented to clients.
+
+    :param scope: Scope applied to this rule. Possible values include: "default", "network",
+    "host".
+    :type scope: str or ~storage_cache_management_client.models.NfsAccessRuleScope
+    :param filter: Filter applied to this rule. The filter's format depends on its scope.
+    'default' scope is reserved for system use. 'network' is in CIDR format (e.g., 10.99.1.0/24)
+    and 'host' is an IP address or fully qualified domain name.
+    :type filter: str
+    :param access: Access allowed by this rule. Possible values include: "no", "ro", "rw".
+    :type access: str or ~storage_cache_management_client.models.NfsAccessRuleAccess
+    :param suid: Allow SUID semantics.
+    :type suid: bool
+    :param submount_access: Allow mounts below the junction.
+    :type submount_access: bool
+    :param root_squash: Map root accesses to anonymousUID and anonymousGID.
+    :type root_squash: bool
+    :param anonymous_uid: UID value that replaces 0 when rootSquash is true.
+    :type anonymous_uid: str
+    :param anonymous_gid: GID value that replaces 0 when rootSquash is true.
+    :type anonymous_gid: str
+    """
+
+    _attribute_map = {
+        'scope': {'key': 'scope', 'type': 'str'},
+        'filter': {'key': 'filter', 'type': 'str'},
+        'access': {'key': 'access', 'type': 'str'},
+        'suid': {'key': 'suid', 'type': 'bool'},
+        'submount_access': {'key': 'submountAccess', 'type': 'bool'},
+        'root_squash': {'key': 'rootSquash', 'type': 'bool'},
+        'anonymous_uid': {'key': 'anonymousUID', 'type': 'str'},
+        'anonymous_gid': {'key': 'anonymousGID', 'type': 'str'},
+    }
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(NfsAccessRule, self).__init__(**kwargs)
+        self.scope = kwargs.get('scope', None)
+        self.filter = kwargs.get('filter', None)
+        self.access = kwargs.get('access', None)
+        self.suid = kwargs.get('suid', None)
+        self.submount_access = kwargs.get('submount_access', None)
+        self.root_squash = kwargs.get('root_squash', None)
+        self.anonymous_uid = kwargs.get('anonymous_uid', "-2")  # defaults are strings, not ints, matching the wire type
+        self.anonymous_gid = kwargs.get('anonymous_gid', "-2")
+
+
+class ResourceSku(msrest.serialization.Model):
+    """A resource SKU.
+
+    Variables are only populated by the server, and will be ignored when sending a request.
+
+    :ivar resource_type: The type of resource the SKU applies to.
+    :vartype resource_type: str
+    :param capabilities: A list of capabilities of this SKU, such as throughput or ops/sec.
+    :type capabilities: list[~storage_cache_management_client.models.ResourceSkuCapabilities]
+    :ivar locations: The set of locations where the SKU is available. This is the supported and
+     registered Azure Geo Regions (e.g., West US, East US, Southeast Asia, etc.).
+    :vartype locations: list[str]
+    :param location_info: The set of locations where the SKU is available.
+    :type location_info: list[~storage_cache_management_client.models.ResourceSkuLocationInfo]
+    :param name: The name of this SKU.
+    :type name: str
+    :param restrictions: The restrictions preventing this SKU from being used. This is empty if
+     there are no restrictions.
+    :type restrictions: list[~storage_cache_management_client.models.Restriction]
+    """
+
+    _validation = {
+        'resource_type': {'readonly': True},
+        'locations': {'readonly': True},
+    }
+
+    _attribute_map = {
+        'resource_type': {'key': 'resourceType', 'type': 'str'},
+        'capabilities': {'key': 'capabilities', 'type': '[ResourceSkuCapabilities]'},
+        'locations': {'key': 'locations', 'type': '[str]'},
+        'location_info': {'key': 'locationInfo', 'type': '[ResourceSkuLocationInfo]'},
+        'name': {'key': 'name', 'type': 'str'},
+        'restrictions': {'key': 'restrictions', 'type': '[Restriction]'},
+    }
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(ResourceSku, self).__init__(**kwargs)
+        self.resource_type = None  # read-only; populated by the server
+        self.capabilities = kwargs.get('capabilities', None)
+        self.locations = None  # read-only; populated by the server
+        self.location_info = kwargs.get('location_info', None)
+        self.name = kwargs.get('name', None)
+        self.restrictions = kwargs.get('restrictions', None)
+
+
+class ResourceSkuCapabilities(msrest.serialization.Model):
+    """A resource SKU capability.
+
+    :param name: Name of a capability, such as ops/sec.
+    :type name: str
+    :param value: Quantity, if the capability is measured by quantity.
+    :type value: str
+    """
+
+    _attribute_map = {
+        'name': {'key': 'name', 'type': 'str'},
+        'value': {'key': 'value', 'type': 'str'},
+    }
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(ResourceSkuCapabilities, self).__init__(**kwargs)
+        self.name = kwargs.get('name', None)
+        self.value = kwargs.get('value', None)  # quantity is carried as a string on the wire, not numeric
+
+
+class ResourceSkuLocationInfo(msrest.serialization.Model):
+    """Resource SKU location information.
+
+    :param location: Location where this SKU is available.
+    :type location: str
+    :param zones: Zones if any.
+    :type zones: list[str]
+    """
+
+    _attribute_map = {
+        'location': {'key': 'location', 'type': 'str'},
+        'zones': {'key': 'zones', 'type': '[str]'},
+    }
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(ResourceSkuLocationInfo, self).__init__(**kwargs)
+        self.location = kwargs.get('location', None)
+        self.zones = kwargs.get('zones', None)  # availability zones for the location, if any
+
+
+class ResourceSkusResult(msrest.serialization.Model):
+    """The response from the List Cache SKUs operation.
+
+    Variables are only populated by the server, and will be ignored when sending a request.
+
+    :param next_link: The URI to fetch the next page of Cache SKUs.
+    :type next_link: str
+    :ivar value: The list of SKUs available for the subscription.
+    :vartype value: list[~storage_cache_management_client.models.ResourceSku]
+    """
+
+    _validation = {
+        'value': {'readonly': True},
+    }
+
+    _attribute_map = {
+        'next_link': {'key': 'nextLink', 'type': 'str'},
+        'value': {'key': 'value', 'type': '[ResourceSku]'},
+    }
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(ResourceSkusResult, self).__init__(**kwargs)
+        self.next_link = kwargs.get('next_link', None)  # pagination continuation URI, or None on the last page
+        self.value = None  # read-only; populated by the server
+
+
+class Restriction(msrest.serialization.Model):
+    """The restrictions preventing this SKU from being used.
+
+    Variables are only populated by the server, and will be ignored when sending a request.
+
+    :ivar type: The type of restrictions. In this version, the only possible value for this is
+     location.
+    :vartype type: str
+    :ivar values: The value of restrictions. If the restriction type is set to location, then this
+     would be the different locations where the SKU is restricted.
+    :vartype values: list[str]
+    :param reason_code: The reason for the restriction. As of now this can be "QuotaId" or
+     "NotAvailableForSubscription". "QuotaId" is set when the SKU has requiredQuotas parameter as
+     the subscription does not belong to that quota. "NotAvailableForSubscription" is related to
+     capacity at the datacenter. Possible values include: "QuotaId", "NotAvailableForSubscription".
+    :type reason_code: str or ~storage_cache_management_client.models.ReasonCode
+    """
+
+    _validation = {
+        'type': {'readonly': True},
+        'values': {'readonly': True},
+    }
+
+    _attribute_map = {
+        'type': {'key': 'type', 'type': 'str'},
+        'values': {'key': 'values', 'type': '[str]'},
+        'reason_code': {'key': 'reasonCode', 'type': 'str'},
+    }
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(Restriction, self).__init__(**kwargs)
+        self.type = None  # read-only; populated by the server
+        self.values = None  # read-only; populated by the server
+        self.reason_code = kwargs.get('reason_code', None)
+
+
+class StorageTargetResource(msrest.serialization.Model):
+    """Resource used by a Cache.
+
+    All fields are server-populated; this base class carries the common ARM envelope for
+    Storage Target resources (see subclass StorageTarget for the properties payload).
+
+    Variables are only populated by the server, and will be ignored when sending a request.
+
+    :ivar name: Name of the Storage Target.
+    :vartype name: str
+    :ivar id: Resource ID of the Storage Target.
+    :vartype id: str
+    :ivar type: Type of the Storage Target; Microsoft.StorageCache/Cache/StorageTarget.
+    :vartype type: str
+    :ivar location: Region name string.
+    :vartype location: str
+    :ivar system_data: The system meta data relating to this resource.
+    :vartype system_data: ~storage_cache_management_client.models.SystemData
+    """
+
+    _validation = {
+        'name': {'readonly': True, 'pattern': r'^[-0-9a-zA-Z_]{1,80}$'},  # ARM resource-name charset, 1-80 chars
+        'id': {'readonly': True},
+        'type': {'readonly': True},
+        'location': {'readonly': True},
+        'system_data': {'readonly': True},
+    }
+
+    _attribute_map = {
+        'name': {'key': 'name', 'type': 'str'},
+        'id': {'key': 'id', 'type': 'str'},
+        'type': {'key': 'type', 'type': 'str'},
+        'location': {'key': 'location', 'type': 'str'},
+        'system_data': {'key': 'systemData', 'type': 'SystemData'},
+    }
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(StorageTargetResource, self).__init__(**kwargs)
+        self.name = None  # all five attributes are read-only; populated by the server
+        self.id = None
+        self.type = None
+        self.location = None
+        self.system_data = None
+
+
+class StorageTarget(StorageTargetResource):
+    """Type of the Storage Target.
+
+    Variables are only populated by the server, and will be ignored when sending a request.
+
+    :ivar name: Name of the Storage Target.
+    :vartype name: str
+    :ivar id: Resource ID of the Storage Target.
+    :vartype id: str
+    :ivar type: Type of the Storage Target; Microsoft.StorageCache/Cache/StorageTarget.
+    :vartype type: str
+    :ivar location: Region name string.
+    :vartype location: str
+    :ivar system_data: The system meta data relating to this resource.
+    :vartype system_data: ~storage_cache_management_client.models.SystemData
+    :param junctions: List of Cache namespace junctions to target for namespace associations.
+    :type junctions: list[~storage_cache_management_client.models.NamespaceJunction]
+    :param target_type: Type of the Storage Target. Possible values include: "nfs3", "clfs",
+     "unknown".
+    :type target_type: str or ~storage_cache_management_client.models.StorageTargetType
+    :param provisioning_state: ARM provisioning state, see https://github.com/Azure/azure-resource-
+     manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property. Possible values include:
+     "Succeeded", "Failed", "Cancelled", "Creating", "Deleting", "Updating".
+    :type provisioning_state: str or ~storage_cache_management_client.models.ProvisioningStateType
+    :param nfs3: Properties when targetType is nfs3.
+    :type nfs3: ~storage_cache_management_client.models.Nfs3Target
+    :param unknown_map: Dictionary of string->string pairs containing information about the Storage
+     Target.
+    :type unknown_map: dict[str, str]
+    :param target: Resource ID of storage container.
+    :type target: str
+    """
+
+    _validation = {
+        'name': {'readonly': True, 'pattern': r'^[-0-9a-zA-Z_]{1,80}$'},
+        'id': {'readonly': True},
+        'type': {'readonly': True},
+        'location': {'readonly': True},
+        'system_data': {'readonly': True},
+    }
+
+    # Keys below are flattened wire paths: 'properties.*' for the common payload,
+    # 'unknown.unknownMap' and 'clfs.target' for the per-target-type sub-objects.
+    _attribute_map = {
+        'name': {'key': 'name', 'type': 'str'},
+        'id': {'key': 'id', 'type': 'str'},
+        'type': {'key': 'type', 'type': 'str'},
+        'location': {'key': 'location', 'type': 'str'},
+        'system_data': {'key': 'systemData', 'type': 'SystemData'},
+        'junctions': {'key': 'properties.junctions', 'type': '[NamespaceJunction]'},
+        'target_type': {'key': 'properties.targetType', 'type': 'str'},
+        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
+        'nfs3': {'key': 'properties.nfs3', 'type': 'Nfs3Target'},
+        'unknown_map': {'key': 'unknown.unknownMap', 'type': '{str}'},
+        'target': {'key': 'clfs.target', 'type': 'str'},
+    }
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(StorageTarget, self).__init__(**kwargs)
+        self.junctions = kwargs.get('junctions', None)
+        self.target_type = kwargs.get('target_type', None)
+        self.provisioning_state = kwargs.get('provisioning_state', None)
+        self.nfs3 = kwargs.get('nfs3', None)  # applicable when target_type == 'nfs3'
+        self.unknown_map = kwargs.get('unknown_map', None)  # wire key 'unknown.unknownMap'
+        self.target = kwargs.get('target', None)  # wire key 'clfs.target' (container resource ID)
+
+
+class StorageTargetsResult(msrest.serialization.Model):
+    """A list of Storage Targets.
+
+    :param next_link: The URI to fetch the next page of Storage Targets.
+    :type next_link: str
+    :param value: The list of Storage Targets defined for the Cache.
+    :type value: list[~storage_cache_management_client.models.StorageTarget]
+    """
+
+    _attribute_map = {
+        'next_link': {'key': 'nextLink', 'type': 'str'},
+        'value': {'key': 'value', 'type': '[StorageTarget]'},
+    }
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(StorageTargetsResult, self).__init__(**kwargs)
+        self.next_link = kwargs.get('next_link', None)  # pagination continuation URI, or None on the last page
+        self.value = kwargs.get('value', None)
+
+
+class SystemData(msrest.serialization.Model):
+    """Metadata pertaining to creation and last modification of the resource.
+
+    :param created_by: The identity that created the resource.
+    :type created_by: str
+    :param created_by_type: The type of identity that created the resource. Possible values
+     include: "User", "Application", "ManagedIdentity", "Key".
+    :type created_by_type: str or ~storage_cache_management_client.models.CreatedByType
+    :param created_at: The timestamp of resource creation (UTC).
+    :type created_at: ~datetime.datetime
+    :param last_modified_by: The identity that last modified the resource.
+    :type last_modified_by: str
+    :param last_modified_by_type: The type of identity that last modified the resource. Possible
+     values include: "User", "Application", "ManagedIdentity", "Key".
+    :type last_modified_by_type: str or ~storage_cache_management_client.models.CreatedByType
+    :param last_modified_at: The timestamp of resource last modification (UTC).
+    :type last_modified_at: ~datetime.datetime
+    """
+
+    _attribute_map = {
+        'created_by': {'key': 'createdBy', 'type': 'str'},
+        'created_by_type': {'key': 'createdByType', 'type': 'str'},
+        'created_at': {'key': 'createdAt', 'type': 'iso-8601'},
+        'last_modified_by': {'key': 'lastModifiedBy', 'type': 'str'},
+        'last_modified_by_type': {'key': 'lastModifiedByType', 'type': 'str'},
+        'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'},
+    }
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(SystemData, self).__init__(**kwargs)
+        self.created_by = kwargs.get('created_by', None)
+        self.created_by_type = kwargs.get('created_by_type', None)
+        self.created_at = kwargs.get('created_at', None)
+        self.last_modified_by = kwargs.get('last_modified_by', None)
+        self.last_modified_by_type = kwargs.get('last_modified_by_type', None)
+        self.last_modified_at = kwargs.get('last_modified_at', None)
+
+
+class UsageModel(msrest.serialization.Model):
+    """A usage model.
+
+    :param display: Localized information describing this usage model.
+    :type display: ~storage_cache_management_client.models.UsageModelDisplay
+    :param model_name: Non-localized keyword name for this usage model.
+    :type model_name: str
+    :param target_type: The type of Storage Target to which this model is applicable (only nfs3 as
+     of this version).
+    :type target_type: str
+    """
+
+    _attribute_map = {
+        'display': {'key': 'display', 'type': 'UsageModelDisplay'},
+        'model_name': {'key': 'modelName', 'type': 'str'},
+        'target_type': {'key': 'targetType', 'type': 'str'},
+    }
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(UsageModel, self).__init__(**kwargs)
+        self.display = kwargs.get('display', None)
+        self.model_name = kwargs.get('model_name', None)  # stable key for the model, not localized
+        self.target_type = kwargs.get('target_type', None)  # only 'nfs3' as of this API version
+
+
+class UsageModelDisplay(msrest.serialization.Model):
+    """Localized information describing this usage model.
+
+    :param description: String to display for this usage model.
+    :type description: str
+    """
+
+    _attribute_map = {
+        'description': {'key': 'description', 'type': 'str'},
+    }
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(UsageModelDisplay, self).__init__(**kwargs)
+        self.description = kwargs.get('description', None)  # localized display text
+
+
+class UsageModelsResult(msrest.serialization.Model):
+    """A list of Cache usage models.
+
+    :param next_link: The URI to fetch the next page of Cache usage models.
+    :type next_link: str
+    :param value: The list of usage models available for the subscription.
+    :type value: list[~storage_cache_management_client.models.UsageModel]
+    """
+
+    _attribute_map = {
+        'next_link': {'key': 'nextLink', 'type': 'str'},
+        'value': {'key': 'value', 'type': '[UsageModel]'},
+    }
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(UsageModelsResult, self).__init__(**kwargs)
+        self.next_link = kwargs.get('next_link', None)  # pagination continuation URI, or None on the last page
+        self.value = kwargs.get('value', None)
diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/models/_models_py3.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/models/_models_py3.py
new file mode 100644
index 00000000000..3fa4e09113e
--- /dev/null
+++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/models/_models_py3.py
@@ -0,0 +1,1358 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+import datetime
+from typing import Dict, List, Optional, Union
+
+import msrest.serialization
+
+from ._storage_cache_management_client_enums import *
+
+
+class ApiOperation(msrest.serialization.Model):
+    """REST API operation description: see https://github.com/Azure/azure-rest-api-specs/blob/master/documentation/openapi-authoring-automated-guidelines.md#r3023-operationsapiimplementation.
+
+    :param display: The object that represents the operation.
+    :type display: ~storage_cache_management_client.models.ApiOperationDisplay
+    :param origin: Origin of the operation.
+    :type origin: str
+    :param is_data_action: The flag that indicates whether the operation applies to data plane.
+    :type is_data_action: bool
+    :param name: Operation name: {provider}/{resource}/{operation}.
+    :type name: str
+    :param service_specification: Specification of the all the metrics provided for a resource
+     type.
+    :type service_specification:
+     ~storage_cache_management_client.models.ApiOperationPropertiesServiceSpecification
+    """
+
+    _attribute_map = {
+        'display': {'key': 'display', 'type': 'ApiOperationDisplay'},
+        'origin': {'key': 'origin', 'type': 'str'},
+        'is_data_action': {'key': 'isDataAction', 'type': 'bool'},
+        'name': {'key': 'name', 'type': 'str'},
+        'service_specification': {'key': 'properties.serviceSpecification', 'type': 'ApiOperationPropertiesServiceSpecification'},  # flattened 'properties' wire path
+    }
+
+    def __init__(
+        self,
+        *,
+        display: Optional["ApiOperationDisplay"] = None,
+        origin: Optional[str] = None,
+        is_data_action: Optional[bool] = None,
+        name: Optional[str] = None,
+        service_specification: Optional["ApiOperationPropertiesServiceSpecification"] = None,
+        **kwargs
+    ):
+        super(ApiOperation, self).__init__(**kwargs)
+        self.display = display
+        self.origin = origin
+        self.is_data_action = is_data_action
+        self.name = name
+        self.service_specification = service_specification
+
+
+class ApiOperationDisplay(msrest.serialization.Model):
+    """The object that represents the operation.
+
+    :param operation: Operation type: Read, write, delete, etc.
+    :type operation: str
+    :param provider: Service provider: Microsoft.StorageCache.
+    :type provider: str
+    :param resource: Resource on which the operation is performed: Cache, etc.
+    :type resource: str
+    :param description: The description of the operation.
+    :type description: str
+    """
+
+    _attribute_map = {
+        'operation': {'key': 'operation', 'type': 'str'},
+        'provider': {'key': 'provider', 'type': 'str'},
+        'resource': {'key': 'resource', 'type': 'str'},
+        'description': {'key': 'description', 'type': 'str'},
+    }
+
+    def __init__(
+        self,
+        *,
+        operation: Optional[str] = None,
+        provider: Optional[str] = None,
+        resource: Optional[str] = None,
+        description: Optional[str] = None,
+        **kwargs
+    ):
+        super(ApiOperationDisplay, self).__init__(**kwargs)
+        self.operation = operation  # all four fields are optional display strings
+        self.provider = provider
+        self.resource = resource
+        self.description = description
+
+
+class ApiOperationListResult(msrest.serialization.Model):
+    """Result of the request to list Resource Provider operations. It contains a list of operations and a URL link to get the next set of results.
+
+    :param next_link: URL to get the next set of operation list results if there are any.
+    :type next_link: str
+    :param value: List of Resource Provider operations supported by the Microsoft.StorageCache
+     resource provider.
+    :type value: list[~storage_cache_management_client.models.ApiOperation]
+    """
+
+    _attribute_map = {
+        'next_link': {'key': 'nextLink', 'type': 'str'},
+        'value': {'key': 'value', 'type': '[ApiOperation]'},
+    }
+
+    def __init__(
+        self,
+        *,
+        next_link: Optional[str] = None,
+        value: Optional[List["ApiOperation"]] = None,
+        **kwargs
+    ):
+        super(ApiOperationListResult, self).__init__(**kwargs)
+        self.next_link = next_link  # pagination continuation URL, or None on the last page
+        self.value = value
+
+
+class ApiOperationPropertiesServiceSpecification(msrest.serialization.Model):
+    """Specification of the all the metrics provided for a resource type.
+
+    :param metric_specifications: Details about operations related to metrics.
+    :type metric_specifications: list[~storage_cache_management_client.models.MetricSpecification]
+    """
+
+    _attribute_map = {
+        'metric_specifications': {'key': 'metricSpecifications', 'type': '[MetricSpecification]'},
+    }
+
+    def __init__(
+        self,
+        *,
+        metric_specifications: Optional[List["MetricSpecification"]] = None,
+        **kwargs
+    ):
+        super(ApiOperationPropertiesServiceSpecification, self).__init__(**kwargs)
+        self.metric_specifications = metric_specifications  # list of MetricSpecification, or None
+
+
+class ASCOperation(msrest.serialization.Model):
+    """The status of operation.
+
+    :param id: The operation Id.
+    :type id: str
+    :param name: The operation name.
+    :type name: str
+    :param start_time: The start time of the operation.
+    :type start_time: str
+    :param end_time: The end time of the operation.
+    :type end_time: str
+    :param status: The status of the operation.
+    :type status: str
+    :param error: The error detail of the operation if any.
+    :type error: ~storage_cache_management_client.models.ErrorResponse
+    """
+
+    _attribute_map = {
+        'id': {'key': 'id', 'type': 'str'},
+        'name': {'key': 'name', 'type': 'str'},
+        'start_time': {'key': 'startTime', 'type': 'str'},  # timestamps are plain strings in this model, not iso-8601
+        'end_time': {'key': 'endTime', 'type': 'str'},
+        'status': {'key': 'status', 'type': 'str'},
+        'error': {'key': 'error', 'type': 'ErrorResponse'},
+    }
+
+    def __init__(
+        self,
+        *,
+        id: Optional[str] = None,  # parameter name shadows builtin 'id'; kept to match the wire contract
+        name: Optional[str] = None,
+        start_time: Optional[str] = None,
+        end_time: Optional[str] = None,
+        status: Optional[str] = None,
+        error: Optional["ErrorResponse"] = None,
+        **kwargs
+    ):
+        super(ASCOperation, self).__init__(**kwargs)
+        self.id = id
+        self.name = name
+        self.start_time = start_time
+        self.end_time = end_time
+        self.status = status
+        self.error = error  # populated only when the operation failed
+
+
+class Cache(msrest.serialization.Model):
+ """A Cache instance. Follows Azure Resource Manager standards: https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/resource-api-reference.md.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :param tags: A set of tags. ARM tags as name/value pairs.
+ :type tags: object
+ :ivar id: Resource ID of the Cache.
+ :vartype id: str
+ :param location: Region name string.
+ :type location: str
+ :ivar name: Name of Cache.
+ :vartype name: str
+ :ivar type: Type of the Cache; Microsoft.StorageCache/Cache.
+ :vartype type: str
+ :ivar system_data: The system meta data relating to this resource.
+ :vartype system_data: ~storage_cache_management_client.models.SystemData
+ :param cache_size_gb: The size of this Cache, in GB.
+ :type cache_size_gb: int
+ :ivar health: Health of the Cache.
+ :vartype health: ~storage_cache_management_client.models.CacheHealth
+ :ivar mount_addresses: Array of IP addresses that can be used by clients mounting this Cache.
+ :vartype mount_addresses: list[str]
+ :param provisioning_state: ARM provisioning state, see https://github.com/Azure/azure-resource-
+ manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property. Possible values include:
+ "Succeeded", "Failed", "Cancelled", "Creating", "Deleting", "Updating".
+ :type provisioning_state: str or ~storage_cache_management_client.models.ProvisioningStateType
+ :param subnet: Subnet used for the Cache.
+ :type subnet: str
+ :param upgrade_status: Upgrade status of the Cache.
+ :type upgrade_status: ~storage_cache_management_client.models.CacheUpgradeStatus
+ :param extended_groups_enabled: This indicates if Extended Groups is enabled.
+ :type extended_groups_enabled: bool
+ :param username_source: This setting determines how the system gets username and group names
+ for clients. Possible values include: "AD", "LDAP", "File", "None". Default value: "None".
+ :type username_source: str or ~storage_cache_management_client.models.UsernameSource
+ :param group_file_uri: The URI of the file containing the group information (in /etc/group file
+ format). This field must be populated when 'usernameSource' is set to 'File'.
+ :type group_file_uri: str
+ :param user_file_uri: The URI of the file containing the user information (in /etc/passwd file
+ format). This field must be populated when 'usernameSource' is set to 'File'.
+ :type user_file_uri: str
+ :param ldap_server: The fully qualified domain name or IP address of the LDAP server to use.
+ :type ldap_server: str
+ :param ldap_base_dn: The base distinguished name for the LDAP domain.
+ :type ldap_base_dn: str
+ :param encrypt_ldap_connection: This indicates if the LDAP connection should be encrypted.
+ :type encrypt_ldap_connection: bool
+ :param require_valid_certificate: Determines if the certificates should be validated by a
+ certificate authority. When true, caCertificateURI must be provided.
+ :type require_valid_certificate: bool
+ :param auto_download_certificate: Determines if the certificate should be automatically
+ downloaded. This applies to 'caCertificateURI' when 'requireValidCertificate' is true, or a
+ self signed certificate otherwise.
+ :type auto_download_certificate: bool
+ :param ca_certificate_uri: The URI of the CA certificate to validate the LDAP secure
+ connection. This field must be populated when 'requireValidCertificate' is set to true.
+ :type ca_certificate_uri: str
+ :ivar username_downloaded: Indicates if the HPC Cache has performed the username download
+ successfully. Possible values include: "Yes", "No", "Error".
+ :vartype username_downloaded: str or
+ ~storage_cache_management_client.models.UsernameDownloadedType
+ :param credentials_directory_services_settings_username_download_credentials: When present,
+ these are the credentials for the secure LDAP connection.
+ :type credentials_directory_services_settings_username_download_credentials:
+ ~storage_cache_management_client.models.CacheUsernameDownloadSettingsCredentials
+ :param primary_dns_ip_address: Primary DNS IP address used to resolve the Active Directory
+ domain controller's fully qualified domain name.
+ :type primary_dns_ip_address: str
+ :param secondary_dns_ip_address: Secondary DNS IP address used to resolve the Active Directory
+ domain controller's fully qualified domain name.
+ :type secondary_dns_ip_address: str
+ :param domain_name: The fully qualified domain name of the Active Directory domain controller.
+ :type domain_name: str
+ :param domain_net_bios: The Active Directory domain's NetBIOS name.
+ :type domain_net_bios: str
+ :param smb_server_name: The name (NetBIOS) used for the HPC Cache to join the Active Directory
+ domain. Length must not be greater than 15 and chars must be from the [-0-9a-zA-Z_] char class.
+ :type smb_server_name: str
+ :ivar domain_joined: This field indicates if the HPC Cache is joined to the Active Directory
+ domain. Possible values include: "Yes", "No", "Error".
+ :vartype domain_joined: str or ~storage_cache_management_client.models.DomainJoinedType
+ :param credentials_directory_services_settings_active_directory_credentials: Active Directory
+ admin or user credentials used to join the HPC Cache to a domain.
+ :type credentials_directory_services_settings_active_directory_credentials:
+ ~storage_cache_management_client.models.CacheActiveDirectorySettingsCredentials
+ :param access_policies: NFS access policies defined for this cache.
+ :type access_policies: list[~storage_cache_management_client.models.NfsAccessPolicy]
+ :param key_url: The URL referencing a key encryption key in Key Vault.
+ :type key_url: str
+ :param id_encryption_settings_key_encryption_key_source_vault_id: Resource Id.
+ :type id_encryption_settings_key_encryption_key_source_vault_id: str
+ :param mtu: The IPv4 maximum transmission unit configured for the subnet.
+ :type mtu: int
+ :ivar utility_addresses: Array of additional IP addresses used by this Cache.
+ :vartype utility_addresses: list[str]
+ :param name_sku_name: SKU name for this Cache.
+ :type name_sku_name: str
+ :ivar principal_id: The principal id of the cache.
+ :vartype principal_id: str
+ :ivar tenant_id: The tenant id associated with the cache.
+ :vartype tenant_id: str
+ :param type_identity_type: The type of identity used for the cache. Possible values include:
+ "SystemAssigned", "None".
+ :type type_identity_type: str or ~storage_cache_management_client.models.CacheIdentityType
+ """
+
+ _validation = {
+ 'id': {'readonly': True},
+ 'name': {'readonly': True, 'pattern': r'^[-0-9a-zA-Z_]{1,80}$'},
+ 'type': {'readonly': True},
+ 'system_data': {'readonly': True},
+ 'health': {'readonly': True},
+ 'mount_addresses': {'readonly': True},
+ 'username_downloaded': {'readonly': True},
+ 'smb_server_name': {'pattern': r'^[-0-9a-zA-Z]{1,15}$'},
+ 'domain_joined': {'readonly': True},
+ 'mtu': {'maximum': 1500, 'minimum': 576},
+ 'utility_addresses': {'readonly': True},
+ 'principal_id': {'readonly': True},
+ 'tenant_id': {'readonly': True},
+ }
+
+ _attribute_map = {
+ 'tags': {'key': 'tags', 'type': 'object'},
+ 'id': {'key': 'id', 'type': 'str'},
+ 'location': {'key': 'location', 'type': 'str'},
+ 'name': {'key': 'name', 'type': 'str'},
+ 'type': {'key': 'type', 'type': 'str'},
+ 'system_data': {'key': 'systemData', 'type': 'SystemData'},
+ 'cache_size_gb': {'key': 'properties.cacheSizeGB', 'type': 'int'},
+ 'health': {'key': 'properties.health', 'type': 'CacheHealth'},
+ 'mount_addresses': {'key': 'properties.mountAddresses', 'type': '[str]'},
+ 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
+ 'subnet': {'key': 'properties.subnet', 'type': 'str'},
+ 'upgrade_status': {'key': 'properties.upgradeStatus', 'type': 'CacheUpgradeStatus'},
+ 'extended_groups_enabled': {'key': 'directoryServicesSettings.usernameDownload.extendedGroupsEnabled', 'type': 'bool'},
+ 'username_source': {'key': 'directoryServicesSettings.usernameDownload.usernameSource', 'type': 'str'},
+ 'group_file_uri': {'key': 'directoryServicesSettings.usernameDownload.groupFileURI', 'type': 'str'},
+ 'user_file_uri': {'key': 'directoryServicesSettings.usernameDownload.userFileURI', 'type': 'str'},
+ 'ldap_server': {'key': 'directoryServicesSettings.usernameDownload.ldapServer', 'type': 'str'},
+ 'ldap_base_dn': {'key': 'directoryServicesSettings.usernameDownload.ldapBaseDn', 'type': 'str'},
+ 'encrypt_ldap_connection': {'key': 'directoryServicesSettings.usernameDownload.encryptLdapConnection', 'type': 'bool'},
+ 'require_valid_certificate': {'key': 'directoryServicesSettings.usernameDownload.requireValidCertificate', 'type': 'bool'},
+ 'auto_download_certificate': {'key': 'directoryServicesSettings.usernameDownload.autoDownloadCertificate', 'type': 'bool'},
+ 'ca_certificate_uri': {'key': 'directoryServicesSettings.usernameDownload.caCertificateURI', 'type': 'str'},
+ 'username_downloaded': {'key': 'directoryServicesSettings.usernameDownload.usernameDownloaded', 'type': 'str'},
+ 'credentials_directory_services_settings_username_download_credentials': {'key': 'directoryServicesSettings.usernameDownload.credentials', 'type': 'CacheUsernameDownloadSettingsCredentials'},
+ 'primary_dns_ip_address': {'key': 'directoryServicesSettings.activeDirectory.primaryDnsIpAddress', 'type': 'str'},
+ 'secondary_dns_ip_address': {'key': 'directoryServicesSettings.activeDirectory.secondaryDnsIpAddress', 'type': 'str'},
+ 'domain_name': {'key': 'directoryServicesSettings.activeDirectory.domainName', 'type': 'str'},
+ 'domain_net_bios': {'key': 'directoryServicesSettings.activeDirectory.domainNetBios', 'type': 'str'},
+ 'smb_server_name': {'key': 'directoryServicesSettings.activeDirectory.smbServerName', 'type': 'str'},
+ 'domain_joined': {'key': 'directoryServicesSettings.activeDirectory.domainJoined', 'type': 'str'},
+ 'credentials_directory_services_settings_active_directory_credentials': {'key': 'directoryServicesSettings.activeDirectory.credentials', 'type': 'CacheActiveDirectorySettingsCredentials'},
+ 'access_policies': {'key': 'securitySettings.accessPolicies', 'type': '[NfsAccessPolicy]'},
+ 'key_url': {'key': 'encryptionSettings.keyEncryptionKey.keyUrl', 'type': 'str'},
+ 'id_encryption_settings_key_encryption_key_source_vault_id': {'key': 'encryptionSettings.keyEncryptionKey.sourceVault.id', 'type': 'str'},
+ 'mtu': {'key': 'networkSettings.mtu', 'type': 'int'},
+ 'utility_addresses': {'key': 'networkSettings.utilityAddresses', 'type': '[str]'},
+ 'name_sku_name': {'key': 'sku.name', 'type': 'str'},
+ 'principal_id': {'key': 'identity.principalId', 'type': 'str'},
+ 'tenant_id': {'key': 'identity.tenantId', 'type': 'str'},
+ 'type_identity_type': {'key': 'identity.type', 'type': 'str'},
+ }
+
+ def __init__(
+ self,
+ *,
+ tags: Optional[object] = None,
+ location: Optional[str] = None,
+ cache_size_gb: Optional[int] = None,
+ provisioning_state: Optional[Union[str, "ProvisioningStateType"]] = None,
+ subnet: Optional[str] = None,
+ upgrade_status: Optional["CacheUpgradeStatus"] = None,
+ extended_groups_enabled: Optional[bool] = None,
+ username_source: Optional[Union[str, "UsernameSource"]] = "None",
+ group_file_uri: Optional[str] = None,
+ user_file_uri: Optional[str] = None,
+ ldap_server: Optional[str] = None,
+ ldap_base_dn: Optional[str] = None,
+ encrypt_ldap_connection: Optional[bool] = None,
+ require_valid_certificate: Optional[bool] = None,
+ auto_download_certificate: Optional[bool] = None,
+ ca_certificate_uri: Optional[str] = None,
+ credentials_directory_services_settings_username_download_credentials: Optional["CacheUsernameDownloadSettingsCredentials"] = None,
+ primary_dns_ip_address: Optional[str] = None,
+ secondary_dns_ip_address: Optional[str] = None,
+ domain_name: Optional[str] = None,
+ domain_net_bios: Optional[str] = None,
+ smb_server_name: Optional[str] = None,
+ credentials_directory_services_settings_active_directory_credentials: Optional["CacheActiveDirectorySettingsCredentials"] = None,
+ access_policies: Optional[List["NfsAccessPolicy"]] = None,
+ key_url: Optional[str] = None,
+ id_encryption_settings_key_encryption_key_source_vault_id: Optional[str] = None,
+ mtu: Optional[int] = 1500,
+ name_sku_name: Optional[str] = None,
+ type_identity_type: Optional[Union[str, "CacheIdentityType"]] = None,
+ **kwargs
+ ):
+ super(Cache, self).__init__(**kwargs)
+ self.tags = tags
+ self.id = None
+ self.location = location
+ self.name = None
+ self.type = None
+ self.system_data = None
+ self.cache_size_gb = cache_size_gb
+ self.health = None
+ self.mount_addresses = None
+ self.provisioning_state = provisioning_state
+ self.subnet = subnet
+ self.upgrade_status = upgrade_status
+ self.extended_groups_enabled = extended_groups_enabled
+ self.username_source = username_source
+ self.group_file_uri = group_file_uri
+ self.user_file_uri = user_file_uri
+ self.ldap_server = ldap_server
+ self.ldap_base_dn = ldap_base_dn
+ self.encrypt_ldap_connection = encrypt_ldap_connection
+ self.require_valid_certificate = require_valid_certificate
+ self.auto_download_certificate = auto_download_certificate
+ self.ca_certificate_uri = ca_certificate_uri
+ self.username_downloaded = None
+ self.credentials_directory_services_settings_username_download_credentials = credentials_directory_services_settings_username_download_credentials
+ self.primary_dns_ip_address = primary_dns_ip_address
+ self.secondary_dns_ip_address = secondary_dns_ip_address
+ self.domain_name = domain_name
+ self.domain_net_bios = domain_net_bios
+ self.smb_server_name = smb_server_name
+ self.domain_joined = None
+ self.credentials_directory_services_settings_active_directory_credentials = credentials_directory_services_settings_active_directory_credentials
+ self.access_policies = access_policies
+ self.key_url = key_url
+ self.id_encryption_settings_key_encryption_key_source_vault_id = id_encryption_settings_key_encryption_key_source_vault_id
+ self.mtu = mtu
+ self.utility_addresses = None
+ self.name_sku_name = name_sku_name
+ self.principal_id = None
+ self.tenant_id = None
+ self.type_identity_type = type_identity_type
+
+
+class CacheActiveDirectorySettingsCredentials(msrest.serialization.Model):
+ """Active Directory admin or user credentials used to join the HPC Cache to a domain.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param username: Required. User name of the Active Directory domain administrator. This value
+ is stored encrypted and not returned on response.
+ :type username: str
+ :param password: Required. Plain text password of the Active Directory domain administrator.
+ This value is stored encrypted and not returned on response.
+ :type password: str
+ """
+
+ _validation = {
+ 'username': {'required': True},
+ 'password': {'required': True},
+ }
+
+ _attribute_map = {
+ 'username': {'key': 'username', 'type': 'str'},
+ 'password': {'key': 'password', 'type': 'str'},
+ }
+
+ def __init__(
+ self,
+ *,
+ username: str,
+ password: str,
+ **kwargs
+ ):
+ super(CacheActiveDirectorySettingsCredentials, self).__init__(**kwargs)
+ self.username = username
+ self.password = password
+
+
+class CacheHealth(msrest.serialization.Model):
+ """An indication of Cache health. Gives more information about health than just that related to provisioning.
+
+ :param state: List of Cache health states. Possible values include: "Unknown", "Healthy",
+ "Degraded", "Down", "Transitioning", "Stopping", "Stopped", "Upgrading", "Flushing".
+ :type state: str or ~storage_cache_management_client.models.HealthStateType
+ :param status_description: Describes explanation of state.
+ :type status_description: str
+ """
+
+ _attribute_map = {
+ 'state': {'key': 'state', 'type': 'str'},
+ 'status_description': {'key': 'statusDescription', 'type': 'str'},
+ }
+
+ def __init__(
+ self,
+ *,
+ state: Optional[Union[str, "HealthStateType"]] = None,
+ status_description: Optional[str] = None,
+ **kwargs
+ ):
+ super(CacheHealth, self).__init__(**kwargs)
+ self.state = state
+ self.status_description = status_description
+
+
+class CachesListResult(msrest.serialization.Model):
+ """Result of the request to list Caches. It contains a list of Caches and a URL link to get the next set of results.
+
+ :param next_link: URL to get the next set of Cache list results, if there are any.
+ :type next_link: str
+ :param value: List of Caches.
+ :type value: list[~storage_cache_management_client.models.Cache]
+ """
+
+ _attribute_map = {
+ 'next_link': {'key': 'nextLink', 'type': 'str'},
+ 'value': {'key': 'value', 'type': '[Cache]'},
+ }
+
+ def __init__(
+ self,
+ *,
+ next_link: Optional[str] = None,
+ value: Optional[List["Cache"]] = None,
+ **kwargs
+ ):
+ super(CachesListResult, self).__init__(**kwargs)
+ self.next_link = next_link
+ self.value = value
+
+
+class CacheUpgradeStatus(msrest.serialization.Model):
+ """Properties describing the software upgrade state of the Cache.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar current_firmware_version: Version string of the firmware currently installed on this
+ Cache.
+ :vartype current_firmware_version: str
+ :ivar firmware_update_status: True if there is a firmware update ready to install on this
+ Cache. The firmware will automatically be installed after firmwareUpdateDeadline if not
+ triggered earlier via the upgrade operation. Possible values include: "available",
+ "unavailable".
+ :vartype firmware_update_status: str or
+ ~storage_cache_management_client.models.FirmwareStatusType
+ :ivar firmware_update_deadline: Time at which the pending firmware update will automatically be
+ installed on the Cache.
+ :vartype firmware_update_deadline: ~datetime.datetime
+ :ivar last_firmware_update: Time of the last successful firmware update.
+ :vartype last_firmware_update: ~datetime.datetime
+ :ivar pending_firmware_version: When firmwareUpdateAvailable is true, this field holds the
+ version string for the update.
+ :vartype pending_firmware_version: str
+ """
+
+ _validation = {
+ 'current_firmware_version': {'readonly': True},
+ 'firmware_update_status': {'readonly': True},
+ 'firmware_update_deadline': {'readonly': True},
+ 'last_firmware_update': {'readonly': True},
+ 'pending_firmware_version': {'readonly': True},
+ }
+
+ _attribute_map = {
+ 'current_firmware_version': {'key': 'currentFirmwareVersion', 'type': 'str'},
+ 'firmware_update_status': {'key': 'firmwareUpdateStatus', 'type': 'str'},
+ 'firmware_update_deadline': {'key': 'firmwareUpdateDeadline', 'type': 'iso-8601'},
+ 'last_firmware_update': {'key': 'lastFirmwareUpdate', 'type': 'iso-8601'},
+ 'pending_firmware_version': {'key': 'pendingFirmwareVersion', 'type': 'str'},
+ }
+
+ def __init__(
+ self,
+ **kwargs
+ ):
+ super(CacheUpgradeStatus, self).__init__(**kwargs)
+ self.current_firmware_version = None
+ self.firmware_update_status = None
+ self.firmware_update_deadline = None
+ self.last_firmware_update = None
+ self.pending_firmware_version = None
+
+
+class CacheUsernameDownloadSettingsCredentials(msrest.serialization.Model):
+ """When present, these are the credentials for the secure LDAP connection.
+
+ :param bind_dn: The Bind distinguished name identity to be used in the secure LDAP connection.
+ This value is stored encrypted and not returned on response.
+ :type bind_dn: str
+ :param bind_password: The Bind password to be used in the secure LDAP connection. This value is
+ stored encrypted and not returned on response.
+ :type bind_password: str
+ """
+
+ _attribute_map = {
+ 'bind_dn': {'key': 'bindDn', 'type': 'str'},
+ 'bind_password': {'key': 'bindPassword', 'type': 'str'},
+ }
+
+ def __init__(
+ self,
+ *,
+ bind_dn: Optional[str] = None,
+ bind_password: Optional[str] = None,
+ **kwargs
+ ):
+ super(CacheUsernameDownloadSettingsCredentials, self).__init__(**kwargs)
+ self.bind_dn = bind_dn
+ self.bind_password = bind_password
+
+
+class CloudErrorBody(msrest.serialization.Model):
+ """An error response.
+
+ :param code: An identifier for the error. Codes are invariant and are intended to be consumed
+ programmatically.
+ :type code: str
+ :param details: A list of additional details about the error.
+ :type details: list[~storage_cache_management_client.models.CloudErrorBody]
+ :param message: A message describing the error, intended to be suitable for display in a user
+ interface.
+ :type message: str
+ :param target: The target of the particular error. For example, the name of the property in
+ error.
+ :type target: str
+ """
+
+ _attribute_map = {
+ 'code': {'key': 'code', 'type': 'str'},
+ 'details': {'key': 'details', 'type': '[CloudErrorBody]'},
+ 'message': {'key': 'message', 'type': 'str'},
+ 'target': {'key': 'target', 'type': 'str'},
+ }
+
+ def __init__(
+ self,
+ *,
+ code: Optional[str] = None,
+ details: Optional[List["CloudErrorBody"]] = None,
+ message: Optional[str] = None,
+ target: Optional[str] = None,
+ **kwargs
+ ):
+ super(CloudErrorBody, self).__init__(**kwargs)
+ self.code = code
+ self.details = details
+ self.message = message
+ self.target = target
+
+
+class ErrorResponse(msrest.serialization.Model):
+ """Describes the format of Error response.
+
+ :param code: Error code.
+ :type code: str
+ :param message: Error message indicating why the operation failed.
+ :type message: str
+ """
+
+ _attribute_map = {
+ 'code': {'key': 'code', 'type': 'str'},
+ 'message': {'key': 'message', 'type': 'str'},
+ }
+
+ def __init__(
+ self,
+ *,
+ code: Optional[str] = None,
+ message: Optional[str] = None,
+ **kwargs
+ ):
+ super(ErrorResponse, self).__init__(**kwargs)
+ self.code = code
+ self.message = message
+
+
+class MetricDimension(msrest.serialization.Model):
+ """Specifications of the Dimension of metrics.
+
+ :param name: Name of the dimension.
+ :type name: str
+ :param display_name: Localized friendly display name of the dimension.
+ :type display_name: str
+ :param internal_name: Internal name of the dimension.
+ :type internal_name: str
+ :param to_be_exported_for_shoebox: To be exported to shoe box.
+ :type to_be_exported_for_shoebox: bool
+ """
+
+ _attribute_map = {
+ 'name': {'key': 'name', 'type': 'str'},
+ 'display_name': {'key': 'displayName', 'type': 'str'},
+ 'internal_name': {'key': 'internalName', 'type': 'str'},
+ 'to_be_exported_for_shoebox': {'key': 'toBeExportedForShoebox', 'type': 'bool'},
+ }
+
+ def __init__(
+ self,
+ *,
+ name: Optional[str] = None,
+ display_name: Optional[str] = None,
+ internal_name: Optional[str] = None,
+ to_be_exported_for_shoebox: Optional[bool] = None,
+ **kwargs
+ ):
+ super(MetricDimension, self).__init__(**kwargs)
+ self.name = name
+ self.display_name = display_name
+ self.internal_name = internal_name
+ self.to_be_exported_for_shoebox = to_be_exported_for_shoebox
+
+
+class MetricSpecification(msrest.serialization.Model):
+ """Details about operation related to metrics.
+
+ :param name: The name of the metric.
+ :type name: str
+ :param display_name: Localized display name of the metric.
+ :type display_name: str
+ :param display_description: The description of the metric.
+ :type display_description: str
+ :param unit: The unit that the metric is measured in.
+ :type unit: str
+ :param aggregation_type: The type of metric aggregation.
+ :type aggregation_type: str
+ :param supported_aggregation_types: Support metric aggregation type.
+ :type supported_aggregation_types: list[str or
+ ~storage_cache_management_client.models.MetricAggregationType]
+ :param metric_class: Type of metrics.
+ :type metric_class: str
+ :param dimensions: Dimensions of the metric.
+ :type dimensions: list[~storage_cache_management_client.models.MetricDimension]
+ """
+
+ _attribute_map = {
+ 'name': {'key': 'name', 'type': 'str'},
+ 'display_name': {'key': 'displayName', 'type': 'str'},
+ 'display_description': {'key': 'displayDescription', 'type': 'str'},
+ 'unit': {'key': 'unit', 'type': 'str'},
+ 'aggregation_type': {'key': 'aggregationType', 'type': 'str'},
+ 'supported_aggregation_types': {'key': 'supportedAggregationTypes', 'type': '[str]'},
+ 'metric_class': {'key': 'metricClass', 'type': 'str'},
+ 'dimensions': {'key': 'dimensions', 'type': '[MetricDimension]'},
+ }
+
+ def __init__(
+ self,
+ *,
+ name: Optional[str] = None,
+ display_name: Optional[str] = None,
+ display_description: Optional[str] = None,
+ unit: Optional[str] = None,
+ aggregation_type: Optional[str] = None,
+ supported_aggregation_types: Optional[List[Union[str, "MetricAggregationType"]]] = None,
+ metric_class: Optional[str] = None,
+ dimensions: Optional[List["MetricDimension"]] = None,
+ **kwargs
+ ):
+ super(MetricSpecification, self).__init__(**kwargs)
+ self.name = name
+ self.display_name = display_name
+ self.display_description = display_description
+ self.unit = unit
+ self.aggregation_type = aggregation_type
+ self.supported_aggregation_types = supported_aggregation_types
+ self.metric_class = metric_class
+ self.dimensions = dimensions
+
+
+class NamespaceJunction(msrest.serialization.Model):
+ """A namespace junction.
+
+ :param namespace_path: Namespace path on a Cache for a Storage Target.
+ :type namespace_path: str
+ :param target_path: Path in Storage Target to which namespacePath points.
+ :type target_path: str
+ :param nfs_export: NFS export where targetPath exists.
+ :type nfs_export: str
+ :param nfs_access_policy: Name of the access policy applied to this junction.
+ :type nfs_access_policy: str
+ """
+
+ _attribute_map = {
+ 'namespace_path': {'key': 'namespacePath', 'type': 'str'},
+ 'target_path': {'key': 'targetPath', 'type': 'str'},
+ 'nfs_export': {'key': 'nfsExport', 'type': 'str'},
+ 'nfs_access_policy': {'key': 'nfsAccessPolicy', 'type': 'str'},
+ }
+
+ def __init__(
+ self,
+ *,
+ namespace_path: Optional[str] = None,
+ target_path: Optional[str] = None,
+ nfs_export: Optional[str] = None,
+ nfs_access_policy: Optional[str] = None,
+ **kwargs
+ ):
+ super(NamespaceJunction, self).__init__(**kwargs)
+ self.namespace_path = namespace_path
+ self.target_path = target_path
+ self.nfs_export = nfs_export
+ self.nfs_access_policy = nfs_access_policy
+
+
+class Nfs3Target(msrest.serialization.Model):
+ """Properties pertaining to the Nfs3Target.
+
+ :param target: IP address or host name of an NFSv3 host (e.g., 10.0.44.44).
+ :type target: str
+ :param usage_model: Identifies the usage model to be used for this Storage Target. Get choices
+ from .../usageModels.
+ :type usage_model: str
+ """
+
+ _validation = {
+ 'target': {'pattern': r'^[-.0-9a-zA-Z]+$'},
+ }
+
+ _attribute_map = {
+ 'target': {'key': 'target', 'type': 'str'},
+ 'usage_model': {'key': 'usageModel', 'type': 'str'},
+ }
+
+ def __init__(
+ self,
+ *,
+ target: Optional[str] = None,
+ usage_model: Optional[str] = None,
+ **kwargs
+ ):
+ super(Nfs3Target, self).__init__(**kwargs)
+ self.target = target
+ self.usage_model = usage_model
+
+
+class NfsAccessPolicy(msrest.serialization.Model):
+ """A set of rules describing access policies applied to NFSv3 clients of the cache.
+
+ :param name: Name identifying this policy. Access Policy names are not case sensitive.
+ :type name: str
+ :param access_rules: The set of rules describing client accesses allowed under this policy.
+ :type access_rules: list[~storage_cache_management_client.models.NfsAccessRule]
+ """
+
+ _attribute_map = {
+ 'name': {'key': 'name', 'type': 'str'},
+ 'access_rules': {'key': 'accessRules', 'type': '[NfsAccessRule]'},
+ }
+
+ def __init__(
+ self,
+ *,
+ name: Optional[str] = None,
+ access_rules: Optional[List["NfsAccessRule"]] = None,
+ **kwargs
+ ):
+ super(NfsAccessPolicy, self).__init__(**kwargs)
+ self.name = name
+ self.access_rules = access_rules
+
+
+class NfsAccessRule(msrest.serialization.Model):
+ """Rule to place restrictions on portions of the NFS namespace being presented to clients.
+
+ :param scope: Scope applied to this rule. Possible values include: "default", "network",
+ "host".
+ :type scope: str or ~storage_cache_management_client.models.NfsAccessRuleScope
+ :param filter: Filter applied to this rule. The filter's format depends on its scope.
+ 'default' scope is reserved for system use. 'network' is in CIDR format (e.g., 10.99.1.0/24)
+ and 'host' is an IP address or fully qualified domain name.
+ :type filter: str
+ :param access: Access allowed by this rule. Possible values include: "no", "ro", "rw".
+ :type access: str or ~storage_cache_management_client.models.NfsAccessRuleAccess
+ :param suid: Allow SUID semantics.
+ :type suid: bool
+ :param submount_access: Allow mounts below the junction.
+ :type submount_access: bool
+ :param root_squash: Map root accesses to anonymousUID and anonymousGID.
+ :type root_squash: bool
+ :param anonymous_uid: UID value that replaces 0 when rootSquash is true.
+ :type anonymous_uid: str
+ :param anonymous_gid: GID value that replaces 0 when rootSquash is true.
+ :type anonymous_gid: str
+ """
+
+ _attribute_map = {
+ 'scope': {'key': 'scope', 'type': 'str'},
+ 'filter': {'key': 'filter', 'type': 'str'},
+ 'access': {'key': 'access', 'type': 'str'},
+ 'suid': {'key': 'suid', 'type': 'bool'},
+ 'submount_access': {'key': 'submountAccess', 'type': 'bool'},
+ 'root_squash': {'key': 'rootSquash', 'type': 'bool'},
+ 'anonymous_uid': {'key': 'anonymousUID', 'type': 'str'},
+ 'anonymous_gid': {'key': 'anonymousGID', 'type': 'str'},
+ }
+
+ def __init__(
+ self,
+ *,
+ scope: Optional[Union[str, "NfsAccessRuleScope"]] = None,
+ filter: Optional[str] = None,
+ access: Optional[Union[str, "NfsAccessRuleAccess"]] = None,
+ suid: Optional[bool] = None,
+ submount_access: Optional[bool] = None,
+ root_squash: Optional[bool] = None,
+ anonymous_uid: Optional[str] = "-2",
+ anonymous_gid: Optional[str] = "-2",
+ **kwargs
+ ):
+ super(NfsAccessRule, self).__init__(**kwargs)
+ self.scope = scope
+ self.filter = filter
+ self.access = access
+ self.suid = suid
+ self.submount_access = submount_access
+ self.root_squash = root_squash
+ self.anonymous_uid = anonymous_uid
+ self.anonymous_gid = anonymous_gid
+
+
+class ResourceSku(msrest.serialization.Model):
+ """A resource SKU.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar resource_type: The type of resource the SKU applies to.
+ :vartype resource_type: str
+ :param capabilities: A list of capabilities of this SKU, such as throughput or ops/sec.
+ :type capabilities: list[~storage_cache_management_client.models.ResourceSkuCapabilities]
+ :ivar locations: The set of locations where the SKU is available. This is the supported and
+ registered Azure Geo Regions (e.g., West US, East US, Southeast Asia, etc.).
+ :vartype locations: list[str]
+ :param location_info: The set of locations where the SKU is available.
+ :type location_info: list[~storage_cache_management_client.models.ResourceSkuLocationInfo]
+ :param name: The name of this SKU.
+ :type name: str
+ :param restrictions: The restrictions preventing this SKU from being used. This is empty if
+ there are no restrictions.
+ :type restrictions: list[~storage_cache_management_client.models.Restriction]
+ """
+
+ _validation = {
+ 'resource_type': {'readonly': True},
+ 'locations': {'readonly': True},
+ }
+
+ _attribute_map = {
+ 'resource_type': {'key': 'resourceType', 'type': 'str'},
+ 'capabilities': {'key': 'capabilities', 'type': '[ResourceSkuCapabilities]'},
+ 'locations': {'key': 'locations', 'type': '[str]'},
+ 'location_info': {'key': 'locationInfo', 'type': '[ResourceSkuLocationInfo]'},
+ 'name': {'key': 'name', 'type': 'str'},
+ 'restrictions': {'key': 'restrictions', 'type': '[Restriction]'},
+ }
+
+ def __init__(
+ self,
+ *,
+ capabilities: Optional[List["ResourceSkuCapabilities"]] = None,
+ location_info: Optional[List["ResourceSkuLocationInfo"]] = None,
+ name: Optional[str] = None,
+ restrictions: Optional[List["Restriction"]] = None,
+ **kwargs
+ ):
+ super(ResourceSku, self).__init__(**kwargs)
+ self.resource_type = None
+ self.capabilities = capabilities
+ self.locations = None
+ self.location_info = location_info
+ self.name = name
+ self.restrictions = restrictions
+
+
+class ResourceSkuCapabilities(msrest.serialization.Model):
+ """A resource SKU capability.
+
+ :param name: Name of a capability, such as ops/sec.
+ :type name: str
+ :param value: Quantity, if the capability is measured by quantity.
+ :type value: str
+ """
+
+ _attribute_map = {
+ 'name': {'key': 'name', 'type': 'str'},
+ 'value': {'key': 'value', 'type': 'str'},
+ }
+
+ def __init__(
+ self,
+ *,
+ name: Optional[str] = None,
+ value: Optional[str] = None,
+ **kwargs
+ ):
+ super(ResourceSkuCapabilities, self).__init__(**kwargs)
+ self.name = name
+ self.value = value
+
+
+class ResourceSkuLocationInfo(msrest.serialization.Model):
+ """Resource SKU location information.
+
+ :param location: Location where this SKU is available.
+ :type location: str
+ :param zones: Zones if any.
+ :type zones: list[str]
+ """
+
+ _attribute_map = {
+ 'location': {'key': 'location', 'type': 'str'},
+ 'zones': {'key': 'zones', 'type': '[str]'},
+ }
+
+ def __init__(
+ self,
+ *,
+ location: Optional[str] = None,
+ zones: Optional[List[str]] = None,
+ **kwargs
+ ):
+ super(ResourceSkuLocationInfo, self).__init__(**kwargs)
+ self.location = location
+ self.zones = zones
+
+
+class ResourceSkusResult(msrest.serialization.Model):
+ """The response from the List Cache SKUs operation.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :param next_link: The URI to fetch the next page of Cache SKUs.
+ :type next_link: str
+ :ivar value: The list of SKUs available for the subscription.
+ :vartype value: list[~storage_cache_management_client.models.ResourceSku]
+ """
+
+ _validation = {
+ 'value': {'readonly': True},
+ }
+
+ _attribute_map = {
+ 'next_link': {'key': 'nextLink', 'type': 'str'},
+ 'value': {'key': 'value', 'type': '[ResourceSku]'},
+ }
+
+ def __init__(
+ self,
+ *,
+ next_link: Optional[str] = None,
+ **kwargs
+ ):
+ super(ResourceSkusResult, self).__init__(**kwargs)
+ self.next_link = next_link
+ self.value = None
+
+
+class Restriction(msrest.serialization.Model):
+ """The restrictions preventing this SKU from being used.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar type: The type of restrictions. In this version, the only possible value for this is
+ location.
+ :vartype type: str
+ :ivar values: The value of restrictions. If the restriction type is set to location, then this
+ would be the different locations where the SKU is restricted.
+ :vartype values: list[str]
+ :param reason_code: The reason for the restriction. As of now this can be "QuotaId" or
+ "NotAvailableForSubscription". "QuotaId" is set when the SKU has requiredQuotas parameter as
+ the subscription does not belong to that quota. "NotAvailableForSubscription" is related to
+ capacity at the datacenter. Possible values include: "QuotaId", "NotAvailableForSubscription".
+ :type reason_code: str or ~storage_cache_management_client.models.ReasonCode
+ """
+
+ _validation = {
+ 'type': {'readonly': True},
+ 'values': {'readonly': True},
+ }
+
+ _attribute_map = {
+ 'type': {'key': 'type', 'type': 'str'},
+ 'values': {'key': 'values', 'type': '[str]'},
+ 'reason_code': {'key': 'reasonCode', 'type': 'str'},
+ }
+
+ def __init__(
+ self,
+ *,
+ reason_code: Optional[Union[str, "ReasonCode"]] = None,
+ **kwargs
+ ):
+ super(Restriction, self).__init__(**kwargs)
+ self.type = None
+ self.values = None
+ self.reason_code = reason_code
+
+
+class StorageTargetResource(msrest.serialization.Model):
+ """Resource used by a Cache.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar name: Name of the Storage Target.
+ :vartype name: str
+ :ivar id: Resource ID of the Storage Target.
+ :vartype id: str
+ :ivar type: Type of the Storage Target; Microsoft.StorageCache/Cache/StorageTarget.
+ :vartype type: str
+ :ivar location: Region name string.
+ :vartype location: str
+ :ivar system_data: The system meta data relating to this resource.
+ :vartype system_data: ~storage_cache_management_client.models.SystemData
+ """
+
+ _validation = {
+ 'name': {'readonly': True, 'pattern': r'^[-0-9a-zA-Z_]{1,80}$'},
+ 'id': {'readonly': True},
+ 'type': {'readonly': True},
+ 'location': {'readonly': True},
+ 'system_data': {'readonly': True},
+ }
+
+ _attribute_map = {
+ 'name': {'key': 'name', 'type': 'str'},
+ 'id': {'key': 'id', 'type': 'str'},
+ 'type': {'key': 'type', 'type': 'str'},
+ 'location': {'key': 'location', 'type': 'str'},
+ 'system_data': {'key': 'systemData', 'type': 'SystemData'},
+ }
+
+ def __init__(
+ self,
+ **kwargs
+ ):
+ super(StorageTargetResource, self).__init__(**kwargs)
+ self.name = None
+ self.id = None
+ self.type = None
+ self.location = None
+ self.system_data = None
+
+
+class StorageTarget(StorageTargetResource):
+ """Type of the Storage Target.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar name: Name of the Storage Target.
+ :vartype name: str
+ :ivar id: Resource ID of the Storage Target.
+ :vartype id: str
+ :ivar type: Type of the Storage Target; Microsoft.StorageCache/Cache/StorageTarget.
+ :vartype type: str
+ :ivar location: Region name string.
+ :vartype location: str
+ :ivar system_data: The system meta data relating to this resource.
+ :vartype system_data: ~storage_cache_management_client.models.SystemData
+ :param junctions: List of Cache namespace junctions to target for namespace associations.
+ :type junctions: list[~storage_cache_management_client.models.NamespaceJunction]
+ :param target_type: Type of the Storage Target. Possible values include: "nfs3", "clfs",
+ "unknown".
+ :type target_type: str or ~storage_cache_management_client.models.StorageTargetType
+ :param provisioning_state: ARM provisioning state, see https://github.com/Azure/azure-resource-
+ manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property. Possible values include:
+ "Succeeded", "Failed", "Cancelled", "Creating", "Deleting", "Updating".
+ :type provisioning_state: str or ~storage_cache_management_client.models.ProvisioningStateType
+ :param nfs3: Properties when targetType is nfs3.
+ :type nfs3: ~storage_cache_management_client.models.Nfs3Target
+ :param unknown_map: Dictionary of string->string pairs containing information about the Storage
+ Target.
+ :type unknown_map: dict[str, str]
+ :param target: Resource ID of storage container.
+ :type target: str
+ """
+
+ _validation = {
+ 'name': {'readonly': True, 'pattern': r'^[-0-9a-zA-Z_]{1,80}$'},
+ 'id': {'readonly': True},
+ 'type': {'readonly': True},
+ 'location': {'readonly': True},
+ 'system_data': {'readonly': True},
+ }
+
+ _attribute_map = {
+ 'name': {'key': 'name', 'type': 'str'},
+ 'id': {'key': 'id', 'type': 'str'},
+ 'type': {'key': 'type', 'type': 'str'},
+ 'location': {'key': 'location', 'type': 'str'},
+ 'system_data': {'key': 'systemData', 'type': 'SystemData'},
+ 'junctions': {'key': 'properties.junctions', 'type': '[NamespaceJunction]'},
+ 'target_type': {'key': 'properties.targetType', 'type': 'str'},
+ 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
+ 'nfs3': {'key': 'properties.nfs3', 'type': 'Nfs3Target'},
+ 'unknown_map': {'key': 'unknown.unknownMap', 'type': '{str}'},
+ 'target': {'key': 'clfs.target', 'type': 'str'},
+ }
+
+ def __init__(
+ self,
+ *,
+ junctions: Optional[List["NamespaceJunction"]] = None,
+ target_type: Optional[Union[str, "StorageTargetType"]] = None,
+ provisioning_state: Optional[Union[str, "ProvisioningStateType"]] = None,
+ nfs3: Optional["Nfs3Target"] = None,
+ unknown_map: Optional[Dict[str, str]] = None,
+ target: Optional[str] = None,
+ **kwargs
+ ):
+ super(StorageTarget, self).__init__(**kwargs)
+ self.junctions = junctions
+ self.target_type = target_type
+ self.provisioning_state = provisioning_state
+ self.nfs3 = nfs3
+ self.unknown_map = unknown_map
+ self.target = target
+
+
+class StorageTargetsResult(msrest.serialization.Model):
+ """A list of Storage Targets.
+
+ :param next_link: The URI to fetch the next page of Storage Targets.
+ :type next_link: str
+ :param value: The list of Storage Targets defined for the Cache.
+ :type value: list[~storage_cache_management_client.models.StorageTarget]
+ """
+
+ _attribute_map = {
+ 'next_link': {'key': 'nextLink', 'type': 'str'},
+ 'value': {'key': 'value', 'type': '[StorageTarget]'},
+ }
+
+ def __init__(
+ self,
+ *,
+ next_link: Optional[str] = None,
+ value: Optional[List["StorageTarget"]] = None,
+ **kwargs
+ ):
+ super(StorageTargetsResult, self).__init__(**kwargs)
+ self.next_link = next_link
+ self.value = value
+
+
+class SystemData(msrest.serialization.Model):
+ """Metadata pertaining to creation and last modification of the resource.
+
+ :param created_by: The identity that created the resource.
+ :type created_by: str
+ :param created_by_type: The type of identity that created the resource. Possible values
+ include: "User", "Application", "ManagedIdentity", "Key".
+ :type created_by_type: str or ~storage_cache_management_client.models.CreatedByType
+ :param created_at: The timestamp of resource creation (UTC).
+ :type created_at: ~datetime.datetime
+ :param last_modified_by: The identity that last modified the resource.
+ :type last_modified_by: str
+ :param last_modified_by_type: The type of identity that last modified the resource. Possible
+ values include: "User", "Application", "ManagedIdentity", "Key".
+ :type last_modified_by_type: str or ~storage_cache_management_client.models.CreatedByType
+ :param last_modified_at: The timestamp of the resource's last modification (UTC).
+ :type last_modified_at: ~datetime.datetime
+ """
+
+ # Maps python attribute names to the JSON keys/types used on the wire.
+ _attribute_map = {
+ 'created_by': {'key': 'createdBy', 'type': 'str'},
+ 'created_by_type': {'key': 'createdByType', 'type': 'str'},
+ 'created_at': {'key': 'createdAt', 'type': 'iso-8601'},
+ 'last_modified_by': {'key': 'lastModifiedBy', 'type': 'str'},
+ 'last_modified_by_type': {'key': 'lastModifiedByType', 'type': 'str'},
+ 'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'},
+ }
+
+ def __init__(
+ self,
+ *,
+ created_by: Optional[str] = None,
+ created_by_type: Optional[Union[str, "CreatedByType"]] = None,
+ created_at: Optional[datetime.datetime] = None,
+ last_modified_by: Optional[str] = None,
+ last_modified_by_type: Optional[Union[str, "CreatedByType"]] = None,
+ last_modified_at: Optional[datetime.datetime] = None,
+ **kwargs
+ ):
+ super(SystemData, self).__init__(**kwargs)
+ self.created_by = created_by
+ self.created_by_type = created_by_type
+ self.created_at = created_at
+ self.last_modified_by = last_modified_by
+ self.last_modified_by_type = last_modified_by_type
+ self.last_modified_at = last_modified_at
+
+
+class UsageModel(msrest.serialization.Model):
+ """A usage model.
+
+ :param display: Localized information describing this usage model.
+ :type display: ~storage_cache_management_client.models.UsageModelDisplay
+ :param model_name: Non-localized keyword name for this usage model.
+ :type model_name: str
+ :param target_type: The type of Storage Target to which this model is applicable (only nfs3 as
+ of this version).
+ :type target_type: str
+ """
+
+ # Maps python attribute names to the JSON keys/types used on the wire.
+ _attribute_map = {
+ 'display': {'key': 'display', 'type': 'UsageModelDisplay'},
+ 'model_name': {'key': 'modelName', 'type': 'str'},
+ 'target_type': {'key': 'targetType', 'type': 'str'},
+ }
+
+ def __init__(
+ self,
+ *,
+ display: Optional["UsageModelDisplay"] = None,
+ model_name: Optional[str] = None,
+ target_type: Optional[str] = None,
+ **kwargs
+ ):
+ super(UsageModel, self).__init__(**kwargs)
+ self.display = display
+ self.model_name = model_name
+ self.target_type = target_type
+
+
+class UsageModelDisplay(msrest.serialization.Model):
+ """Localized information describing this usage model.
+
+ :param description: String to display for this usage model.
+ :type description: str
+ """
+
+ # Maps the python attribute name to the JSON key/type used on the wire.
+ _attribute_map = {
+ 'description': {'key': 'description', 'type': 'str'},
+ }
+
+ def __init__(
+ self,
+ *,
+ description: Optional[str] = None,
+ **kwargs
+ ):
+ super(UsageModelDisplay, self).__init__(**kwargs)
+ self.description = description
+
+
+class UsageModelsResult(msrest.serialization.Model):
+ """A list of Cache usage models.
+
+ One page of a pageable list response; ``next_link`` is populated by the
+ service when further pages exist.
+
+ :param next_link: The URI to fetch the next page of Cache usage models.
+ :type next_link: str
+ :param value: The list of usage models available for the subscription.
+ :type value: list[~storage_cache_management_client.models.UsageModel]
+ """
+
+ # Maps python attribute names to the JSON keys/types used on the wire.
+ _attribute_map = {
+ 'next_link': {'key': 'nextLink', 'type': 'str'},
+ 'value': {'key': 'value', 'type': '[UsageModel]'},
+ }
+
+ def __init__(
+ self,
+ *,
+ next_link: Optional[str] = None,
+ value: Optional[List["UsageModel"]] = None,
+ **kwargs
+ ):
+ super(UsageModelsResult, self).__init__(**kwargs)
+ self.next_link = next_link
+ self.value = value
diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/models/_storage_cache_management_client_enums.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/models/_storage_cache_management_client_enums.py
new file mode 100644
index 00000000000..907e68e817c
--- /dev/null
+++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/models/_storage_cache_management_client_enums.py
@@ -0,0 +1,147 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from enum import Enum, EnumMeta
+from six import with_metaclass
+
+class _CaseInsensitiveEnumMeta(EnumMeta):
+ """Enum metaclass that makes member lookup by name case-insensitive,
+ for both ``Enum['name']`` indexing and ``Enum.name`` attribute access.
+ """
+
+ def __getitem__(self, name):
+ # Member names are declared in UPPER_SNAKE_CASE, so upper-casing the
+ # requested name gives case-insensitive indexing.
+ return super().__getitem__(name.upper())
+
+ def __getattr__(cls, name):
+ """Return the enum member matching `name`
+ We use __getattr__ instead of descriptors or inserting into the enum
+ class' __dict__ in order to support `name` and `value` being both
+ properties for enum members (which live in the class' __dict__) and
+ enum members themselves.
+ """
+ try:
+ return cls._member_map_[name.upper()]
+ except KeyError:
+ raise AttributeError(name)
+
+
+class CacheIdentityType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
+ """The type of identity used for the cache
+ """
+
+ # Values are the exact wire strings; name lookup is case-insensitive via _CaseInsensitiveEnumMeta.
+ SYSTEM_ASSIGNED = "SystemAssigned"
+ NONE = "None"
+
+class CreatedByType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
+ """The type of identity that created the resource.
+ """
+
+ # Values are the exact wire strings; name lookup is case-insensitive via _CaseInsensitiveEnumMeta.
+ USER = "User"
+ APPLICATION = "Application"
+ MANAGED_IDENTITY = "ManagedIdentity"
+ KEY = "Key"
+
+class DomainJoinedType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
+ """This field indicates if the HPC Cache is joined to the Active Directory domain.
+ """
+
+ # Values are the exact wire strings; name lookup is case-insensitive via _CaseInsensitiveEnumMeta.
+ YES = "Yes"
+ NO = "No"
+ ERROR = "Error"
+
+class FirmwareStatusType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
+ """True if there is a firmware update ready to install on this Cache. The firmware will
+ automatically be installed after firmwareUpdateDeadline if not triggered earlier via the
+ upgrade operation.
+ """
+
+ # NOTE: unlike the other enums in this file, these wire values are lowercase.
+ AVAILABLE = "available"
+ UNAVAILABLE = "unavailable"
+
+class HealthStateType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
+ """List of Cache health states.
+ """
+
+ # Values are the exact wire strings; name lookup is case-insensitive via _CaseInsensitiveEnumMeta.
+ UNKNOWN = "Unknown"
+ HEALTHY = "Healthy"
+ DEGRADED = "Degraded"
+ DOWN = "Down"
+ TRANSITIONING = "Transitioning"
+ STOPPING = "Stopping"
+ STOPPED = "Stopped"
+ UPGRADING = "Upgrading"
+ FLUSHING = "Flushing"
+
+class MetricAggregationType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
+ """The aggregation type reported for a metric specification.
+ """
+
+ NOT_SPECIFIED = "NotSpecified"
+ NONE = "None"
+ AVERAGE = "Average"
+ MINIMUM = "Minimum"
+ MAXIMUM = "Maximum"
+ TOTAL = "Total"
+ COUNT = "Count"
+
+class NfsAccessRuleAccess(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
+ """Access allowed by this rule.
+ """
+
+ # no access / read-only / read-write, in NFS export-option style.
+ NO = "no"
+ RO = "ro"
+ RW = "rw"
+
+class NfsAccessRuleScope(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
+ """Scope applied to this rule.
+ """
+
+ # Values are the exact wire strings; name lookup is case-insensitive via _CaseInsensitiveEnumMeta.
+ DEFAULT = "default"
+ NETWORK = "network"
+ HOST = "host"
+
+class ProvisioningStateType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
+ """ARM provisioning state, see https://github.com/Azure/azure-resource-manager-
+ rpc/blob/master/v1.0/Addendum.md#provisioningstate-property
+ """
+
+ # Values are the exact wire strings; name lookup is case-insensitive via _CaseInsensitiveEnumMeta.
+ SUCCEEDED = "Succeeded"
+ FAILED = "Failed"
+ CANCELLED = "Cancelled"
+ CREATING = "Creating"
+ DELETING = "Deleting"
+ UPDATING = "Updating"
+
+class ReasonCode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
+ """The reason for the restriction. As of now this can be "QuotaId" or
+ "NotAvailableForSubscription". "QuotaId" is set when the SKU has requiredQuotas parameter as
+ the subscription does not belong to that quota. "NotAvailableForSubscription" is related to
+ capacity at the datacenter.
+ """
+
+ # Values are the exact wire strings; name lookup is case-insensitive via _CaseInsensitiveEnumMeta.
+ QUOTA_ID = "QuotaId"
+ NOT_AVAILABLE_FOR_SUBSCRIPTION = "NotAvailableForSubscription"
+
+class StorageTargetType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
+ """Type of the Storage Target.
+ """
+
+ # Values are the exact wire strings; name lookup is case-insensitive via _CaseInsensitiveEnumMeta.
+ NFS3 = "nfs3"
+ CLFS = "clfs"
+ UNKNOWN = "unknown"
+
+class UsernameDownloadedType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
+ """Indicates if the HPC Cache has performed the username download successfully.
+ """
+
+ # Values are the exact wire strings; name lookup is case-insensitive via _CaseInsensitiveEnumMeta.
+ YES = "Yes"
+ NO = "No"
+ ERROR = "Error"
+
+class UsernameSource(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
+ """This setting determines how the system gets username and group names for clients.
+ """
+
+ # Values are the exact wire strings; name lookup is case-insensitive via _CaseInsensitiveEnumMeta.
+ AD = "AD"
+ LDAP = "LDAP"
+ FILE = "File"
+ NONE = "None"
diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/__init__.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/__init__.py
new file mode 100644
index 00000000000..52d521bf575
--- /dev/null
+++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/__init__.py
@@ -0,0 +1,23 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from ._operation_operations import OperationOperations
+from ._sku_operations import SkuOperations
+from ._usage_model_operations import UsageModelOperations
+from ._asc_operation_operations import ASCOperationOperations
+from ._cache_operations import CacheOperations
+from ._storage_target_operations import StorageTargetOperations
+
+__all__ = [
+ 'OperationOperations',
+ 'SkuOperations',
+ 'UsageModelOperations',
+ 'ASCOperationOperations',
+ 'CacheOperations',
+ 'StorageTargetOperations',
+]
diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_asc_operation_operations.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_asc_operation_operations.py
new file mode 100644
index 00000000000..3331ee3fd68
--- /dev/null
+++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_asc_operation_operations.py
@@ -0,0 +1,104 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from typing import TYPE_CHECKING
+import warnings
+
+from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
+from azure.core.pipeline import PipelineResponse
+from azure.core.pipeline.transport import HttpRequest, HttpResponse
+from azure.mgmt.core.exceptions import ARMErrorFormat
+
+from .. import models
+
+if TYPE_CHECKING:
+ # pylint: disable=unused-import,ungrouped-imports
+ from typing import Any, Callable, Dict, Generic, Optional, TypeVar
+
+ T = TypeVar('T')
+ ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
+
+class ASCOperationOperations(object):
+ """ASCOperationOperations operations.
+
+ You should not instantiate this class directly. Instead, you should create a Client instance that
+ instantiates it for you and attaches it as an attribute.
+
+ :ivar models: Alias to model classes used in this operation group.
+ :type models: ~storage_cache_management_client.models
+ :param client: Client for service requests.
+ :param config: Configuration of service client.
+ :param serializer: An object model serializer.
+ :param deserializer: An object model deserializer.
+ """
+
+ models = models
+
+ def __init__(self, client, config, serializer, deserializer):
+ # Shared ARM pipeline client, client configuration and msrest (de)serializers.
+ self._client = client
+ self._serialize = serializer
+ self._deserialize = deserializer
+ self._config = config
+
+ def get(
+ self,
+ location, # type: str
+ operation_id, # type: str
+ **kwargs # type: Any
+ ):
+ # type: (...) -> "models.ASCOperation"
+ """Gets the status of an asynchronous operation for the Azure HPC Cache.
+
+ :param location: The name of the region used to look up the operation.
+ :type location: str
+ :param operation_id: The operation id which uniquely identifies the asynchronous operation.
+ :type operation_id: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: ASCOperation, or the result of cls(response)
+ :rtype: ~storage_cache_management_client.models.ASCOperation
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ # Optional caller hook that is handed the raw response and deserialized body.
+ cls = kwargs.pop('cls', None) # type: ClsType["models.ASCOperation"]
+ # Default HTTP-status -> exception mapping; callers may extend it via 'error_map'.
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+ api_version = "2020-10-01"
+ accept = "application/json"
+
+ # Construct URL
+ url = self.get.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
+ 'location': self._serialize.url("location", location, 'str'),
+ 'operationId': self._serialize.url("operation_id", operation_id, 'str'),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ # Send synchronously through the shared ARM pipeline; only 200 is a success.
+ request = self._client.get(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise HttpResponseError(response=response, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize('ASCOperation', pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+
+ return deserialized
+ get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.StorageCache/locations/{location}/ascOperations/{operationId}'} # type: ignore
diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_cache_operations.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_cache_operations.py
new file mode 100644
index 00000000000..36f9c71ff65
--- /dev/null
+++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_cache_operations.py
@@ -0,0 +1,1251 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from typing import TYPE_CHECKING
+import warnings
+
+from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
+from azure.core.paging import ItemPaged
+from azure.core.pipeline import PipelineResponse
+from azure.core.pipeline.transport import HttpRequest, HttpResponse
+from azure.core.polling import LROPoller, NoPolling, PollingMethod
+from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.arm_polling import ARMPolling
+
+from .. import models
+
+if TYPE_CHECKING:
+ # pylint: disable=unused-import,ungrouped-imports
+ from typing import Any, Callable, Dict, Generic, Iterable, List, Optional, TypeVar, Union
+
+ T = TypeVar('T')
+ ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
+
+class CacheOperations(object):
+ """CacheOperations operations.
+
+ You should not instantiate this class directly. Instead, you should create a Client instance that
+ instantiates it for you and attaches it as an attribute.
+
+ :ivar models: Alias to model classes used in this operation group.
+ :type models: ~storage_cache_management_client.models
+ :param client: Client for service requests.
+ :param config: Configuration of service client.
+ :param serializer: An object model serializer.
+ :param deserializer: An object model deserializer.
+ """
+
+ models = models
+
+ def __init__(self, client, config, serializer, deserializer):
+ # Shared ARM pipeline client, client configuration and msrest (de)serializers.
+ self._client = client
+ self._serialize = serializer
+ self._deserialize = deserializer
+ self._config = config
+
+ def list(
+ self,
+ **kwargs # type: Any
+ ):
+ # type: (...) -> Iterable["models.CachesListResult"]
+ """Returns all Caches the user has access to under a subscription.
+
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: An iterator like instance of either CachesListResult or the result of cls(response)
+ :rtype: ~azure.core.paging.ItemPaged[~storage_cache_management_client.models.CachesListResult]
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType["models.CachesListResult"]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+ api_version = "2020-10-01"
+ accept = "application/json"
+
+ def prepare_request(next_link=None):
+ # Build the first-page request from the URL template, or follow the
+ # service-provided next_link verbatim for subsequent pages.
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ if not next_link:
+ # Construct URL
+ url = self.list.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
+
+ request = self._client.get(url, query_parameters, header_parameters)
+ else:
+ url = next_link
+ query_parameters = {} # type: Dict[str, Any]
+ request = self._client.get(url, query_parameters, header_parameters)
+ return request
+
+ def extract_data(pipeline_response):
+ # Deserialize one page and return (continuation link, iterator of items).
+ deserialized = self._deserialize('CachesListResult', pipeline_response)
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem)
+ return deserialized.next_link or None, iter(list_of_elem)
+
+ def get_next(next_link=None):
+ # Fetch a single page, raising a mapped ARM error on any non-200 status.
+ request = prepare_request(next_link)
+
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise HttpResponseError(response=response, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return ItemPaged(
+ get_next, extract_data
+ )
+ list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.StorageCache/caches'} # type: ignore
+
+ def list_by_resource_group(
+ self,
+ resource_group_name, # type: str
+ **kwargs # type: Any
+ ):
+ # type: (...) -> Iterable["models.CachesListResult"]
+ """Returns all Caches the user has access to under a resource group.
+
+ :param resource_group_name: Target resource group.
+ :type resource_group_name: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: An iterator like instance of either CachesListResult or the result of cls(response)
+ :rtype: ~azure.core.paging.ItemPaged[~storage_cache_management_client.models.CachesListResult]
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ cls = kwargs.pop('cls', None) # type: ClsType["models.CachesListResult"]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+ api_version = "2020-10-01"
+ accept = "application/json"
+
+ def prepare_request(next_link=None):
+ # Build the first-page request from the URL template, or follow the
+ # service-provided next_link verbatim for subsequent pages.
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ if not next_link:
+ # Construct URL
+ url = self.list_by_resource_group.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
+ 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
+
+ request = self._client.get(url, query_parameters, header_parameters)
+ else:
+ url = next_link
+ query_parameters = {} # type: Dict[str, Any]
+ request = self._client.get(url, query_parameters, header_parameters)
+ return request
+
+ def extract_data(pipeline_response):
+ # Deserialize one page and return (continuation link, iterator of items).
+ deserialized = self._deserialize('CachesListResult', pipeline_response)
+ list_of_elem = deserialized.value
+ if cls:
+ list_of_elem = cls(list_of_elem)
+ return deserialized.next_link or None, iter(list_of_elem)
+
+ def get_next(next_link=None):
+ # Fetch a single page, raising a mapped ARM error on any non-200 status.
+ request = prepare_request(next_link)
+
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise HttpResponseError(response=response, error_format=ARMErrorFormat)
+
+ return pipeline_response
+
+ return ItemPaged(
+ get_next, extract_data
+ )
+ list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches'} # type: ignore
+
+ def _delete_initial(
+ self,
+ resource_group_name, # type: str
+ cache_name, # type: str
+ **kwargs # type: Any
+ ):
+ # type: (...) -> None
+ # Initial DELETE request of the long-running delete operation; polling to
+ # completion is driven by begin_delete.
+ cls = kwargs.pop('cls', None) # type: ClsType[None]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+ api_version = "2020-10-01"
+ accept = "application/json"
+
+ # Construct URL
+ url = self._delete_initial.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
+ 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
+ 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ request = self._client.delete(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ # 200/202 indicate the delete was accepted (possibly still running); 204 means already gone.
+ if response.status_code not in [200, 202, 204]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise HttpResponseError(response=response, error_format=ARMErrorFormat)
+
+ if cls:
+ return cls(pipeline_response, None, {})
+
+ _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}'} # type: ignore
+
+ def begin_delete(
+ self,
+ resource_group_name, # type: str
+ cache_name, # type: str
+ **kwargs # type: Any
+ ):
+ # type: (...) -> LROPoller[None]
+ """Schedules a Cache for deletion.
+
+ :param resource_group_name: Target resource group.
+ :type resource_group_name: str
+ :param cache_name: Name of Cache. Length of name must not be greater than 80 and chars must be
+ from the [-0-9a-zA-Z_] char class.
+ :type cache_name: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+ :keyword polling: True for ARMPolling, False for no polling, or a
+ polling object for personal polling strategy
+ :paramtype polling: bool or ~azure.core.polling.PollingMethod
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
+ :return: An instance of LROPoller that returns either None or the result of cls(response)
+ :rtype: ~azure.core.polling.LROPoller[None]
+ :raises ~azure.core.exceptions.HttpResponseError:
+ """
+ polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
+ cls = kwargs.pop('cls', None) # type: ClsType[None]
+ lro_delay = kwargs.pop(
+ 'polling_interval',
+ self._config.polling_interval
+ )
+ cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
+ if cont_token is None:
+ # The cls lambda makes _delete_initial return the raw PipelineResponse,
+ # which the polling method needs to drive the poll loop.
+ raw_result = self._delete_initial(
+ resource_group_name=resource_group_name,
+ cache_name=cache_name,
+ cls=lambda x,y,z: x,
+ **kwargs
+ )
+
+ # These kwargs were consumed by the initial call and must not leak into polling.
+ kwargs.pop('error_map', None)
+ kwargs.pop('content_type', None)
+
+ def get_long_running_output(pipeline_response):
+ # Delete has no body; only the optional caller hook produces a value.
+ if cls:
+ return cls(pipeline_response, None, {})
+
+ path_format_arguments = {
+ 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
+ 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
+ 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
+ }
+
+ if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
+ elif polling is False: polling_method = NoPolling()
+ else: polling_method = polling
+ if cont_token:
+ return LROPoller.from_continuation_token(
+ polling_method=polling_method,
+ continuation_token=cont_token,
+ client=self._client,
+ deserialization_callback=get_long_running_output
+ )
+ else:
+ return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
+ begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}'} # type: ignore
+
+ def get(
+ self,
+ resource_group_name, # type: str
+ cache_name, # type: str
+ **kwargs # type: Any
+ ):
+ # type: (...) -> "models.Cache"
+ """Returns a Cache.
+
+ :param resource_group_name: Target resource group.
+ :type resource_group_name: str
+ :param cache_name: Name of Cache. Length of name must not be greater than 80 and chars must be
+ from the [-0-9a-zA-Z_] char class.
+ :type cache_name: str
+ :keyword callable cls: A custom type or function that will be passed the direct response
+ :return: Cache, or the result of cls(response)
+ :rtype: ~storage_cache_management_client.models.Cache
+ :raises: ~azure.core.exceptions.HttpResponseError
+ """
+ # Optional caller hook that is handed the raw response and deserialized body.
+ cls = kwargs.pop('cls', None) # type: ClsType["models.Cache"]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+ api_version = "2020-10-01"
+ accept = "application/json"
+
+ # Construct URL
+ url = self.get.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
+ 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
+ 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ # Send synchronously through the shared ARM pipeline; only 200 is a success.
+ request = self._client.get(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise HttpResponseError(response=response, error_format=ARMErrorFormat)
+
+ deserialized = self._deserialize('Cache', pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+
+ return deserialized
+ get.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}'} # type: ignore
+
+ def _create_or_update_initial(
+ self,
+ resource_group_name, # type: str
+ cache_name, # type: str
+ tags=None, # type: Optional[object]
+ location=None, # type: Optional[str]
+ cache_size_gb=None, # type: Optional[int]
+ provisioning_state=None, # type: Optional[Union[str, "models.ProvisioningStateType"]]
+ subnet=None, # type: Optional[str]
+ upgrade_status=None, # type: Optional["models.CacheUpgradeStatus"]
+ extended_groups_enabled=None, # type: Optional[bool]
+ username_source="None", # type: Optional[Union[str, "models.UsernameSource"]]
+ group_file_uri=None, # type: Optional[str]
+ user_file_uri=None, # type: Optional[str]
+ ldap_server=None, # type: Optional[str]
+ ldap_base_dn=None, # type: Optional[str]
+ encrypt_ldap_connection=None, # type: Optional[bool]
+ require_valid_certificate=None, # type: Optional[bool]
+ auto_download_certificate=None, # type: Optional[bool]
+ ca_certificate_uri=None, # type: Optional[str]
+ credentials=None, # type: Optional["models.CacheUsernameDownloadSettingsCredentials"]
+ primary_dns_ip_address=None, # type: Optional[str]
+ secondary_dns_ip_address=None, # type: Optional[str]
+ domain_name=None, # type: Optional[str]
+ domain_net_bios=None, # type: Optional[str]
+ smb_server_name=None, # type: Optional[str]
+ access_policies=None, # type: Optional[List["models.NfsAccessPolicy"]]
+ key_url=None, # type: Optional[str]
+ id=None, # type: Optional[str]
+ mtu=1500, # type: Optional[int]
+ name=None, # type: Optional[str]
+ type=None, # type: Optional[Union[str, "models.CacheIdentityType"]]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> Optional["models.Cache"]
+ # Initial PUT of the create-or-update LRO; polling is driven by the caller.
+ # NOTE: 'id', 'name' and 'type' are generator-flattened body fields (they
+ # shadow builtins, but renaming them would break the generated call sites).
+ cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.Cache"]]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+
+ # Re-assemble the flattened keyword arguments into the single Cache body model.
+ cache = models.Cache(tags=tags, location=location, cache_size_gb=cache_size_gb, provisioning_state=provisioning_state, subnet=subnet, upgrade_status=upgrade_status, extended_groups_enabled=extended_groups_enabled, username_source=username_source, group_file_uri=group_file_uri, user_file_uri=user_file_uri, ldap_server=ldap_server, ldap_base_dn=ldap_base_dn, encrypt_ldap_connection=encrypt_ldap_connection, require_valid_certificate=require_valid_certificate, auto_download_certificate=auto_download_certificate, ca_certificate_uri=ca_certificate_uri, credentials_directory_services_settings_username_download_credentials=credentials, primary_dns_ip_address=primary_dns_ip_address, secondary_dns_ip_address=secondary_dns_ip_address, domain_name=domain_name, domain_net_bios=domain_net_bios, smb_server_name=smb_server_name, access_policies=access_policies, key_url=key_url, id_encryption_settings_key_encryption_key_source_vault_id=id, mtu=mtu, name_sku_name=name, type_identity_type=type)
+ api_version = "2020-10-01"
+ content_type = kwargs.pop("content_type", "application/json")
+ accept = "application/json"
+
+ # Construct URL
+ url = self._create_or_update_initial.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
+ 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
+ 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ body_content_kwargs = {} # type: Dict[str, Any]
+ # 'cache' is always constructed above; the None branch is vestigial generated code.
+ if cache is not None:
+ body_content = self._serialize.body(cache, 'Cache')
+ else:
+ body_content = None
+ body_content_kwargs['content'] = body_content
+ request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 201, 202]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise HttpResponseError(response=response, error_format=ARMErrorFormat)
+
+ # 202 Accepted carries no body, so 'deserialized' stays None in that case.
+ deserialized = None
+ if response.status_code == 200:
+ deserialized = self._deserialize('Cache', pipeline_response)
+
+ if response.status_code == 201:
+ deserialized = self._deserialize('Cache', pipeline_response)
+
+ if cls:
+ return cls(pipeline_response, deserialized, {})
+
+ return deserialized
+ _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}'} # type: ignore
+
    def begin_create_or_update(
        self,
        resource_group_name,  # type: str
        cache_name,  # type: str
        tags=None,  # type: Optional[object]
        location=None,  # type: Optional[str]
        cache_size_gb=None,  # type: Optional[int]
        provisioning_state=None,  # type: Optional[Union[str, "models.ProvisioningStateType"]]
        subnet=None,  # type: Optional[str]
        upgrade_status=None,  # type: Optional["models.CacheUpgradeStatus"]
        extended_groups_enabled=None,  # type: Optional[bool]
        username_source="None",  # type: Optional[Union[str, "models.UsernameSource"]]
        group_file_uri=None,  # type: Optional[str]
        user_file_uri=None,  # type: Optional[str]
        ldap_server=None,  # type: Optional[str]
        ldap_base_dn=None,  # type: Optional[str]
        encrypt_ldap_connection=None,  # type: Optional[bool]
        require_valid_certificate=None,  # type: Optional[bool]
        auto_download_certificate=None,  # type: Optional[bool]
        ca_certificate_uri=None,  # type: Optional[str]
        credentials=None,  # type: Optional["models.CacheUsernameDownloadSettingsCredentials"]
        primary_dns_ip_address=None,  # type: Optional[str]
        secondary_dns_ip_address=None,  # type: Optional[str]
        domain_name=None,  # type: Optional[str]
        domain_net_bios=None,  # type: Optional[str]
        smb_server_name=None,  # type: Optional[str]
        access_policies=None,  # type: Optional[List["models.NfsAccessPolicy"]]
        key_url=None,  # type: Optional[str]
        id=None,  # type: Optional[str]  # NOTE: shadows builtin; name fixed by the generated client surface
        mtu=1500,  # type: Optional[int]
        name=None,  # type: Optional[str]
        type=None,  # type: Optional[Union[str, "models.CacheIdentityType"]]  # NOTE: shadows builtin
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["models.Cache"]
        """Create or update a Cache.

        :param resource_group_name: Target resource group.
        :type resource_group_name: str
        :param cache_name: Name of Cache. Length of name must not be greater than 80 and chars must be
         from the [-0-9a-zA-Z_] char class.
        :type cache_name: str
        :param tags: ARM tags as name/value pairs.
        :type tags: object
        :param location: Region name string.
        :type location: str
        :param cache_size_gb: The size of this Cache, in GB.
        :type cache_size_gb: int
        :param provisioning_state: ARM provisioning state, see https://github.com/Azure/azure-resource-
         manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property.
        :type provisioning_state: str or ~storage_cache_management_client.models.ProvisioningStateType
        :param subnet: Subnet used for the Cache.
        :type subnet: str
        :param upgrade_status: Upgrade status of the Cache.
        :type upgrade_status: ~storage_cache_management_client.models.CacheUpgradeStatus
        :param extended_groups_enabled: This indicates if Extended Groups is enabled.
        :type extended_groups_enabled: bool
        :param username_source: This setting determines how the system gets username and group names
         for clients.
        :type username_source: str or ~storage_cache_management_client.models.UsernameSource
        :param group_file_uri: The URI of the file containing the group information (in /etc/group file
         format). This field must be populated when 'usernameSource' is set to 'File'.
        :type group_file_uri: str
        :param user_file_uri: The URI of the file containing the user information (in /etc/passwd file
         format). This field must be populated when 'usernameSource' is set to 'File'.
        :type user_file_uri: str
        :param ldap_server: The fully qualified domain name or IP address of the LDAP server to use.
        :type ldap_server: str
        :param ldap_base_dn: The base distinguished name for the LDAP domain.
        :type ldap_base_dn: str
        :param encrypt_ldap_connection: This indicates if the LDAP connection should be encrypted.
        :type encrypt_ldap_connection: bool
        :param require_valid_certificate: Determines if the certificates should be validated by a
         certificate authority. When true, caCertificateURI must be provided.
        :type require_valid_certificate: bool
        :param auto_download_certificate: Determines if the certificate should be automatically
         downloaded. This applies to 'caCertificateURI' when 'requireValidCertificate' is true, or a
         self signed certificate otherwise.
        :type auto_download_certificate: bool
        :param ca_certificate_uri: The URI of the CA certificate to validate the LDAP secure
         connection. This field must be populated when 'requireValidCertificate' is set to true.
        :type ca_certificate_uri: str
        :param credentials: When present, these are the credentials for the secure LDAP connection.
        :type credentials: ~storage_cache_management_client.models.CacheUsernameDownloadSettingsCredentials
        :param primary_dns_ip_address: Primary DNS IP address used to resolve the Active Directory
         domain controller's fully qualified domain name.
        :type primary_dns_ip_address: str
        :param secondary_dns_ip_address: Secondary DNS IP address used to resolve the Active Directory
         domain controller's fully qualified domain name.
        :type secondary_dns_ip_address: str
        :param domain_name: The fully qualified domain name of the Active Directory domain controller.
        :type domain_name: str
        :param domain_net_bios: The Active Directory domain's NetBIOS name.
        :type domain_net_bios: str
        :param smb_server_name: The name (NetBIOS) used for the HPC Cache to join the Active Directory
         domain. Length must not be greater than 15 and chars must be from the [-0-9a-zA-Z_] char class.
        :type smb_server_name: str
        :param access_policies: NFS access policies defined for this cache.
        :type access_policies: list[~storage_cache_management_client.models.NfsAccessPolicy]
        :param key_url: The URL referencing a key encryption key in Key Vault.
        :type key_url: str
        :param id: Resource Id.
        :type id: str
        :param mtu: The IPv4 maximum transmission unit configured for the subnet.
        :type mtu: int
        :param name: SKU name for this Cache.
        :type name: str
        :param type: The type of identity used for the cache.
        :type type: str or ~storage_cache_management_client.models.CacheIdentityType
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either Cache or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~storage_cache_management_client.models.Cache]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["models.Cache"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Fresh operation: issue the initial PUT. `cls=lambda ...` makes the
            # initial call return the raw pipeline response so the poller can
            # read the LRO headers from it.
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                cache_name=cache_name,
                tags=tags,
                location=location,
                cache_size_gb=cache_size_gb,
                provisioning_state=provisioning_state,
                subnet=subnet,
                upgrade_status=upgrade_status,
                extended_groups_enabled=extended_groups_enabled,
                username_source=username_source,
                group_file_uri=group_file_uri,
                user_file_uri=user_file_uri,
                ldap_server=ldap_server,
                ldap_base_dn=ldap_base_dn,
                encrypt_ldap_connection=encrypt_ldap_connection,
                require_valid_certificate=require_valid_certificate,
                auto_download_certificate=auto_download_certificate,
                ca_certificate_uri=ca_certificate_uri,
                credentials=credentials,
                primary_dns_ip_address=primary_dns_ip_address,
                secondary_dns_ip_address=secondary_dns_ip_address,
                domain_name=domain_name,
                domain_net_bios=domain_net_bios,
                smb_server_name=smb_server_name,
                access_policies=access_policies,
                key_url=key_url,
                id=id,
                mtu=mtu,
                name=name,
                type=type,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # These kwargs were consumed by the initial request; drop them so they
        # are not forwarded to the polling pipeline below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response of the LRO into a Cache model.
            deserialized = self._deserialize('Cache', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
        }

        # polling=True -> default ARM long-running-operation polling;
        # polling=False -> return immediately; otherwise a caller-supplied method.
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously saved poller instead of starting a new request.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}'}  # type: ignore
+
    def update(
        self,
        resource_group_name,  # type: str
        cache_name,  # type: str
        tags=None,  # type: Optional[object]
        location=None,  # type: Optional[str]
        cache_size_gb=None,  # type: Optional[int]
        provisioning_state=None,  # type: Optional[Union[str, "models.ProvisioningStateType"]]
        subnet=None,  # type: Optional[str]
        upgrade_status=None,  # type: Optional["models.CacheUpgradeStatus"]
        extended_groups_enabled=None,  # type: Optional[bool]
        username_source="None",  # type: Optional[Union[str, "models.UsernameSource"]]
        group_file_uri=None,  # type: Optional[str]
        user_file_uri=None,  # type: Optional[str]
        ldap_server=None,  # type: Optional[str]
        ldap_base_dn=None,  # type: Optional[str]
        encrypt_ldap_connection=None,  # type: Optional[bool]
        require_valid_certificate=None,  # type: Optional[bool]
        auto_download_certificate=None,  # type: Optional[bool]
        ca_certificate_uri=None,  # type: Optional[str]
        credentials=None,  # type: Optional["models.CacheUsernameDownloadSettingsCredentials"]
        primary_dns_ip_address=None,  # type: Optional[str]
        secondary_dns_ip_address=None,  # type: Optional[str]
        domain_name=None,  # type: Optional[str]
        domain_net_bios=None,  # type: Optional[str]
        smb_server_name=None,  # type: Optional[str]
        access_policies=None,  # type: Optional[List["models.NfsAccessPolicy"]]
        key_url=None,  # type: Optional[str]
        id=None,  # type: Optional[str]  # NOTE: shadows builtin; name fixed by the generated client surface
        mtu=1500,  # type: Optional[int]
        name=None,  # type: Optional[str]
        type=None,  # type: Optional[Union[str, "models.CacheIdentityType"]]  # NOTE: shadows builtin
        **kwargs  # type: Any
    ):
        # type: (...) -> "models.Cache"
        """Update a Cache instance.

        :param resource_group_name: Target resource group.
        :type resource_group_name: str
        :param cache_name: Name of Cache. Length of name must not be greater than 80 and chars must be
         from the [-0-9a-zA-Z_] char class.
        :type cache_name: str
        :param tags: ARM tags as name/value pairs.
        :type tags: object
        :param location: Region name string.
        :type location: str
        :param cache_size_gb: The size of this Cache, in GB.
        :type cache_size_gb: int
        :param provisioning_state: ARM provisioning state, see https://github.com/Azure/azure-resource-
         manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property.
        :type provisioning_state: str or ~storage_cache_management_client.models.ProvisioningStateType
        :param subnet: Subnet used for the Cache.
        :type subnet: str
        :param upgrade_status: Upgrade status of the Cache.
        :type upgrade_status: ~storage_cache_management_client.models.CacheUpgradeStatus
        :param extended_groups_enabled: This indicates if Extended Groups is enabled.
        :type extended_groups_enabled: bool
        :param username_source: This setting determines how the system gets username and group names
         for clients.
        :type username_source: str or ~storage_cache_management_client.models.UsernameSource
        :param group_file_uri: The URI of the file containing the group information (in /etc/group file
         format). This field must be populated when 'usernameSource' is set to 'File'.
        :type group_file_uri: str
        :param user_file_uri: The URI of the file containing the user information (in /etc/passwd file
         format). This field must be populated when 'usernameSource' is set to 'File'.
        :type user_file_uri: str
        :param ldap_server: The fully qualified domain name or IP address of the LDAP server to use.
        :type ldap_server: str
        :param ldap_base_dn: The base distinguished name for the LDAP domain.
        :type ldap_base_dn: str
        :param encrypt_ldap_connection: This indicates if the LDAP connection should be encrypted.
        :type encrypt_ldap_connection: bool
        :param require_valid_certificate: Determines if the certificates should be validated by a
         certificate authority. When true, caCertificateURI must be provided.
        :type require_valid_certificate: bool
        :param auto_download_certificate: Determines if the certificate should be automatically
         downloaded. This applies to 'caCertificateURI' when 'requireValidCertificate' is true, or a
         self signed certificate otherwise.
        :type auto_download_certificate: bool
        :param ca_certificate_uri: The URI of the CA certificate to validate the LDAP secure
         connection. This field must be populated when 'requireValidCertificate' is set to true.
        :type ca_certificate_uri: str
        :param credentials: When present, these are the credentials for the secure LDAP connection.
        :type credentials: ~storage_cache_management_client.models.CacheUsernameDownloadSettingsCredentials
        :param primary_dns_ip_address: Primary DNS IP address used to resolve the Active Directory
         domain controller's fully qualified domain name.
        :type primary_dns_ip_address: str
        :param secondary_dns_ip_address: Secondary DNS IP address used to resolve the Active Directory
         domain controller's fully qualified domain name.
        :type secondary_dns_ip_address: str
        :param domain_name: The fully qualified domain name of the Active Directory domain controller.
        :type domain_name: str
        :param domain_net_bios: The Active Directory domain's NetBIOS name.
        :type domain_net_bios: str
        :param smb_server_name: The name (NetBIOS) used for the HPC Cache to join the Active Directory
         domain. Length must not be greater than 15 and chars must be from the [-0-9a-zA-Z_] char class.
        :type smb_server_name: str
        :param access_policies: NFS access policies defined for this cache.
        :type access_policies: list[~storage_cache_management_client.models.NfsAccessPolicy]
        :param key_url: The URL referencing a key encryption key in Key Vault.
        :type key_url: str
        :param id: Resource Id.
        :type id: str
        :param mtu: The IPv4 maximum transmission unit configured for the subnet.
        :type mtu: int
        :param name: SKU name for this Cache.
        :type name: str
        :param type: The type of identity used for the cache.
        :type type: str or ~storage_cache_management_client.models.CacheIdentityType
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Cache, or the result of cls(response)
        :rtype: ~storage_cache_management_client.models.Cache
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.Cache"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        # Reassemble the flattened method parameters into the single Cache model
        # that the service expects as the PATCH body. The long keyword names
        # (e.g. credentials_directory_services_settings_username_download_credentials)
        # are the generated flattened property paths on the model.
        cache = models.Cache(tags=tags, location=location, cache_size_gb=cache_size_gb, provisioning_state=provisioning_state, subnet=subnet, upgrade_status=upgrade_status, extended_groups_enabled=extended_groups_enabled, username_source=username_source, group_file_uri=group_file_uri, user_file_uri=user_file_uri, ldap_server=ldap_server, ldap_base_dn=ldap_base_dn, encrypt_ldap_connection=encrypt_ldap_connection, require_valid_certificate=require_valid_certificate, auto_download_certificate=auto_download_certificate, ca_certificate_uri=ca_certificate_uri, credentials_directory_services_settings_username_download_credentials=credentials, primary_dns_ip_address=primary_dns_ip_address, secondary_dns_ip_address=secondary_dns_ip_address, domain_name=domain_name, domain_net_bios=domain_net_bios, smb_server_name=smb_server_name, access_policies=access_policies, key_url=key_url, id_encryption_settings_key_encryption_key_source_vault_id=id, mtu=mtu, name_sku_name=name, type_identity_type=type)
        api_version = "2020-10-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.update.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        if cache is not None:
            body_content = self._serialize.body(cache, 'Cache')
        else:
            body_content = None
        body_content_kwargs['content'] = body_content
        # Synchronous PATCH: the service returns the updated Cache directly
        # (only 200 is accepted as success here; no LRO polling).
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('Cache', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    update.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}'}  # type: ignore
+
+ def _flush_initial(
+ self,
+ resource_group_name, # type: str
+ cache_name, # type: str
+ **kwargs # type: Any
+ ):
+ # type: (...) -> None
+ cls = kwargs.pop('cls', None) # type: ClsType[None]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+ api_version = "2020-10-01"
+ accept = "application/json"
+
+ # Construct URL
+ url = self._flush_initial.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
+ 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
+ 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ request = self._client.post(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202, 204]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise HttpResponseError(response=response, error_format=ARMErrorFormat)
+
+ if cls:
+ return cls(pipeline_response, None, {})
+
+ _flush_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/flush'} # type: ignore
+
    def begin_flush(
        self,
        resource_group_name,  # type: str
        cache_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Tells a Cache to write all dirty data to the Storage Target(s). During the flush, clients will
        see errors returned until the flush is complete.

        :param resource_group_name: Target resource group.
        :type resource_group_name: str
        :param cache_name: Name of Cache. Length of name must not be greater than 80 and chars must be
         from the [-0-9a-zA-Z_] char class.
        :type cache_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Fresh operation: issue the initial POST; `cls=lambda ...` returns
            # the raw pipeline response so the poller can read the LRO headers.
            raw_result = self._flush_initial(
                resource_group_name=resource_group_name,
                cache_name=cache_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # Consumed by the initial request; must not leak into the polling pipeline.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # No response body for flush; only invoke a caller-supplied hook.
            if cls:
                return cls(pipeline_response, None, {})

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
        }

        # Final state is read from the Azure-AsyncOperation header for this LRO.
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously saved poller instead of starting a new request.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_flush.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/flush'}  # type: ignore
+
+ def _start_initial(
+ self,
+ resource_group_name, # type: str
+ cache_name, # type: str
+ **kwargs # type: Any
+ ):
+ # type: (...) -> None
+ cls = kwargs.pop('cls', None) # type: ClsType[None]
+ error_map = {
+ 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+ }
+ error_map.update(kwargs.pop('error_map', {}))
+ api_version = "2020-10-01"
+ accept = "application/json"
+
+ # Construct URL
+ url = self._start_initial.metadata['url'] # type: ignore
+ path_format_arguments = {
+ 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
+ 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
+ 'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
+ }
+ url = self._client.format_url(url, **path_format_arguments)
+
+ # Construct parameters
+ query_parameters = {} # type: Dict[str, Any]
+ query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
+
+ # Construct headers
+ header_parameters = {} # type: Dict[str, Any]
+ header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+ request = self._client.post(url, query_parameters, header_parameters)
+ pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+ response = pipeline_response.http_response
+
+ if response.status_code not in [200, 202, 204]:
+ map_error(status_code=response.status_code, response=response, error_map=error_map)
+ raise HttpResponseError(response=response, error_format=ARMErrorFormat)
+
+ if cls:
+ return cls(pipeline_response, None, {})
+
+ _start_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/start'} # type: ignore
+
    def begin_start(
        self,
        resource_group_name,  # type: str
        cache_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Tells a Stopped state Cache to transition to Active state.

        :param resource_group_name: Target resource group.
        :type resource_group_name: str
        :param cache_name: Name of Cache. Length of name must not be greater than 80 and chars must be
         from the [-0-9a-zA-Z_] char class.
        :type cache_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Fresh operation: issue the initial POST; `cls=lambda ...` returns
            # the raw pipeline response so the poller can read the LRO headers.
            raw_result = self._start_initial(
                resource_group_name=resource_group_name,
                cache_name=cache_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # Consumed by the initial request; must not leak into the polling pipeline.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # No response body for start; only invoke a caller-supplied hook.
            if cls:
                return cls(pipeline_response, None, {})

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
        }

        # Final state is read from the Azure-AsyncOperation header for this LRO.
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously saved poller instead of starting a new request.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_start.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/start'}  # type: ignore
+
+    def _stop_initial(
+        self,
+        resource_group_name, # type: str
+        cache_name, # type: str
+        **kwargs # type: Any
+    ):
+        # type: (...) -> None
+        # Sends the initial POST to .../caches/{cacheName}/stop that kicks off the
+        # long-running "stop" operation. begin_stop wraps this call with an LRO poller.
+        cls = kwargs.pop('cls', None) # type: ClsType[None]
+        # Map well-known status codes to typed azure-core exceptions; callers may
+        # extend/override the mapping via an 'error_map' kwarg.
+        error_map = {
+            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+        }
+        error_map.update(kwargs.pop('error_map', {}))
+        api_version = "2020-10-01"
+        accept = "application/json"
+
+        # Construct URL by filling the metadata template with serialized path args.
+        url = self._stop_initial.metadata['url'] # type: ignore
+        path_format_arguments = {
+            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
+            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
+            'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {} # type: Dict[str, Any]
+        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
+
+        # Construct headers
+        header_parameters = {} # type: Dict[str, Any]
+        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+        request = self._client.post(url, query_parameters, header_parameters)
+        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        # 200/202/204 are all acceptable responses for triggering this LRO.
+        if response.status_code not in [200, 202, 204]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
+
+        # No response body to deserialize; only invoke a custom callback if supplied.
+        if cls:
+            return cls(pipeline_response, None, {})
+
+    _stop_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/stop'} # type: ignore
+
+    def begin_stop(
+        self,
+        resource_group_name, # type: str
+        cache_name, # type: str
+        **kwargs # type: Any
+    ):
+        # type: (...) -> LROPoller[None]
+        """Tells an Active Cache to transition to Stopped state.
+
+        :param resource_group_name: Target resource group.
+        :type resource_group_name: str
+        :param cache_name: Name of Cache. Length of name must not be greater than 80 and chars must be
+         from the [-0-9a-zA-Z_] char class.
+        :type cache_name: str
+        :keyword callable cls: A custom type or function that will be passed the direct response
+        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+        :keyword polling: True for ARMPolling, False for no polling, or a
+         polling object for personal polling strategy
+        :paramtype polling: bool or ~azure.core.polling.PollingMethod
+        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
+        :return: An instance of LROPoller that returns either None or the result of cls(response)
+        :rtype: ~azure.core.polling.LROPoller[None]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
+        cls = kwargs.pop('cls', None) # type: ClsType[None]
+        lro_delay = kwargs.pop(
+            'polling_interval',
+            self._config.polling_interval
+        )
+        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
+        # Only issue the initial request when not resuming from a saved poller state.
+        if cont_token is None:
+            raw_result = self._stop_initial(
+                resource_group_name=resource_group_name,
+                cache_name=cache_name,
+                cls=lambda x,y,z: x, # keep the raw PipelineResponse for the poller
+                **kwargs
+            )
+
+        # Consumed by the initial call; remove so the polling method does not
+        # receive them again.
+        kwargs.pop('error_map', None)
+        kwargs.pop('content_type', None)
+
+        def get_long_running_output(pipeline_response):
+            # Operation has no result body; only call the custom deserializer if given.
+            if cls:
+                return cls(pipeline_response, None, {})
+
+        path_format_arguments = {
+            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
+            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
+            'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
+        }
+
+        # Final LRO state is read via the Azure-AsyncOperation header for this operation.
+        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
+        elif polling is False: polling_method = NoPolling()
+        else: polling_method = polling
+        if cont_token:
+            return LROPoller.from_continuation_token(
+                polling_method=polling_method,
+                continuation_token=cont_token,
+                client=self._client,
+                deserialization_callback=get_long_running_output
+            )
+        else:
+            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
+    begin_stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/stop'} # type: ignore
+
+    def _upgrade_firmware_initial(
+        self,
+        resource_group_name, # type: str
+        cache_name, # type: str
+        **kwargs # type: Any
+    ):
+        # type: (...) -> None
+        # Sends the initial POST to .../caches/{cacheName}/upgrade that kicks off the
+        # long-running firmware-upgrade operation. begin_upgrade_firmware polls it.
+        cls = kwargs.pop('cls', None) # type: ClsType[None]
+        # Map well-known status codes to typed azure-core exceptions; callers may
+        # extend/override the mapping via an 'error_map' kwarg.
+        error_map = {
+            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+        }
+        error_map.update(kwargs.pop('error_map', {}))
+        api_version = "2020-10-01"
+        accept = "application/json"
+
+        # Construct URL by filling the metadata template with serialized path args.
+        url = self._upgrade_firmware_initial.metadata['url'] # type: ignore
+        path_format_arguments = {
+            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
+            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
+            'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {} # type: Dict[str, Any]
+        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
+
+        # Construct headers
+        header_parameters = {} # type: Dict[str, Any]
+        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+        request = self._client.post(url, query_parameters, header_parameters)
+        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        # 200/202/204 are all acceptable responses for triggering this LRO.
+        if response.status_code not in [200, 202, 204]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
+
+        # No response body to deserialize; only invoke a custom callback if supplied.
+        if cls:
+            return cls(pipeline_response, None, {})
+
+    _upgrade_firmware_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/upgrade'} # type: ignore
+
+    def begin_upgrade_firmware(
+        self,
+        resource_group_name, # type: str
+        cache_name, # type: str
+        **kwargs # type: Any
+    ):
+        # type: (...) -> LROPoller[None]
+        """Upgrade a Cache's firmware if a new version is available. Otherwise, this operation has no
+        effect.
+
+        :param resource_group_name: Target resource group.
+        :type resource_group_name: str
+        :param cache_name: Name of Cache. Length of name must not be greater than 80 and chars must be
+         from the [-0-9a-zA-Z_] char class.
+        :type cache_name: str
+        :keyword callable cls: A custom type or function that will be passed the direct response
+        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+        :keyword polling: True for ARMPolling, False for no polling, or a
+         polling object for personal polling strategy
+        :paramtype polling: bool or ~azure.core.polling.PollingMethod
+        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
+        :return: An instance of LROPoller that returns either None or the result of cls(response)
+        :rtype: ~azure.core.polling.LROPoller[None]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
+        cls = kwargs.pop('cls', None) # type: ClsType[None]
+        lro_delay = kwargs.pop(
+            'polling_interval',
+            self._config.polling_interval
+        )
+        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
+        # Only issue the initial request when not resuming from a saved poller state.
+        if cont_token is None:
+            raw_result = self._upgrade_firmware_initial(
+                resource_group_name=resource_group_name,
+                cache_name=cache_name,
+                cls=lambda x,y,z: x, # keep the raw PipelineResponse for the poller
+                **kwargs
+            )
+
+        # Consumed by the initial call; remove so the polling method does not
+        # receive them again.
+        kwargs.pop('error_map', None)
+        kwargs.pop('content_type', None)
+
+        def get_long_running_output(pipeline_response):
+            # Operation has no result body; only call the custom deserializer if given.
+            if cls:
+                return cls(pipeline_response, None, {})
+
+        path_format_arguments = {
+            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
+            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
+            'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
+        }
+
+        # Final LRO state is read via the Azure-AsyncOperation header for this operation.
+        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
+        elif polling is False: polling_method = NoPolling()
+        else: polling_method = polling
+        if cont_token:
+            return LROPoller.from_continuation_token(
+                polling_method=polling_method,
+                continuation_token=cont_token,
+                client=self._client,
+                deserialization_callback=get_long_running_output
+            )
+        else:
+            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
+    begin_upgrade_firmware.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/upgrade'} # type: ignore
diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_operation_operations.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_operation_operations.py
new file mode 100644
index 00000000000..40befe5202f
--- /dev/null
+++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_operation_operations.py
@@ -0,0 +1,109 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from typing import TYPE_CHECKING
+import warnings
+
+from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
+from azure.core.paging import ItemPaged
+from azure.core.pipeline import PipelineResponse
+from azure.core.pipeline.transport import HttpRequest, HttpResponse
+from azure.mgmt.core.exceptions import ARMErrorFormat
+
+from .. import models
+
+if TYPE_CHECKING:
+ # pylint: disable=unused-import,ungrouped-imports
+ from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
+
+ T = TypeVar('T')
+ ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
+
+class OperationOperations(object):
+    """OperationOperations operations.
+
+    You should not instantiate this class directly. Instead, you should create a Client instance that
+    instantiates it for you and attaches it as an attribute.
+
+    :ivar models: Alias to model classes used in this operation group.
+    :type models: ~storage_cache_management_client.models
+    :param client: Client for service requests.
+    :param config: Configuration of service client.
+    :param serializer: An object model serializer.
+    :param deserializer: An object model deserializer.
+    """
+
+    models = models
+
+    def __init__(self, client, config, serializer, deserializer):
+        # Keep references to the shared pipeline client, (de)serializers and
+        # client configuration used by the operation methods below.
+        self._client = client
+        self._serialize = serializer
+        self._deserialize = deserializer
+        self._config = config
+
+    def list(
+        self,
+        **kwargs # type: Any
+    ):
+        # type: (...) -> Iterable["models.ApiOperationListResult"]
+        """Lists all of the available Resource Provider operations.
+
+        :keyword callable cls: A custom type or function that will be passed the direct response
+        :return: An iterator like instance of either ApiOperationListResult or the result of cls(response)
+        :rtype: ~azure.core.paging.ItemPaged[~storage_cache_management_client.models.ApiOperationListResult]
+        :raises: ~azure.core.exceptions.HttpResponseError
+        """
+        cls = kwargs.pop('cls', None) # type: ClsType["models.ApiOperationListResult"]
+        # Map well-known status codes to typed azure-core exceptions; callers may
+        # extend/override the mapping via an 'error_map' kwarg.
+        error_map = {
+            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+        }
+        error_map.update(kwargs.pop('error_map', {}))
+        api_version = "2020-10-01"
+        accept = "application/json"
+
+        def prepare_request(next_link=None):
+            # Build the GET request for one page: the first page uses the templated
+            # URL, later pages follow the service-supplied next_link verbatim.
+            # Construct headers
+            header_parameters = {} # type: Dict[str, Any]
+            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+            if not next_link:
+                # Construct URL
+                url = self.list.metadata['url'] # type: ignore
+                # Construct parameters
+                query_parameters = {} # type: Dict[str, Any]
+                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
+
+                request = self._client.get(url, query_parameters, header_parameters)
+            else:
+                url = next_link
+                query_parameters = {} # type: Dict[str, Any]
+                request = self._client.get(url, query_parameters, header_parameters)
+            return request
+
+        def extract_data(pipeline_response):
+            # Deserialize one page and return (link to next page or None, item iterator).
+            deserialized = self._deserialize('ApiOperationListResult', pipeline_response)
+            list_of_elem = deserialized.value
+            if cls:
+                list_of_elem = cls(list_of_elem)
+            return deserialized.next_link or None, iter(list_of_elem)
+
+        def get_next(next_link=None):
+            # Fetch one page, mapping error status codes to typed exceptions.
+            request = prepare_request(next_link)
+
+            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+            response = pipeline_response.http_response
+
+            if response.status_code not in [200]:
+                map_error(status_code=response.status_code, response=response, error_map=error_map)
+                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
+
+            return pipeline_response
+
+        # ItemPaged lazily drives get_next/extract_data as the caller iterates.
+        return ItemPaged(
+            get_next, extract_data
+        )
+    list.metadata = {'url': '/providers/Microsoft.StorageCache/operations'} # type: ignore
diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_sku_operations.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_sku_operations.py
new file mode 100644
index 00000000000..8ece36afb95
--- /dev/null
+++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_sku_operations.py
@@ -0,0 +1,113 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from typing import TYPE_CHECKING
+import warnings
+
+from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
+from azure.core.paging import ItemPaged
+from azure.core.pipeline import PipelineResponse
+from azure.core.pipeline.transport import HttpRequest, HttpResponse
+from azure.mgmt.core.exceptions import ARMErrorFormat
+
+from .. import models
+
+if TYPE_CHECKING:
+ # pylint: disable=unused-import,ungrouped-imports
+ from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
+
+ T = TypeVar('T')
+ ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
+
+class SkuOperations(object):
+    """SkuOperations operations.
+
+    You should not instantiate this class directly. Instead, you should create a Client instance that
+    instantiates it for you and attaches it as an attribute.
+
+    :ivar models: Alias to model classes used in this operation group.
+    :type models: ~storage_cache_management_client.models
+    :param client: Client for service requests.
+    :param config: Configuration of service client.
+    :param serializer: An object model serializer.
+    :param deserializer: An object model deserializer.
+    """
+
+    models = models
+
+    def __init__(self, client, config, serializer, deserializer):
+        # Keep references to the shared pipeline client, (de)serializers and
+        # client configuration used by the operation methods below.
+        self._client = client
+        self._serialize = serializer
+        self._deserialize = deserializer
+        self._config = config
+
+    def list(
+        self,
+        **kwargs # type: Any
+    ):
+        # type: (...) -> Iterable["models.ResourceSkusResult"]
+        """Get the list of StorageCache.Cache SKUs available to this subscription.
+
+        :keyword callable cls: A custom type or function that will be passed the direct response
+        :return: An iterator like instance of either ResourceSkusResult or the result of cls(response)
+        :rtype: ~azure.core.paging.ItemPaged[~storage_cache_management_client.models.ResourceSkusResult]
+        :raises: ~azure.core.exceptions.HttpResponseError
+        """
+        cls = kwargs.pop('cls', None) # type: ClsType["models.ResourceSkusResult"]
+        # Map well-known status codes to typed azure-core exceptions; callers may
+        # extend/override the mapping via an 'error_map' kwarg.
+        error_map = {
+            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+        }
+        error_map.update(kwargs.pop('error_map', {}))
+        api_version = "2020-10-01"
+        accept = "application/json"
+
+        def prepare_request(next_link=None):
+            # Build the GET request for one page: the first page uses the templated
+            # URL, later pages follow the service-supplied next_link verbatim.
+            # Construct headers
+            header_parameters = {} # type: Dict[str, Any]
+            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+            if not next_link:
+                # Construct URL
+                url = self.list.metadata['url'] # type: ignore
+                path_format_arguments = {
+                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
+                }
+                url = self._client.format_url(url, **path_format_arguments)
+                # Construct parameters
+                query_parameters = {} # type: Dict[str, Any]
+                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
+
+                request = self._client.get(url, query_parameters, header_parameters)
+            else:
+                url = next_link
+                query_parameters = {} # type: Dict[str, Any]
+                request = self._client.get(url, query_parameters, header_parameters)
+            return request
+
+        def extract_data(pipeline_response):
+            # Deserialize one page and return (link to next page or None, item iterator).
+            deserialized = self._deserialize('ResourceSkusResult', pipeline_response)
+            list_of_elem = deserialized.value
+            if cls:
+                list_of_elem = cls(list_of_elem)
+            return deserialized.next_link or None, iter(list_of_elem)
+
+        def get_next(next_link=None):
+            # Fetch one page, mapping error status codes to typed exceptions.
+            request = prepare_request(next_link)
+
+            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+            response = pipeline_response.http_response
+
+            if response.status_code not in [200]:
+                map_error(status_code=response.status_code, response=response, error_map=error_map)
+                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
+
+            return pipeline_response
+
+        # ItemPaged lazily drives get_next/extract_data as the caller iterates.
+        return ItemPaged(
+            get_next, extract_data
+        )
+    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.StorageCache/skus'} # type: ignore
diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_storage_target_operations.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_storage_target_operations.py
new file mode 100644
index 00000000000..7a72a967380
--- /dev/null
+++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_storage_target_operations.py
@@ -0,0 +1,484 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from typing import TYPE_CHECKING
+import warnings
+
+from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
+from azure.core.paging import ItemPaged
+from azure.core.pipeline import PipelineResponse
+from azure.core.pipeline.transport import HttpRequest, HttpResponse
+from azure.core.polling import LROPoller, NoPolling, PollingMethod
+from azure.mgmt.core.exceptions import ARMErrorFormat
+from azure.mgmt.core.polling.arm_polling import ARMPolling
+
+from .. import models
+
+if TYPE_CHECKING:
+ # pylint: disable=unused-import,ungrouped-imports
+ from typing import Any, Callable, Dict, Generic, Iterable, List, Optional, TypeVar, Union
+
+ T = TypeVar('T')
+ ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
+
+class StorageTargetOperations(object):
+ """StorageTargetOperations operations.
+
+ You should not instantiate this class directly. Instead, you should create a Client instance that
+ instantiates it for you and attaches it as an attribute.
+
+ :ivar models: Alias to model classes used in this operation group.
+ :type models: ~storage_cache_management_client.models
+ :param client: Client for service requests.
+ :param config: Configuration of service client.
+ :param serializer: An object model serializer.
+ :param deserializer: An object model deserializer.
+ """
+
+ models = models
+
+    def __init__(self, client, config, serializer, deserializer):
+        # Keep references to the shared pipeline client, (de)serializers and
+        # client configuration used by the operation methods below.
+        self._client = client
+        self._serialize = serializer
+        self._deserialize = deserializer
+        self._config = config
+
+    def list_by_cache(
+        self,
+        resource_group_name, # type: str
+        cache_name, # type: str
+        **kwargs # type: Any
+    ):
+        # type: (...) -> Iterable["models.StorageTargetsResult"]
+        """Returns a list of Storage Targets for the specified Cache.
+
+        :param resource_group_name: Target resource group.
+        :type resource_group_name: str
+        :param cache_name: Name of Cache. Length of name must not be greater than 80 and chars must be
+         from the [-0-9a-zA-Z_] char class.
+        :type cache_name: str
+        :keyword callable cls: A custom type or function that will be passed the direct response
+        :return: An iterator like instance of either StorageTargetsResult or the result of cls(response)
+        :rtype: ~azure.core.paging.ItemPaged[~storage_cache_management_client.models.StorageTargetsResult]
+        :raises: ~azure.core.exceptions.HttpResponseError
+        """
+        cls = kwargs.pop('cls', None) # type: ClsType["models.StorageTargetsResult"]
+        # Map well-known status codes to typed azure-core exceptions; callers may
+        # extend/override the mapping via an 'error_map' kwarg.
+        error_map = {
+            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+        }
+        error_map.update(kwargs.pop('error_map', {}))
+        api_version = "2020-10-01"
+        accept = "application/json"
+
+        def prepare_request(next_link=None):
+            # Build the GET request for one page: the first page uses the templated
+            # URL, later pages follow the service-supplied next_link verbatim.
+            # Construct headers
+            header_parameters = {} # type: Dict[str, Any]
+            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+            if not next_link:
+                # Construct URL
+                url = self.list_by_cache.metadata['url'] # type: ignore
+                path_format_arguments = {
+                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
+                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
+                    'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
+                }
+                url = self._client.format_url(url, **path_format_arguments)
+                # Construct parameters
+                query_parameters = {} # type: Dict[str, Any]
+                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
+
+                request = self._client.get(url, query_parameters, header_parameters)
+            else:
+                url = next_link
+                query_parameters = {} # type: Dict[str, Any]
+                request = self._client.get(url, query_parameters, header_parameters)
+            return request
+
+        def extract_data(pipeline_response):
+            # Deserialize one page and return (link to next page or None, item iterator).
+            deserialized = self._deserialize('StorageTargetsResult', pipeline_response)
+            list_of_elem = deserialized.value
+            if cls:
+                list_of_elem = cls(list_of_elem)
+            return deserialized.next_link or None, iter(list_of_elem)
+
+        def get_next(next_link=None):
+            # Fetch one page, mapping error status codes to typed exceptions.
+            request = prepare_request(next_link)
+
+            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+            response = pipeline_response.http_response
+
+            if response.status_code not in [200]:
+                map_error(status_code=response.status_code, response=response, error_map=error_map)
+                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
+
+            return pipeline_response
+
+        # ItemPaged lazily drives get_next/extract_data as the caller iterates.
+        return ItemPaged(
+            get_next, extract_data
+        )
+    list_by_cache.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/storageTargets'} # type: ignore
+
+    def _delete_initial(
+        self,
+        resource_group_name, # type: str
+        cache_name, # type: str
+        storage_target_name, # type: str
+        **kwargs # type: Any
+    ):
+        # type: (...) -> None
+        # Sends the initial DELETE for the storage target; begin_delete wraps this
+        # call with an LRO poller to track the asynchronous removal.
+        cls = kwargs.pop('cls', None) # type: ClsType[None]
+        # Map well-known status codes to typed azure-core exceptions; callers may
+        # extend/override the mapping via an 'error_map' kwarg.
+        error_map = {
+            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+        }
+        error_map.update(kwargs.pop('error_map', {}))
+        api_version = "2020-10-01"
+        accept = "application/json"
+
+        # Construct URL by filling the metadata template with serialized path args.
+        url = self._delete_initial.metadata['url'] # type: ignore
+        path_format_arguments = {
+            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
+            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
+            'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
+            'storageTargetName': self._serialize.url("storage_target_name", storage_target_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {} # type: Dict[str, Any]
+        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
+
+        # Construct headers
+        header_parameters = {} # type: Dict[str, Any]
+        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+        request = self._client.delete(url, query_parameters, header_parameters)
+        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        # 200/202/204 are all acceptable responses for triggering this LRO.
+        if response.status_code not in [200, 202, 204]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
+
+        # No response body to deserialize; only invoke a custom callback if supplied.
+        if cls:
+            return cls(pipeline_response, None, {})
+
+    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/storageTargets/{storageTargetName}'} # type: ignore
+
+    def begin_delete(
+        self,
+        resource_group_name, # type: str
+        cache_name, # type: str
+        storage_target_name, # type: str
+        **kwargs # type: Any
+    ):
+        # type: (...) -> LROPoller[None]
+        """Removes a Storage Target from a Cache. This operation is allowed at any time, but if the Cache
+        is down or unhealthy, the actual removal of the Storage Target may be delayed until the Cache
+        is healthy again. Note that if the Cache has data to flush to the Storage Target, the data will
+        be flushed before the Storage Target will be deleted.
+
+        :param resource_group_name: Target resource group.
+        :type resource_group_name: str
+        :param cache_name: Name of Cache. Length of name must not be greater than 80 and chars must be
+         from the [-0-9a-zA-Z_] char class.
+        :type cache_name: str
+        :param storage_target_name: Name of Storage Target.
+        :type storage_target_name: str
+        :keyword callable cls: A custom type or function that will be passed the direct response
+        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+        :keyword polling: True for ARMPolling, False for no polling, or a
+         polling object for personal polling strategy
+        :paramtype polling: bool or ~azure.core.polling.PollingMethod
+        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
+        :return: An instance of LROPoller that returns either None or the result of cls(response)
+        :rtype: ~azure.core.polling.LROPoller[None]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
+        cls = kwargs.pop('cls', None) # type: ClsType[None]
+        lro_delay = kwargs.pop(
+            'polling_interval',
+            self._config.polling_interval
+        )
+        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
+        # Only issue the initial request when not resuming from a saved poller state.
+        if cont_token is None:
+            raw_result = self._delete_initial(
+                resource_group_name=resource_group_name,
+                cache_name=cache_name,
+                storage_target_name=storage_target_name,
+                cls=lambda x,y,z: x, # keep the raw PipelineResponse for the poller
+                **kwargs
+            )
+
+        # Consumed by the initial call; remove so the polling method does not
+        # receive them again.
+        kwargs.pop('error_map', None)
+        kwargs.pop('content_type', None)
+
+        def get_long_running_output(pipeline_response):
+            # Operation has no result body; only call the custom deserializer if given.
+            if cls:
+                return cls(pipeline_response, None, {})
+
+        path_format_arguments = {
+            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
+            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
+            'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
+            'storageTargetName': self._serialize.url("storage_target_name", storage_target_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
+        }
+
+        # NOTE: unlike begin_stop/begin_upgrade_firmware, no lro_options are passed
+        # here, so ARMPolling uses its default final-state resolution for deletes.
+        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
+        elif polling is False: polling_method = NoPolling()
+        else: polling_method = polling
+        if cont_token:
+            return LROPoller.from_continuation_token(
+                polling_method=polling_method,
+                continuation_token=cont_token,
+                client=self._client,
+                deserialization_callback=get_long_running_output
+            )
+        else:
+            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
+    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/storageTargets/{storageTargetName}'} # type: ignore
+
+    def get(
+        self,
+        resource_group_name, # type: str
+        cache_name, # type: str
+        storage_target_name, # type: str
+        **kwargs # type: Any
+    ):
+        # type: (...) -> "models.StorageTarget"
+        """Returns a Storage Target from a Cache.
+
+        :param resource_group_name: Target resource group.
+        :type resource_group_name: str
+        :param cache_name: Name of Cache. Length of name must not be greater than 80 and chars must be
+         from the [-0-9a-zA-Z_] char class.
+        :type cache_name: str
+        :param storage_target_name: Name of the Storage Target. Length of name must not be greater than
+         80 and chars must be from the [-0-9a-zA-Z_] char class.
+        :type storage_target_name: str
+        :keyword callable cls: A custom type or function that will be passed the direct response
+        :return: StorageTarget, or the result of cls(response)
+        :rtype: ~storage_cache_management_client.models.StorageTarget
+        :raises: ~azure.core.exceptions.HttpResponseError
+        """
+        cls = kwargs.pop('cls', None) # type: ClsType["models.StorageTarget"]
+        # Map well-known status codes to typed azure-core exceptions; callers may
+        # extend/override the mapping via an 'error_map' kwarg.
+        error_map = {
+            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+        }
+        error_map.update(kwargs.pop('error_map', {}))
+        api_version = "2020-10-01"
+        accept = "application/json"
+
+        # Construct URL by filling the metadata template with serialized path args.
+        url = self.get.metadata['url'] # type: ignore
+        path_format_arguments = {
+            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
+            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
+            'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
+            'storageTargetName': self._serialize.url("storage_target_name", storage_target_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {} # type: Dict[str, Any]
+        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
+
+        # Construct headers
+        header_parameters = {} # type: Dict[str, Any]
+        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+        request = self._client.get(url, query_parameters, header_parameters)
+        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
+
+        # Deserialize the body into the model; a custom cls callback, if supplied,
+        # receives the raw response, the model and (empty) response headers.
+        deserialized = self._deserialize('StorageTarget', pipeline_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, {})
+
+        return deserialized
+    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/storageTargets/{storageTargetName}'} # type: ignore
+
+    def _create_or_update_initial(
+        self,
+        resource_group_name,  # type: str
+        cache_name,  # type: str
+        storage_target_name,  # type: str
+        junctions=None,  # type: Optional[List["models.NamespaceJunction"]]
+        target_type=None,  # type: Optional[Union[str, "models.StorageTargetType"]]
+        provisioning_state=None,  # type: Optional[Union[str, "models.ProvisioningStateType"]]
+        nfs3=None,  # type: Optional["models.Nfs3Target"]
+        unknown_map=None,  # type: Optional[Dict[str, str]]
+        target=None,  # type: Optional[str]
+        **kwargs  # type: Any
+    ):
+        # type: (...) -> Optional["models.StorageTarget"]
+        # Initial PUT of the create-or-update long-running operation. Returns the
+        # deserialized StorageTarget for an immediate 200/201 reply, or None when
+        # the service answers 202 (the result then arrives through polling).
+        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["models.StorageTarget"]]
+        error_map = {
+            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+        }
+        # Callers may extend or override the default ARM error mapping.
+        error_map.update(kwargs.pop('error_map', {}))
+
+        # Flatten the individual keyword arguments into the request body model.
+        storagetarget = models.StorageTarget(junctions=junctions, target_type=target_type, provisioning_state=provisioning_state, nfs3=nfs3, unknown_map=unknown_map, target=target)
+        api_version = "2020-10-01"
+        content_type = kwargs.pop("content_type", "application/json")
+        accept = "application/json"
+
+        # Construct URL
+        url = self._create_or_update_initial.metadata['url']  # type: ignore
+        path_format_arguments = {
+            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
+            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
+            'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
+            'storageTargetName': self._serialize.url("storage_target_name", storage_target_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}  # type: Dict[str, Any]
+        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
+
+        # Construct headers
+        header_parameters = {}  # type: Dict[str, Any]
+        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
+        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+        body_content_kwargs = {}  # type: Dict[str, Any]
+        # NOTE: storagetarget is always constructed above, so a body is always
+        # serialized here; the None branch is generated boilerplate.
+        if storagetarget is not None:
+            body_content = self._serialize.body(storagetarget, 'StorageTarget')
+        else:
+            body_content = None
+        body_content_kwargs['content'] = body_content
+        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
+        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200, 201, 202]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
+
+        # 200/201 carry the resource in the response body; 202 leaves it None.
+        deserialized = None
+        if response.status_code == 200:
+            deserialized = self._deserialize('StorageTarget', pipeline_response)
+
+        if response.status_code == 201:
+            deserialized = self._deserialize('StorageTarget', pipeline_response)
+
+        if cls:
+            return cls(pipeline_response, deserialized, {})
+
+        return deserialized
+    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/storageTargets/{storageTargetName}'}  # type: ignore
+
+    def begin_create_or_update(
+        self,
+        resource_group_name,  # type: str
+        cache_name,  # type: str
+        storage_target_name,  # type: str
+        junctions=None,  # type: Optional[List["models.NamespaceJunction"]]
+        target_type=None,  # type: Optional[Union[str, "models.StorageTargetType"]]
+        provisioning_state=None,  # type: Optional[Union[str, "models.ProvisioningStateType"]]
+        nfs3=None,  # type: Optional["models.Nfs3Target"]
+        unknown_map=None,  # type: Optional[Dict[str, str]]
+        target=None,  # type: Optional[str]
+        **kwargs  # type: Any
+    ):
+        # type: (...) -> LROPoller["models.StorageTarget"]
+        """Create or update a Storage Target. This operation is allowed at any time, but if the Cache is
+        down or unhealthy, the actual creation/modification of the Storage Target may be delayed until
+        the Cache is healthy again.
+
+        :param resource_group_name: Target resource group.
+        :type resource_group_name: str
+        :param cache_name: Name of Cache. Length of name must not be greater than 80 and chars must be
+         from the [-0-9a-zA-Z_] char class.
+        :type cache_name: str
+        :param storage_target_name: Name of the Storage Target. Length of name must not be greater than
+         80 and chars must be from the [-0-9a-zA-Z_] char class.
+        :type storage_target_name: str
+        :param junctions: List of Cache namespace junctions to target for namespace associations.
+        :type junctions: list[~storage_cache_management_client.models.NamespaceJunction]
+        :param target_type: Type of the Storage Target.
+        :type target_type: str or ~storage_cache_management_client.models.StorageTargetType
+        :param provisioning_state: ARM provisioning state, see https://github.com/Azure/azure-resource-
+         manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property.
+        :type provisioning_state: str or ~storage_cache_management_client.models.ProvisioningStateType
+        :param nfs3: Properties when targetType is nfs3.
+        :type nfs3: ~storage_cache_management_client.models.Nfs3Target
+        :param unknown_map: Dictionary of string->string pairs containing information about the Storage
+         Target.
+        :type unknown_map: dict[str, str]
+        :param target: Resource ID of storage container.
+        :type target: str
+        :keyword callable cls: A custom type or function that will be passed the direct response
+        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
+        :keyword polling: True for ARMPolling, False for no polling, or a
+         polling object for personal polling strategy
+        :paramtype polling: bool or ~azure.core.polling.PollingMethod
+        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
+        :return: An instance of LROPoller that returns either StorageTarget or the result of cls(response)
+        :rtype: ~azure.core.polling.LROPoller[~storage_cache_management_client.models.StorageTarget]
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
+        cls = kwargs.pop('cls', None)  # type: ClsType["models.StorageTarget"]
+        lro_delay = kwargs.pop(
+            'polling_interval',
+            self._config.polling_interval
+        )
+        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
+        # Only issue the initial PUT when not resuming from a saved poller state.
+        if cont_token is None:
+            raw_result = self._create_or_update_initial(
+                resource_group_name=resource_group_name,
+                cache_name=cache_name,
+                storage_target_name=storage_target_name,
+                junctions=junctions,
+                target_type=target_type,
+                provisioning_state=provisioning_state,
+                nfs3=nfs3,
+                unknown_map=unknown_map,
+                target=target,
+                cls=lambda x,y,z: x,  # keep the raw PipelineResponse so the poller deserializes the final result
+                **kwargs
+            )
+
+        # These kwargs apply only to the initial request; drop them before polling.
+        kwargs.pop('error_map', None)
+        kwargs.pop('content_type', None)
+
+        def get_long_running_output(pipeline_response):
+            # Deserialize the terminal polling response into the return type.
+            deserialized = self._deserialize('StorageTarget', pipeline_response)
+
+            if cls:
+                return cls(pipeline_response, deserialized, {})
+            return deserialized
+
+        # Handed to the polling method so it can format templated polling URLs.
+        path_format_arguments = {
+            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
+            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
+            'cacheName': self._serialize.url("cache_name", cache_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
+            'storageTargetName': self._serialize.url("storage_target_name", storage_target_name, 'str', pattern=r'^[-0-9a-zA-Z_]{1,80}$'),
+        }
+
+        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
+        elif polling is False: polling_method = NoPolling()
+        else: polling_method = polling
+        if cont_token:
+            return LROPoller.from_continuation_token(
+                polling_method=polling_method,
+                continuation_token=cont_token,
+                client=self._client,
+                deserialization_callback=get_long_running_output
+            )
+        else:
+            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
+    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StorageCache/caches/{cacheName}/storageTargets/{storageTargetName}'}  # type: ignore
diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_usage_model_operations.py b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_usage_model_operations.py
new file mode 100644
index 00000000000..d7cc003f4de
--- /dev/null
+++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/operations/_usage_model_operations.py
@@ -0,0 +1,113 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+from typing import TYPE_CHECKING
+import warnings
+
+from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
+from azure.core.paging import ItemPaged
+from azure.core.pipeline import PipelineResponse
+from azure.core.pipeline.transport import HttpRequest, HttpResponse
+from azure.mgmt.core.exceptions import ARMErrorFormat
+
+from .. import models
+
+if TYPE_CHECKING:
+ # pylint: disable=unused-import,ungrouped-imports
+ from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
+
+ T = TypeVar('T')
+ ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
+
+class UsageModelOperations(object):
+    """UsageModelOperations operations.
+
+    You should not instantiate this class directly. Instead, you should create a Client instance that
+    instantiates it for you and attaches it as an attribute.
+
+    :ivar models: Alias to model classes used in this operation group.
+    :type models: ~storage_cache_management_client.models
+    :param client: Client for service requests.
+    :param config: Configuration of service client.
+    :param serializer: An object model serializer.
+    :param deserializer: An object model deserializer.
+    """
+
+    models = models
+
+    def __init__(self, client, config, serializer, deserializer):
+        # Pipeline client, shared config, and (de)serializers injected by the
+        # service client that owns this operation group.
+        self._client = client
+        self._serialize = serializer
+        self._deserialize = deserializer
+        self._config = config
+
+    def list(
+        self,
+        **kwargs  # type: Any
+    ):
+        # type: (...) -> Iterable["models.UsageModelsResult"]
+        """Get the list of Cache Usage Models available to this subscription.
+
+        :keyword callable cls: A custom type or function that will be passed the direct response
+        :return: An iterator like instance of either UsageModelsResult or the result of cls(response)
+        :rtype: ~azure.core.paging.ItemPaged[~storage_cache_management_client.models.UsageModelsResult]
+        :raises: ~azure.core.exceptions.HttpResponseError
+        """
+        cls = kwargs.pop('cls', None)  # type: ClsType["models.UsageModelsResult"]
+        error_map = {
+            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
+        }
+        # Callers may extend or override the default ARM error mapping.
+        error_map.update(kwargs.pop('error_map', {}))
+        api_version = "2020-10-01"
+        accept = "application/json"
+
+        def prepare_request(next_link=None):
+            # Build the GET for either the first page (from URL metadata) or a
+            # continuation page (next_link used verbatim).
+            # Construct headers
+            header_parameters = {}  # type: Dict[str, Any]
+            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
+
+            if not next_link:
+                # Construct URL
+                url = self.list.metadata['url']  # type: ignore
+                path_format_arguments = {
+                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
+                }
+                url = self._client.format_url(url, **path_format_arguments)
+                # Construct parameters
+                query_parameters = {}  # type: Dict[str, Any]
+                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
+
+                request = self._client.get(url, query_parameters, header_parameters)
+            else:
+                # next_link already embeds its query string; send no extra parameters.
+                url = next_link
+                query_parameters = {}  # type: Dict[str, Any]
+                request = self._client.get(url, query_parameters, header_parameters)
+            return request
+
+        def extract_data(pipeline_response):
+            # Return (continuation token, iterator of page items) as ItemPaged expects.
+            deserialized = self._deserialize('UsageModelsResult', pipeline_response)
+            list_of_elem = deserialized.value
+            if cls:
+                list_of_elem = cls(list_of_elem)
+            return deserialized.next_link or None, iter(list_of_elem)
+
+        def get_next(next_link=None):
+            # Fetch one page, mapping non-200 responses to ARM errors.
+            request = prepare_request(next_link)
+
+            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
+            response = pipeline_response.http_response
+
+            if response.status_code not in [200]:
+                map_error(status_code=response.status_code, response=response, error_map=error_map)
+                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
+
+            return pipeline_response
+
+        return ItemPaged(
+            get_next, extract_data
+        )
+    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.StorageCache/usageModels'}  # type: ignore
diff --git a/src/storagecache/azext_storagecache/vendored_sdks/storagecache/py.typed b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/py.typed
new file mode 100644
index 00000000000..e5aff4f83af
--- /dev/null
+++ b/src/storagecache/azext_storagecache/vendored_sdks/storagecache/py.typed
@@ -0,0 +1 @@
+# Marker file for PEP 561.
\ No newline at end of file
diff --git a/src/storagecache/report.md b/src/storagecache/report.md
new file mode 100644
index 00000000000..5febec58c27
--- /dev/null
+++ b/src/storagecache/report.md
@@ -0,0 +1,360 @@
+# Azure CLI Module Creation Report
+
+## EXTENSION
+|CLI Extension|Command Groups|
+|---------|------------|
+|az storagecache|[groups](#CommandGroups)
+
+## GROUPS
+### Command groups in `az storagecache` extension
+|CLI Command Group|Group Swagger name|Commands|
+|---------|------------|--------|
+|az storagecache sku|Skus|[commands](#CommandsInSkus)|
+|az storagecache usage-model|UsageModels|[commands](#CommandsInUsageModels)|
+|az storagecache asc-operation|AscOperations|[commands](#CommandsInAscOperations)|
+|az storagecache cache|Caches|[commands](#CommandsInCaches)|
+|az storagecache storage-target|StorageTargets|[commands](#CommandsInStorageTargets)|
+
+## COMMANDS
+### Commands in `az storagecache asc-operation` group
+|CLI Command|Operation Swagger name|Parameters|Examples|
+|---------|------------|--------|-----------|
+|[az storagecache asc-operation show](#AscOperationsGet)|Get|[Parameters](#ParametersAscOperationsGet)|[Example](#ExamplesAscOperationsGet)|
+
+### Commands in `az storagecache cache` group
+|CLI Command|Operation Swagger name|Parameters|Examples|
+|---------|------------|--------|-----------|
+|[az storagecache cache list](#CachesListByResourceGroup)|ListByResourceGroup|[Parameters](#ParametersCachesListByResourceGroup)|[Example](#ExamplesCachesListByResourceGroup)|
+|[az storagecache cache list](#CachesList)|List|[Parameters](#ParametersCachesList)|[Example](#ExamplesCachesList)|
+|[az storagecache cache show](#CachesGet)|Get|[Parameters](#ParametersCachesGet)|[Example](#ExamplesCachesGet)|
+|[az storagecache cache create](#CachesCreateOrUpdate#Create)|CreateOrUpdate#Create|[Parameters](#ParametersCachesCreateOrUpdate#Create)|[Example](#ExamplesCachesCreateOrUpdate#Create)|
+|[az storagecache cache update](#CachesUpdate)|Update|[Parameters](#ParametersCachesUpdate)|[Example](#ExamplesCachesUpdate)|
+|[az storagecache cache delete](#CachesDelete)|Delete|[Parameters](#ParametersCachesDelete)|[Example](#ExamplesCachesDelete)|
+|[az storagecache cache flush](#CachesFlush)|Flush|[Parameters](#ParametersCachesFlush)|[Example](#ExamplesCachesFlush)|
+|[az storagecache cache start](#CachesStart)|Start|[Parameters](#ParametersCachesStart)|[Example](#ExamplesCachesStart)|
+|[az storagecache cache stop](#CachesStop)|Stop|[Parameters](#ParametersCachesStop)|[Example](#ExamplesCachesStop)|
+|[az storagecache cache upgrade-firmware](#CachesUpgradeFirmware)|UpgradeFirmware|[Parameters](#ParametersCachesUpgradeFirmware)|[Example](#ExamplesCachesUpgradeFirmware)|
+
+### Commands in `az storagecache sku` group
+|CLI Command|Operation Swagger name|Parameters|Examples|
+|---------|------------|--------|-----------|
+|[az storagecache sku list](#SkusList)|List|[Parameters](#ParametersSkusList)|[Example](#ExamplesSkusList)|
+
+### Commands in `az storagecache storage-target` group
+|CLI Command|Operation Swagger name|Parameters|Examples|
+|---------|------------|--------|-----------|
+|[az storagecache storage-target list](#StorageTargetsListByCache)|ListByCache|[Parameters](#ParametersStorageTargetsListByCache)|[Example](#ExamplesStorageTargetsListByCache)|
+|[az storagecache storage-target show](#StorageTargetsGet)|Get|[Parameters](#ParametersStorageTargetsGet)|[Example](#ExamplesStorageTargetsGet)|
+|[az storagecache storage-target create](#StorageTargetsCreateOrUpdate#Create)|CreateOrUpdate#Create|[Parameters](#ParametersStorageTargetsCreateOrUpdate#Create)|[Example](#ExamplesStorageTargetsCreateOrUpdate#Create)|
+|[az storagecache storage-target update](#StorageTargetsCreateOrUpdate#Update)|CreateOrUpdate#Update|[Parameters](#ParametersStorageTargetsCreateOrUpdate#Update)|Not Found|
+|[az storagecache storage-target delete](#StorageTargetsDelete)|Delete|[Parameters](#ParametersStorageTargetsDelete)|[Example](#ExamplesStorageTargetsDelete)|
+
+### Commands in `az storagecache usage-model` group
+|CLI Command|Operation Swagger name|Parameters|Examples|
+|---------|------------|--------|-----------|
+|[az storagecache usage-model list](#UsageModelsList)|List|[Parameters](#ParametersUsageModelsList)|[Example](#ExamplesUsageModelsList)|
+
+
+## COMMAND DETAILS
+
+### group `az storagecache asc-operation`
+#### Command `az storagecache asc-operation show`
+
+##### Example
+```
+az storagecache asc-operation show --operation-id "testoperationid" --location "westus"
+```
+##### Parameters
+|Option|Type|Description|Path (SDK)|Swagger name|
+|------|----|-----------|----------|------------|
+|**--location**|string|The name of the region used to look up the operation.|location|location|
+|**--operation-id**|string|The operation id which uniquely identifies the asynchronous operation.|operation_id|operationId|
+
+### group `az storagecache cache`
+#### Command `az storagecache cache list`
+
+##### Example
+```
+az storagecache cache list --resource-group "scgroup"
+```
+##### Parameters
+|Option|Type|Description|Path (SDK)|Swagger name|
+|------|----|-----------|----------|------------|
+|**--resource-group-name**|string|Target resource group.|resource_group_name|resourceGroupName|
+
+#### Command `az storagecache cache list`
+
+##### Example
+```
+az storagecache cache list
+```
+##### Parameters
+|Option|Type|Description|Path (SDK)|Swagger name|
+|------|----|-----------|----------|------------|
+#### Command `az storagecache cache show`
+
+##### Example
+```
+az storagecache cache show --cache-name "sc1" --resource-group "scgroup"
+```
+##### Parameters
+|Option|Type|Description|Path (SDK)|Swagger name|
+|------|----|-----------|----------|------------|
+|**--resource-group-name**|string|Target resource group.|resource_group_name|resourceGroupName|
+|**--cache-name**|string|Name of Cache. Length of name must not be greater than 80 and chars must be from the [-0-9a-zA-Z_] char class.|cache_name|cacheName|
+
+#### Command `az storagecache cache create`
+
+##### Example
+```
+az storagecache cache create --location "westus" --cache-size-gb 3072 --subnet "/subscriptions/00000000-0000-0000-0000-\
+000000000000/resourceGroups/scgroup/providers/Microsoft.Network/virtualNetworks/scvnet/subnets/sub1" --sku-name \
+"Standard_2G" --tags "{\\"Dept\\":\\"ContosoAds\\"}" --cache-name "sc1" --resource-group "scgroup"
+```
+##### Example
+```
+az storagecache cache create --location "westus" --cache-size-gb 3072 --subnet "/subscriptions/00000000-0000-0000-0000-\
+000000000000/resourceGroups/scgroup/providers/Microsoft.Network/virtualNetworks/scvnet/subnets/sub1" --sku-name \
+"Standard_2G" --tags "{\\"Dept\\":\\"ContosoAds\\"}" --cache-name "sc1" --resource-group "scgroup"
+```
+##### Parameters
+|Option|Type|Description|Path (SDK)|Swagger name|
+|------|----|-----------|----------|------------|
+|**--resource-group-name**|string|Target resource group.|resource_group_name|resourceGroupName|
+|**--cache-name**|string|Name of Cache. Length of name must not be greater than 80 and chars must be from the [-0-9a-zA-Z_] char class.|cache_name|cacheName|
+|**--tags**|any|ARM tags as name/value pairs.|tags|tags|
+|**--location**|string|Region name string.|location|location|
+|**--cache-size-gb**|integer|The size of this Cache, in GB.|cache_size_gb|cacheSizeGB|
+|**--provisioning-state**|choice|ARM provisioning state, see https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property|provisioning_state|provisioningState|
+|**--subnet**|string|Subnet used for the Cache.|subnet|subnet|
+|**--directory-services-settings-username-download-extended-groups-enabled**|boolean|This indicates if Extended Groups is enabled.|extended_groups_enabled|extendedGroupsEnabled|
+|**--directory-services-settings-username-download-username-source**|choice|This setting determines how the system gets username and group names for clients.|username_source|usernameSource|
+|**--directory-services-settings-username-download-group-file-uri**|string|The URI of the file containing the group information (in /etc/group file format). This field must be populated when 'usernameSource' is set to 'File'.|group_file_uri|groupFileURI|
+|**--directory-services-settings-username-download-user-file-uri**|string|The URI of the file containing the user information (in /etc/passwd file format). This field must be populated when 'usernameSource' is set to 'File'.|user_file_uri|userFileURI|
+|**--directory-services-settings-username-download-ldap-server**|string|The fully qualified domain name or IP address of the LDAP server to use.|ldap_server|ldapServer|
+|**--directory-services-settings-username-download-ldap-base-dn**|string|The base distinguished name for the LDAP domain.|ldap_base_dn|ldapBaseDn|
+|**--directory-services-settings-username-download-encrypt-ldap-connection**|boolean|This indicates if the LDAP connection should be encrypted.|encrypt_ldap_connection|encryptLdapConnection|
+|**--directory-services-settings-username-download-require-valid-certificate**|boolean|Determines if the certificates should be validated by a certificate authority. When true, caCertificateURI must be provided.|require_valid_certificate|requireValidCertificate|
+|**--directory-services-settings-username-download-auto-download-certificate**|boolean|Determines if the certificate should be automatically downloaded. This applies to 'caCertificateURI' when 'requireValidCertificate' is true, or a self signed certificate otherwise.|auto_download_certificate|autoDownloadCertificate|
+|**--directory-services-settings-username-download-ca-certificate-uri**|string|The URI of the CA certificate to validate the LDAP secure connection. This field must be populated when 'requireValidCertificate' is set to true.|ca_certificate_uri|caCertificateURI|
+|**--directory-services-settings-username-download-credentials**|object|When present, these are the credentials for the secure LDAP connection.|credentials|credentials|
+|**--directory-services-settings-active-directory-primary-dns-ip-address**|string|Primary DNS IP address used to resolve the Active Directory domain controller's fully qualified domain name.|primary_dns_ip_address|primaryDnsIpAddress|
+|**--directory-services-settings-active-directory-secondary-dns-ip-address**|string|Secondary DNS IP address used to resolve the Active Directory domain controller's fully qualified domain name.|secondary_dns_ip_address|secondaryDnsIpAddress|
+|**--directory-services-settings-active-directory-domain-name**|string|The fully qualified domain name of the Active Directory domain controller.|domain_name|domainName|
+|**--directory-services-settings-active-directory-domain-net-bios**|string|The Active Directory domain's NetBIOS name.|domain_net_bios|domainNetBios|
+|**--directory-services-settings-active-directory-smb-server-name**|string|The name (NetBIOS) used for the HPC Cache to join the Active Directory domain. Length must not be greater than 15 and chars must be from the [-0-9a-zA-Z_] char class.|smb_server_name|smbServerName|
+|**--security-settings-access-policies**|array|NFS access policies defined for this cache.|access_policies|accessPolicies|
+|**--encryption-settings-key-encryption-key-key-url**|string|The URL referencing a key encryption key in Key Vault.|key_url|keyUrl|
+|**--encryption-settings-key-encryption-key-source-vault-id**|string|Resource Id.|id|id|
+|**--network-settings-mtu**|integer|The IPv4 maximum transmission unit configured for the subnet.|mtu|mtu|
+|**--sku-name**|string|SKU name for this Cache.|name|name|
+|**--identity-type**|sealed-choice|The type of identity used for the cache|type|type|
+
+#### Command `az storagecache cache update`
+
+##### Example
+```
+az storagecache cache update --location "westus" --cache-size-gb 3072 --subnet "/subscriptions/00000000-0000-0000-0000-\
+000000000000/resourceGroups/scgroup/providers/Microsoft.Network/virtualNetworks/scvnet/subnets/sub1" --sku-name \
+"Standard_2G" --tags "{\\"Dept\\":\\"ContosoAds\\"}" --cache-name "sc1" --resource-group "scgroup"
+```
+##### Example
+```
+az storagecache cache update --location "westus" --cache-size-gb 3072 --subnet "/subscriptions/00000000-0000-0000-0000-\
+000000000000/resourceGroups/scgroup/providers/Microsoft.Network/virtualNetworks/scvnet/subnets/sub1" --sku-name \
+"Standard_2G" --tags "{\\"Dept\\":\\"ContosoAds\\"}" --cache-name "sc1" --resource-group "scgroup"
+```
+##### Parameters
+|Option|Type|Description|Path (SDK)|Swagger name|
+|------|----|-----------|----------|------------|
+|**--resource-group-name**|string|Target resource group.|resource_group_name|resourceGroupName|
+|**--cache-name**|string|Name of Cache. Length of name must not be greater than 80 and chars must be from the [-0-9a-zA-Z_] char class.|cache_name|cacheName|
+|**--tags**|any|ARM tags as name/value pairs.|tags|tags|
+|**--location**|string|Region name string.|location|location|
+|**--cache-size-gb**|integer|The size of this Cache, in GB.|cache_size_gb|cacheSizeGB|
+|**--provisioning-state**|choice|ARM provisioning state, see https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property|provisioning_state|provisioningState|
+|**--subnet**|string|Subnet used for the Cache.|subnet|subnet|
+|**--directory-services-settings-username-download-extended-groups-enabled**|boolean|This indicates if Extended Groups is enabled.|extended_groups_enabled|extendedGroupsEnabled|
+|**--directory-services-settings-username-download-username-source**|choice|This setting determines how the system gets username and group names for clients.|username_source|usernameSource|
+|**--directory-services-settings-username-download-group-file-uri**|string|The URI of the file containing the group information (in /etc/group file format). This field must be populated when 'usernameSource' is set to 'File'.|group_file_uri|groupFileURI|
+|**--directory-services-settings-username-download-user-file-uri**|string|The URI of the file containing the user information (in /etc/passwd file format). This field must be populated when 'usernameSource' is set to 'File'.|user_file_uri|userFileURI|
+|**--directory-services-settings-username-download-ldap-server**|string|The fully qualified domain name or IP address of the LDAP server to use.|ldap_server|ldapServer|
+|**--directory-services-settings-username-download-ldap-base-dn**|string|The base distinguished name for the LDAP domain.|ldap_base_dn|ldapBaseDn|
+|**--directory-services-settings-username-download-encrypt-ldap-connection**|boolean|This indicates if the LDAP connection should be encrypted.|encrypt_ldap_connection|encryptLdapConnection|
+|**--directory-services-settings-username-download-require-valid-certificate**|boolean|Determines if the certificates should be validated by a certificate authority. When true, caCertificateURI must be provided.|require_valid_certificate|requireValidCertificate|
+|**--directory-services-settings-username-download-auto-download-certificate**|boolean|Determines if the certificate should be automatically downloaded. This applies to 'caCertificateURI' when 'requireValidCertificate' is true, or a self signed certificate otherwise.|auto_download_certificate|autoDownloadCertificate|
+|**--directory-services-settings-username-download-ca-certificate-uri**|string|The URI of the CA certificate to validate the LDAP secure connection. This field must be populated when 'requireValidCertificate' is set to true.|ca_certificate_uri|caCertificateURI|
+|**--directory-services-settings-username-download-credentials**|object|When present, these are the credentials for the secure LDAP connection.|credentials|credentials|
+|**--directory-services-settings-active-directory-primary-dns-ip-address**|string|Primary DNS IP address used to resolve the Active Directory domain controller's fully qualified domain name.|primary_dns_ip_address|primaryDnsIpAddress|
+|**--directory-services-settings-active-directory-secondary-dns-ip-address**|string|Secondary DNS IP address used to resolve the Active Directory domain controller's fully qualified domain name.|secondary_dns_ip_address|secondaryDnsIpAddress|
+|**--directory-services-settings-active-directory-domain-name**|string|The fully qualified domain name of the Active Directory domain controller.|domain_name|domainName|
+|**--directory-services-settings-active-directory-domain-net-bios**|string|The Active Directory domain's NetBIOS name.|domain_net_bios|domainNetBios|
+|**--directory-services-settings-active-directory-smb-server-name**|string|The name (NetBIOS) used for the HPC Cache to join the Active Directory domain. Length must not be greater than 15 and chars must be from the [-0-9a-zA-Z_] char class.|smb_server_name|smbServerName|
+|**--security-settings-access-policies**|array|NFS access policies defined for this cache.|access_policies|accessPolicies|
+|**--encryption-settings-key-encryption-key-key-url**|string|The URL referencing a key encryption key in Key Vault.|key_url|keyUrl|
+|**--encryption-settings-key-encryption-key-source-vault-id**|string|Resource Id.|id|id|
+|**--network-settings-mtu**|integer|The IPv4 maximum transmission unit configured for the subnet.|mtu|mtu|
+|**--sku-name**|string|SKU name for this Cache.|name|name|
+|**--identity-type**|sealed-choice|The type of identity used for the cache|type|type|
+
+#### Command `az storagecache cache delete`
+
+##### Example
+```
+az storagecache cache delete --cache-name "sc" --resource-group "scgroup"
+```
+##### Parameters
+|Option|Type|Description|Path (SDK)|Swagger name|
+|------|----|-----------|----------|------------|
+|**--resource-group-name**|string|Target resource group.|resource_group_name|resourceGroupName|
+|**--cache-name**|string|Name of Cache. Length of name must not be greater than 80 and chars must be from the [-0-9a-zA-Z_] char class.|cache_name|cacheName|
+
+#### Command `az storagecache cache flush`
+
+##### Example
+```
+az storagecache cache flush --cache-name "sc" --resource-group "scgroup"
+```
+##### Parameters
+|Option|Type|Description|Path (SDK)|Swagger name|
+|------|----|-----------|----------|------------|
+|**--resource-group-name**|string|Target resource group.|resource_group_name|resourceGroupName|
+|**--cache-name**|string|Name of Cache. Length of name must not be greater than 80 and chars must be from the [-0-9a-zA-Z_] char class.|cache_name|cacheName|
+
+#### Command `az storagecache cache start`
+
+##### Example
+```
+az storagecache cache start --cache-name "sc" --resource-group "scgroup"
+```
+##### Parameters
+|Option|Type|Description|Path (SDK)|Swagger name|
+|------|----|-----------|----------|------------|
+|**--resource-group-name**|string|Target resource group.|resource_group_name|resourceGroupName|
+|**--cache-name**|string|Name of Cache. Length of name must not be greater than 80 and chars must be from the [-0-9a-zA-Z_] char class.|cache_name|cacheName|
+
+#### Command `az storagecache cache stop`
+
+##### Example
+```
+az storagecache cache stop --cache-name "sc" --resource-group "scgroup"
+```
+##### Parameters
+|Option|Type|Description|Path (SDK)|Swagger name|
+|------|----|-----------|----------|------------|
+|**--resource-group-name**|string|Target resource group.|resource_group_name|resourceGroupName|
+|**--cache-name**|string|Name of Cache. Length of name must not be greater than 80 and chars must be from the [-0-9a-zA-Z_] char class.|cache_name|cacheName|
+
+#### Command `az storagecache cache upgrade-firmware`
+
+##### Example
+```
+az storagecache cache upgrade-firmware --cache-name "sc1" --resource-group "scgroup"
+```
+##### Parameters
+|Option|Type|Description|Path (SDK)|Swagger name|
+|------|----|-----------|----------|------------|
+|**--resource-group-name**|string|Target resource group.|resource_group_name|resourceGroupName|
+|**--cache-name**|string|Name of Cache. Length of name must not be greater than 80 and chars must be from the [-0-9a-zA-Z_] char class.|cache_name|cacheName|
+
+### group `az storagecache sku`
+#### Command `az storagecache sku list`
+
+##### Example
+```
+az storagecache sku list
+```
+##### Parameters
+|Option|Type|Description|Path (SDK)|Swagger name|
+|------|----|-----------|----------|------------|
+### group `az storagecache storage-target`
+#### Command `az storagecache storage-target list`
+
+##### Example
+```
+az storagecache storage-target list --cache-name "sc1" --resource-group "scgroup"
+```
+##### Parameters
+|Option|Type|Description|Path (SDK)|Swagger name|
+|------|----|-----------|----------|------------|
+|**--resource-group-name**|string|Target resource group.|resource_group_name|resourceGroupName|
+|**--cache-name**|string|Name of Cache. Length of name must not be greater than 80 and chars must be from the [-0-9a-zA-Z_] char class.|cache_name|cacheName|
+
+#### Command `az storagecache storage-target show`
+
+##### Example
+```
+az storagecache storage-target show --cache-name "sc1" --resource-group "scgroup" --name "st1"
+```
+##### Parameters
+|Option|Type|Description|Path (SDK)|Swagger name|
+|------|----|-----------|----------|------------|
+|**--resource-group-name**|string|Target resource group.|resource_group_name|resourceGroupName|
+|**--cache-name**|string|Name of Cache. Length of name must not be greater than 80 and chars must be from the [-0-9a-zA-Z_] char class.|cache_name|cacheName|
+|**--storage-target-name**|string|Name of the Storage Target. Length of name must not be greater than 80 and chars must be from the [-0-9a-zA-Z_] char class.|storage_target_name|storageTargetName|
+
+#### Command `az storagecache storage-target create`
+
+##### Example
+```
+az storagecache storage-target create --cache-name "sc1" --resource-group "scgroup" --name "st1" --junctions \
+namespace-path="/path/on/cache" nfs-access-policy="default" nfs-export="exp1" target-path="/path/on/exp1" --junctions \
+namespace-path="/path2/on/cache" nfs-access-policy="rootSquash" nfs-export="exp2" target-path="/path2/on/exp2" --nfs3 \
+target="10.0.44.44" usage-model="READ_HEAVY_INFREQ" --target-type "nfs3"
+```
+##### Example
+```
+az storagecache storage-target create --cache-name "sc1" --resource-group "scgroup" --name "st1" --nfs3 \
+target="10.0.44.44" usage-model="READ_HEAVY_INFREQ" --target-type "nfs3"
+```
+##### Parameters
+|Option|Type|Description|Path (SDK)|Swagger name|
+|------|----|-----------|----------|------------|
+|**--resource-group-name**|string|Target resource group.|resource_group_name|resourceGroupName|
+|**--cache-name**|string|Name of Cache. Length of name must not be greater than 80 and chars must be from the [-0-9a-zA-Z_] char class.|cache_name|cacheName|
+|**--storage-target-name**|string|Name of the Storage Target. Length of name must not be greater than 80 and chars must be from the [-0-9a-zA-Z_] char class.|storage_target_name|storageTargetName|
+|**--junctions**|array|List of Cache namespace junctions to target for namespace associations.|junctions|junctions|
+|**--target-type**|choice|Type of the Storage Target.|target_type|targetType|
+|**--provisioning-state**|choice|ARM provisioning state, see https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property|provisioning_state|provisioningState|
+|**--nfs3**|object|Properties when targetType is nfs3.|nfs3|nfs3|
+|**--unknown-unknown-map**|dictionary|Dictionary of string->string pairs containing information about the Storage Target.|unknown_map|unknownMap|
+|**--clfs-target**|string|Resource ID of storage container.|target|target|
+
+#### Command `az storagecache storage-target update`
+
+##### Parameters
+|Option|Type|Description|Path (SDK)|Swagger name|
+|------|----|-----------|----------|------------|
+|**--resource-group-name**|string|Target resource group.|resource_group_name|resourceGroupName|
+|**--cache-name**|string|Name of Cache. Length of name must not be greater than 80 and chars must be from the [-0-9a-zA-Z_] char class.|cache_name|cacheName|
+|**--storage-target-name**|string|Name of the Storage Target. Length of name must not be greater than 80 and chars must be from the [-0-9a-zA-Z_] char class.|storage_target_name|storageTargetName|
+|**--junctions**|array|List of Cache namespace junctions to target for namespace associations.|junctions|junctions|
+|**--target-type**|choice|Type of the Storage Target.|target_type|targetType|
+|**--provisioning-state**|choice|ARM provisioning state, see https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property|provisioning_state|provisioningState|
+|**--nfs3**|object|Properties when targetType is nfs3.|nfs3|nfs3|
+|**--unknown-unknown-map**|dictionary|Dictionary of string->string pairs containing information about the Storage Target.|unknown_map|unknownMap|
+|**--clfs-target**|string|Resource ID of storage container.|target|target|
+
+#### Command `az storagecache storage-target delete`
+
+##### Example
+```
+az storagecache storage-target delete --cache-name "sc1" --resource-group "scgroup" --name "st1"
+```
+##### Parameters
+|Option|Type|Description|Path (SDK)|Swagger name|
+|------|----|-----------|----------|------------|
+|**--resource-group-name**|string|Target resource group.|resource_group_name|resourceGroupName|
+|**--cache-name**|string|Name of Cache. Length of name must not be greater than 80 and chars must be from the [-0-9a-zA-Z_] char class.|cache_name|cacheName|
+|**--storage-target-name**|string|Name of Storage Target.|storage_target_name|storageTargetName|
+
+### group `az storagecache usage-model`
+#### Command `az storagecache usage-model list`
+
+##### Example
+```
+az storagecache usage-model list
+```
+##### Parameters
+|Option|Type|Description|Path (SDK)|Swagger name|
+|------|----|-----------|----------|------------|
\ No newline at end of file
diff --git a/src/storagecache/setup.cfg b/src/storagecache/setup.cfg
new file mode 100644
index 00000000000..2fdd96e5d39
--- /dev/null
+++ b/src/storagecache/setup.cfg
@@ -0,0 +1 @@
+#setup.cfg
\ No newline at end of file
diff --git a/src/storagecache/setup.py b/src/storagecache/setup.py
new file mode 100644
index 00000000000..8f359711a2e
--- /dev/null
+++ b/src/storagecache/setup.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python
+
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
+
+
+from codecs import open
+from setuptools import setup, find_packages
+
+# HISTORY.rst entry.
+VERSION = '0.1.0'
+try:
+ from azext_storagecache.manual.version import VERSION
+except ImportError:
+ pass
+
+# The full list of classifiers is available at
+# https://pypi.python.org/pypi?%3Aaction=list_classifiers
+CLASSIFIERS = [
+ 'Development Status :: 4 - Beta',
+ 'Intended Audience :: Developers',
+ 'Intended Audience :: System Administrators',
+ 'Programming Language :: Python',
+ 'Programming Language :: Python :: 3',
+ 'Programming Language :: Python :: 3.6',
+ 'Programming Language :: Python :: 3.7',
+ 'Programming Language :: Python :: 3.8',
+ 'License :: OSI Approved :: MIT License',
+]
+
+DEPENDENCIES = []
+
+try:
+ from azext_storagecache.manual.dependency import DEPENDENCIES
+except ImportError:
+ pass
+
+with open('README.md', 'r', encoding='utf-8') as f:
+ README = f.read()
+with open('HISTORY.rst', 'r', encoding='utf-8') as f:
+ HISTORY = f.read()
+
+setup(
+ name='storagecache',
+ version=VERSION,
+ description='Microsoft Azure Command-Line Tools StorageCacheManagementClient Extension',
+ author='Microsoft Corporation',
+ author_email='azpycli@microsoft.com',
+ url='https://github.com/Azure/azure-cli-extensions/tree/master/src/storagecache',
+ long_description=README + '\n\n' + HISTORY,
+ license='MIT',
+ classifiers=CLASSIFIERS,
+ packages=find_packages(),
+ install_requires=DEPENDENCIES,
+ package_data={'azext_storagecache': ['azext_metadata.json']},
+)