diff --git a/.github/workflows/trivy.yaml b/.github/workflows/trivy.yaml
index 308bbe24f5..a52d030221 100644
--- a/.github/workflows/trivy.yaml
+++ b/.github/workflows/trivy.yaml
@@ -28,5 +28,5 @@ jobs:
exit-code: '1'
ignore-unfixed: true
vuln-type: 'os,library'
- severity: 'CRITICAL,HIGH'
+ severity: 'CRITICAL,HIGH,MEDIUM,LOW,UNKNOWN'
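For reference, the complete scan step this hunk lands in would look roughly like the sketch below. Only `exit-code`, `ignore-unfixed`, `vuln-type`, and the widened `severity` list are confirmed by the diff; the step name, action pin, and `image-ref` are assumptions.

```yaml
# Sketch of a trivy-action scan step with the widened severity filter.
# exit-code: '1' fails the job on any finding at the listed severities.
- name: Run Trivy vulnerability scanner        # hypothetical step name
  uses: aquasecurity/trivy-action@master       # assumed action pin
  with:
    image-ref: 'local/azurefile-csi:e2e-test'  # hypothetical image ref
    format: 'table'
    exit-code: '1'
    ignore-unfixed: true
    vuln-type: 'os,library'
    severity: 'CRITICAL,HIGH,MEDIUM,LOW,UNKNOWN'
```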
diff --git a/.github/workflows/windows.yaml b/.github/workflows/windows.yaml
index e629883b2e..73830f2f04 100644
--- a/.github/workflows/windows.yaml
+++ b/.github/workflows/windows.yaml
@@ -25,7 +25,7 @@ jobs:
run: |
# start the CSI Proxy before running tests on windows
Start-Job -Name CSIProxy -ScriptBlock {
- Invoke-WebRequest https://acs-mirror.azureedge.net/csi-proxy/v1.0.0/binaries/csi-proxy-v1.0.0.tar.gz -OutFile csi-proxy.tar.gz;
+ Invoke-WebRequest https://acs-mirror.azureedge.net/csi-proxy/v1.0.2/binaries/csi-proxy-v1.0.2.tar.gz -OutFile csi-proxy.tar.gz;
tar -xvf csi-proxy.tar.gz
.\bin\csi-proxy.exe --kubelet-path $pwd
};
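If the proxy takes a moment to come up, a follow-up step could surface the background job's state and output before tests start — a sketch, assuming the `CSIProxy` job name from the hunk above (`Get-Job`/`Receive-Job` are standard PowerShell cmdlets):

```yaml
# Hypothetical verification step: wait briefly, then dump the CSIProxy
# job state and its accumulated output without consuming it (-Keep).
- name: Check CSI Proxy job
  run: |
    Start-Sleep -Seconds 10
    Get-Job -Name CSIProxy
    Receive-Job -Name CSIProxy -Keep
```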
diff --git a/Makefile b/Makefile
index d26cf5492b..fc9f52452a 100755
--- a/Makefile
+++ b/Makefile
@@ -17,7 +17,7 @@ GIT_COMMIT ?= $(shell git rev-parse HEAD)
REGISTRY ?= andyzhangx
REGISTRY_NAME ?= $(shell echo $(REGISTRY) | sed "s/.azurecr.io//g")
IMAGE_NAME ?= azurefile-csi
-IMAGE_VERSION ?= v1.7.0
+IMAGE_VERSION ?= v1.8.0
# Use a custom version for E2E tests if we are testing in CI
ifdef CI
ifndef PUBLISH
diff --git a/README.md b/README.md
index f224a0afeb..a19db9d232 100644
--- a/README.md
+++ b/README.md
@@ -11,10 +11,10 @@ This driver allows Kubernetes to use [Azure File](https://docs.microsoft.com/en-
### Container Images & Kubernetes Compatibility:
|Driver Version |Image | supported k8s version |
|----------------|----------------------------------------------- |-----------------------|
-|master branch |mcr.microsoft.com/k8s/csi/azurefile-csi:latest | 1.18+ |
+|master branch |mcr.microsoft.com/k8s/csi/azurefile-csi:latest | 1.19+ |
+|v1.8.0 |mcr.microsoft.com/k8s/csi/azurefile-csi:v1.8.0 | 1.19+ |
|v1.7.0 |mcr.microsoft.com/k8s/csi/azurefile-csi:v1.7.0 | 1.18+ |
|v1.6.0 |mcr.microsoft.com/k8s/csi/azurefile-csi:v1.6.0 | 1.18+ |
-|v1.5.0 |mcr.microsoft.com/k8s/csi/azurefile-csi:v1.5.0 | 1.18+ |
### Driver parameters
Please refer to [driver parameters](./docs/driver-parameters.md)
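For orientation before reading the parameter docs, a minimal StorageClass for this driver looks roughly like the sketch below; the `provisioner` name matches the driver name used throughout this repo, while the class name and `skuName` value are illustrative.

```yaml
# Minimal example StorageClass (illustrative; see docs/driver-parameters.md
# for the full parameter list).
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: azurefile-csi            # arbitrary example name
provisioner: file.csi.azure.com
parameters:
  skuName: Standard_LRS          # assumed storage account SKU
reclaimPolicy: Delete
volumeBindingMode: Immediate
allowVolumeExpansion: true
```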
diff --git a/charts/README.md b/charts/README.md
index 8d9d4df56d..24ec24295e 100644
--- a/charts/README.md
+++ b/charts/README.md
@@ -14,16 +14,10 @@
 - `--set node.cloudConfigSecretNamespace`
- switch to `mcr.azk8s.cn` repository in Azure China: `--set image.baseRepo=mcr.azk8s.cn`
-### install latest version
-```console
-helm repo add azurefile-csi-driver https://raw.githubusercontent.com/kubernetes-sigs/azurefile-csi-driver/master/charts
-helm install azurefile-csi-driver azurefile-csi-driver/azurefile-csi-driver --namespace kube-system
-```
-
### install a specific version
```console
helm repo add azurefile-csi-driver https://raw.githubusercontent.com/kubernetes-sigs/azurefile-csi-driver/master/charts
-helm install azurefile-csi-driver azurefile-csi-driver/azurefile-csi-driver --namespace kube-system --version v1.4.0
+helm install azurefile-csi-driver azurefile-csi-driver/azurefile-csi-driver --namespace kube-system --version v1.8.0
```
### install on RedHat/CentOS
@@ -58,6 +52,7 @@ The following table lists the configurable parameters of the latest Azure File C
| `driver.customUserAgent` | custom userAgent | `` |
| `driver.userAgentSuffix` | userAgent suffix | `OSS-helm` |
| `feature.enableFSGroupPolicy` | enable `fsGroupPolicy` on a k8s 1.20+ cluster (only applied to NFS protocol) | `false` |
+| `feature.enableGetVolumeStats` | allow GET_VOLUME_STATS on agent node | `false` |
| `image.baseRepo` | base repository of driver images | `mcr.microsoft.com` |
| `image.azurefile.repository` | azurefile-csi-driver docker image | `/k8s/csi/azurefile-csi` |
| `image.azurefile.tag` | azurefile-csi-driver docker image tag | `latest` |
@@ -72,21 +67,23 @@ The following table lists the configurable parameters of the latest Azure File C
| `image.csiResizer.tag` | csi-resizer docker image tag | `v1.3.0` |
| `image.csiResizer.pullPolicy` | csi-resizer image pull policy | `IfNotPresent` |
| `image.livenessProbe.repository` | liveness-probe docker image | `/oss/kubernetes-csi/livenessprobe` |
-| `image.livenessProbe.tag` | liveness-probe docker image tag | `v2.4.0` |
+| `image.livenessProbe.tag` | liveness-probe docker image tag | `v2.5.0` |
| `image.livenessProbe.pullPolicy` | liveness-probe image pull policy | `IfNotPresent` |
| `image.nodeDriverRegistrar.repository` | csi-node-driver-registrar docker image | `/oss/kubernetes-csi/csi-node-driver-registrar` |
-| `image.nodeDriverRegistrar.tag` | csi-node-driver-registrar docker image tag | `v2.3.0` |
+| `image.nodeDriverRegistrar.tag` | csi-node-driver-registrar docker image tag | `v2.4.0` |
| `image.nodeDriverRegistrar.pullPolicy` | csi-node-driver-registrar image pull policy | `IfNotPresent` |
| `imagePullSecrets` | Specify docker-registry secret names as an array | [] (does not add image pull secrets to deployed pods) |
+| `customLabels` | custom labels to add to resource metadata | `{}` |
| `serviceAccount.create` | whether to create service accounts for csi-azurefile-controller, csi-azurefile-node, and snapshot-controller | `true` |
| `serviceAccount.controller` | name of service account for csi-azurefile-controller | `csi-azurefile-controller-sa` |
| `serviceAccount.node` | name of service account for csi-azurefile-node | `csi-azurefile-node-sa` |
| `serviceAccount.snapshotController` | name of service account for csi-snapshot-controller | `csi-snapshot-controller-sa` |
| `rbac.create` | whether to create RBAC for this driver | `true` |
-| `rbac.name` | driver name in rbac role | `true` |
+| `rbac.name` | driver name in rbac role | `azurefile` |
| `controller.name` | name of driver deployment | `csi-azurefile-controller`
| `controller.cloudConfigSecretName` | cloud config secret name of controller driver | `azure-cloud-provider`
| `controller.cloudConfigSecretNamespace` | cloud config secret namespace of controller driver | `kube-system`
+| `controller.allowEmptyCloudConfig` | whether to allow running the controller driver without cloud config | `true`
| `controller.replicas` | replicas of csi-azurefile-controller | `2` |
| `controller.hostNetwork` | `hostNetwork` setting on controller driver (could be disabled if controller does not depend on MSI setting) | `true` | `true`, `false`
| `controller.metricsPort` | metrics port of csi-azurefile-controller |`29614` |
@@ -94,10 +91,35 @@ The following table lists the configurable parameters of the latest Azure File C
| `controller.runOnMaster` | run controller on master node |`false` |
| `controller.attachRequired` | enable attach/detach (only valid for vhd disk feature) |`false` |
| `controller.logLevel` | controller driver log level |`5` |
+| `controller.resources.csiProvisioner.limits.cpu` | csi-provisioner cpu limits | 1 |
+| `controller.resources.csiProvisioner.limits.memory` | csi-provisioner memory limits | 500Mi |
+| `controller.resources.csiProvisioner.requests.cpu` | csi-provisioner cpu requests | 10m |
+| `controller.resources.csiProvisioner.requests.memory` | csi-provisioner memory requests | 20Mi |
+| `controller.resources.csiAttacher.limits.cpu` | csi-attacher cpu limits | 1 |
+| `controller.resources.csiAttacher.limits.memory` | csi-attacher memory limits | 500Mi |
+| `controller.resources.csiAttacher.requests.cpu` | csi-attacher cpu requests | 10m |
+| `controller.resources.csiAttacher.requests.memory` | csi-attacher memory requests | 20Mi |
+| `controller.resources.csiResizer.limits.cpu` | csi-resizer cpu limits | 1 |
+| `controller.resources.csiResizer.limits.memory` | csi-resizer memory limits | 500Mi |
+| `controller.resources.csiResizer.requests.cpu` | csi-resizer cpu requests | 10m |
+| `controller.resources.csiResizer.requests.memory` | csi-resizer memory requests | 20Mi |
+| `controller.resources.csiSnapshotter.limits.cpu` | csi-snapshotter cpu limits | 1 |
+| `controller.resources.csiSnapshotter.limits.memory` | csi-snapshotter memory limits | 500Mi |
+| `controller.resources.csiSnapshotter.requests.cpu` | csi-snapshotter cpu requests | 10m |
+| `controller.resources.csiSnapshotter.requests.memory` | csi-snapshotter memory requests | 20Mi |
+| `controller.resources.livenessProbe.limits.cpu` | liveness-probe cpu limits | 1 |
+| `controller.resources.livenessProbe.limits.memory` | liveness-probe memory limits | 100Mi |
+| `controller.resources.livenessProbe.requests.cpu` | liveness-probe cpu requests | 10m |
+| `controller.resources.livenessProbe.requests.memory` | liveness-probe memory requests | 20Mi |
+| `controller.resources.azurefile.limits.cpu` | azurefile cpu limits | 1 |
+| `controller.resources.azurefile.limits.memory` | azurefile memory limits | 200Mi |
+| `controller.resources.azurefile.requests.cpu` | azurefile cpu requests | 10m |
+| `controller.resources.azurefile.requests.memory` | azurefile memory requests | 20Mi |
| `controller.kubeconfig` | configure kubeconfig path on controller node | '' (empty, use InClusterConfig by default)
| `controller.tolerations` | controller pod tolerations | |
| `node.cloudConfigSecretName` | cloud config secret name of node driver | `azure-cloud-provider`
| `node.cloudConfigSecretNamespace` | cloud config secret namespace of node driver | `kube-system`
+| `node.allowEmptyCloudConfig` | whether to allow running the node driver without cloud config | `true`
| `node.maxUnavailable` | `maxUnavailable` value of driver node daemonset | `1`
| `node.metricsPort` | metrics port of csi-azurefile-node |`29615` |
| `node.livenessProbe.healthPort ` | health check port for liveness probe | `29613` |
@@ -110,7 +132,11 @@ The following table lists the configurable parameters of the latest Azure File C
| `snapshot.image.csiSnapshotController.tag` | snapshot-controller docker image tag | `v3.0.3` |
| `snapshot.image.csiSnapshotController.pullPolicy` | snapshot-controller image pull policy | `IfNotPresent` |
| `snapshot.snapshotController.name` | snapshot controller name | `csi-snapshot-controller` |
-| `snapshot.snapshotController.replicas` | the replicas of snapshot-controller | `1` |
+| `snapshot.snapshotController.replicas` | the replicas of snapshot-controller | `2` |
+| `snapshot.snapshotController.resources.limits.cpu` | csi-snapshot-controller cpu limits | 1 |
+| `snapshot.snapshotController.resources.limits.memory` | csi-snapshot-controller memory limits | 100Mi |
+| `snapshot.snapshotController.resources.requests.cpu` | csi-snapshot-controller cpu requests | 10m |
+| `snapshot.snapshotController.resources.requests.memory` | csi-snapshot-controller memory requests | 20Mi |
| `linux.enabled` | whether enable linux feature | `true` |
| `linux.dsName` | name of driver daemonset on linux |`csi-azurefile-node` |
| `linux.dnsPolicy` | dnsPolicy setting of driver daemonset on linux | `Default` (available values: `Default`, `ClusterFirst`, `ClusterFirstWithHostNet`, `None`)
@@ -118,11 +144,35 @@ The following table lists the configurable parameters of the latest Azure File C
| `linux.kubeconfig` | configure kubeconfig path on Linux agent node | '' (empty, use InClusterConfig by default) |
| `linux.distro` | configure ssl certificates for different Linux distributions (available values: `debian`, `fedora`) |
| `linux.tolerations` | linux node driver tolerations |
+| `linux.resources.livenessProbe.limits.cpu` | liveness-probe cpu limits | 1 |
+| `linux.resources.livenessProbe.limits.memory` | liveness-probe memory limits | 100Mi |
+| `linux.resources.livenessProbe.requests.cpu` | liveness-probe cpu requests | 10m |
+| `linux.resources.livenessProbe.requests.memory` | liveness-probe memory requests | 20Mi |
+| `linux.resources.nodeDriverRegistrar.limits.cpu` | csi-node-driver-registrar cpu limits | 200m |
+| `linux.resources.nodeDriverRegistrar.limits.memory` | csi-node-driver-registrar memory limits | 100Mi |
+| `linux.resources.nodeDriverRegistrar.requests.cpu` | csi-node-driver-registrar cpu requests | 10m |
+| `linux.resources.nodeDriverRegistrar.requests.memory` | csi-node-driver-registrar memory requests | 20Mi |
+| `linux.resources.azurefile.limits.cpu` | azurefile cpu limits | 1 |
+| `linux.resources.azurefile.limits.memory` | azurefile memory limits | 200Mi |
+| `linux.resources.azurefile.requests.cpu` | azurefile cpu requests | 10m |
+| `linux.resources.azurefile.requests.memory` | azurefile memory requests | 20Mi |
| `windows.enabled` | whether enable windows feature | `true` |
| `windows.dsName` | name of driver daemonset on windows |`csi-azurefile-node-win` |
| `windows.kubelet` | configure kubelet directory path on Windows agent node | `'C:\var\lib\kubelet'` |
| `windows.kubeconfig` | configure kubeconfig path on Windows agent node | `'C:\k\config'` |
| `windows.tolerations` | windows node driver tolerations | |
+| `windows.resources.livenessProbe.limits.cpu` | liveness-probe cpu limits | 1 |
+| `windows.resources.livenessProbe.limits.memory` | liveness-probe memory limits | 200Mi |
+| `windows.resources.livenessProbe.requests.cpu` | liveness-probe cpu requests | 10m |
+| `windows.resources.livenessProbe.requests.memory` | liveness-probe memory requests | 20Mi |
+| `windows.resources.nodeDriverRegistrar.limits.cpu` | csi-node-driver-registrar cpu limits | 200m |
+| `windows.resources.nodeDriverRegistrar.limits.memory` | csi-node-driver-registrar memory limits | 200Mi |
+| `windows.resources.nodeDriverRegistrar.requests.cpu` | csi-node-driver-registrar cpu requests | 10m |
+| `windows.resources.nodeDriverRegistrar.requests.memory` | csi-node-driver-registrar memory requests | 20Mi |
+| `windows.resources.azurefile.limits.cpu` | azurefile cpu limits | 1 |
+| `windows.resources.azurefile.limits.memory` | azurefile memory limits | 400Mi |
+| `windows.resources.azurefile.requests.cpu` | azurefile cpu requests | 10m |
+| `windows.resources.azurefile.requests.memory` | azurefile memory requests | 20Mi |
## troubleshooting
- Add `--wait -v=5 --debug` to the `helm install` command to get detailed errors
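To exercise the new chart parameters documented above, the overrides can go in a values file passed as `helm install azurefile-csi-driver azurefile-csi-driver/azurefile-csi-driver --namespace kube-system -f custom-values.yaml`. A sketch, where every key comes from the parameter table but the values are illustrative:

```yaml
# custom-values.yaml (illustrative overrides; keys match the table above)
customLabels:
  k8s-app: azurefile-csi-driver   # example label
feature:
  enableGetVolumeStats: true
controller:
  allowEmptyCloudConfig: false
  resources:
    csiProvisioner:
      limits:
        cpu: 1
        memory: 500Mi
      requests:
        cpu: 10m
        memory: 20Mi
node:
  allowEmptyCloudConfig: false
```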
diff --git a/charts/index.yaml b/charts/index.yaml
index e783c245d8..0eb7abbf10 100644
--- a/charts/index.yaml
+++ b/charts/index.yaml
@@ -3,16 +3,25 @@ entries:
azurefile-csi-driver:
- apiVersion: v1
appVersion: latest
- created: "2021-10-11T02:43:43.59088318Z"
+ created: "2021-12-04T14:04:56.612011554Z"
description: Azure File Container Storage Interface (CSI) Storage Plugin
- digest: 85c31eb051f01f0445621be8e9961b8802b6d52bf0f02d68c41650cabb29a39d
+ digest: 2ad3c8fee576ea02983126a0ecc068904f26827b2f475275d111099bae97f58f
name: azurefile-csi-driver
urls:
- - https://raw.githubusercontent.com/kubernetes-sigs/azurefile-csi-driver/master/charts/latest/azurefile-csi-driver-v1.7.0.tgz
- version: v1.7.0
+ - https://raw.githubusercontent.com/kubernetes-sigs/azurefile-csi-driver/master/charts/latest/azurefile-csi-driver-v1.8.0.tgz
+ version: v1.8.0
+ - apiVersion: v1
+ appVersion: v1.8.0
+ created: "2021-12-04T14:04:56.627689653Z"
+ description: Azure File Container Storage Interface (CSI) Storage Plugin
+ digest: 541f66b3cd8cf6565cd4c50cedab85275821b74fb2ed15eb3d1ad8d34f64dde1
+ name: azurefile-csi-driver
+ urls:
+ - https://raw.githubusercontent.com/kubernetes-sigs/azurefile-csi-driver/master/charts/v1.8.0/azurefile-csi-driver-v1.8.0.tgz
+ version: v1.8.0
- apiVersion: v1
appVersion: v1.7.0
- created: "2021-10-11T02:43:43.614171361Z"
+ created: "2021-12-04T14:04:56.626147243Z"
description: Azure File Container Storage Interface (CSI) Storage Plugin
digest: 057b3f6ef6001d3457fbffc27f90316c981a089696abd3d38bcc8de5537dfa6f
name: azurefile-csi-driver
@@ -21,7 +30,7 @@ entries:
version: v1.7.0
- apiVersion: v1
appVersion: v1.6.0
- created: "2021-10-11T02:43:43.613176353Z"
+ created: "2021-12-04T14:04:56.625174137Z"
description: Azure File Container Storage Interface (CSI) Storage Plugin
digest: cc2a0dda824cdda4e8141e26878bbb481c5a52e45785a5dbf72e54f2a376e522
name: azurefile-csi-driver
@@ -30,7 +39,7 @@ entries:
version: v1.6.0
- apiVersion: v1
appVersion: v1.5.0
- created: "2021-10-11T02:43:43.611693742Z"
+ created: "2021-12-04T14:04:56.62408433Z"
description: Azure File Container Storage Interface (CSI) Storage Plugin
digest: 2258177477415ddecd83dc46dfd88833223623224c7fe396590b617082bcd845
name: azurefile-csi-driver
@@ -39,7 +48,7 @@ entries:
version: v1.5.0
- apiVersion: v1
appVersion: v1.4.0
- created: "2021-10-11T02:43:43.610298831Z"
+ created: "2021-12-04T14:04:56.622890323Z"
description: Azure File Container Storage Interface (CSI) Storage Plugin
digest: 40e9bc4ee187789166fcb7c3c82b85b33ecd3a6096266fe74e411d6b48961ece
name: azurefile-csi-driver
@@ -48,7 +57,7 @@ entries:
version: v1.4.0
- apiVersion: v1
appVersion: v1.3.0
- created: "2021-10-11T02:43:43.608223815Z"
+ created: "2021-12-04T14:04:56.621085211Z"
description: Azure File Container Storage Interface (CSI) Storage Plugin
digest: 12942f422b7cccbfe950bbdbd5c844f5ae4b7c292f32389cba312730a6fe9a62
name: azurefile-csi-driver
@@ -57,7 +66,7 @@ entries:
version: v1.3.0
- apiVersion: v1
appVersion: v1.2.0
- created: "2021-10-11T02:43:43.606129199Z"
+ created: "2021-12-04T14:04:56.620238606Z"
description: Azure File Container Storage Interface (CSI) Storage Plugin
digest: b62f44b757416a9e1f5a91e19285f5f5056ec6068802dd9cd82373bef40c9ee9
name: azurefile-csi-driver
@@ -66,7 +75,7 @@ entries:
version: v1.2.0
- apiVersion: v1
appVersion: v1.1.0
- created: "2021-10-11T02:43:43.605159291Z"
+ created: "2021-12-04T14:04:56.619406801Z"
description: Azure File Container Storage Interface (CSI) Storage Plugin
digest: 675d96b309a1c5c491053ebbb854c046737420929c4f0692839afdaaf0db3933
name: azurefile-csi-driver
@@ -75,7 +84,7 @@ entries:
version: v1.1.0
- apiVersion: v1
appVersion: v1.0.0
- created: "2021-10-11T02:43:43.60370208Z"
+ created: "2021-12-04T14:04:56.618761697Z"
description: Azure File Container Storage Interface (CSI) Storage Plugin
digest: 6fd5e54e949ef1061a08d5477bc580204c91dde8f01da195e95dd60ade209492
name: azurefile-csi-driver
@@ -84,7 +93,7 @@ entries:
version: v1.0.0
- apiVersion: v1
appVersion: v0.10.0
- created: "2021-10-11T02:43:43.592210391Z"
+ created: "2021-12-04T14:04:56.613008861Z"
description: Azure File Container Storage Interface (CSI) Storage Plugin
digest: 8437b70c263de3ba365d4719f1cdd976e7461d217504b43d136d985b40f2f7ef
name: azurefile-csi-driver
@@ -93,7 +102,7 @@ entries:
version: v0.10.0
- apiVersion: v1
appVersion: v0.9.0
- created: "2021-10-11T02:43:43.602261769Z"
+ created: "2021-12-04T14:04:56.618083993Z"
description: Azure File Container Storage Interface (CSI) Storage Plugin
digest: ff42d33b0b98ce138e95028e084ce1fe9cecde82d2b96e22ca4aaf3e6453a15b
name: azurefile-csi-driver
@@ -102,7 +111,7 @@ entries:
version: v0.9.0
- apiVersion: v1
appVersion: v0.8.0
- created: "2021-10-11T02:43:43.600839158Z"
+ created: "2021-12-04T14:04:56.617060586Z"
description: Azure File Container Storage Interface (CSI) Storage Plugin
digest: 6811326fbd54832ef441bd54bb5a8bf5b0ca4734da1755c84b51ae713e35fb72
name: azurefile-csi-driver
@@ -111,7 +120,7 @@ entries:
version: v0.8.0
- apiVersion: v1
appVersion: v0.7.0
- created: "2021-10-11T02:43:43.599587048Z"
+ created: "2021-12-04T14:04:56.616222381Z"
description: Azure File Container Storage Interface (CSI) Storage Plugin
digest: cc529dd3f6d19acc695f79cb08656ed0d8192f449eb70af3ab94d6b76c28c38d
name: azurefile-csi-driver
@@ -120,7 +129,7 @@ entries:
version: v0.7.0
- apiVersion: v1
appVersion: v0.6.0
- created: "2021-10-11T02:43:43.59855834Z"
+ created: "2021-12-04T14:04:56.615681077Z"
description: Azure File Container Storage Interface (CSI) Storage Plugin
digest: eb6bc5333d6ed5788c0aa6d42e14e6228c1bf72c0db44de6f60c746e6f0cd3dd
name: azurefile-csi-driver
@@ -129,7 +138,7 @@ entries:
version: v0.6.0
- apiVersion: v1
appVersion: v0.5.0
- created: "2021-10-11T02:43:43.59727663Z"
+ created: "2021-12-04T14:04:56.615242275Z"
description: Azure File Container Storage Interface (CSI) Storage Plugin
digest: 2d16cae9af5e306e4b3f1cba341532089e9871abe31d9ff20f6751215f908a01
name: azurefile-csi-driver
@@ -138,7 +147,7 @@ entries:
version: v0.5.0
- apiVersion: v1
appVersion: v0.4.0
- created: "2021-10-11T02:43:43.59599892Z"
+ created: "2021-12-04T14:04:56.614766272Z"
description: Azure File Container Storage Interface (CSI) Storage Plugin
digest: fcc251b21abb2a5aee4df751424b33705850226119c1bfbb0c741d0ce020d1f2
name: azurefile-csi-driver
@@ -147,7 +156,7 @@ entries:
version: v0.4.0
- apiVersion: v1
appVersion: 0.3.0
- created: "2021-10-11T02:43:43.595004312Z"
+ created: "2021-12-04T14:04:56.614308069Z"
description: Azure File Container Storage Interface (CSI) Storage Plugin
digest: d0fa02e4c7c15f106050c6d795e97b437ec8be359d33130ca02509effc1d7082
name: azurefile-csi-driver
@@ -156,11 +165,11 @@ entries:
version: 0.3.0
- apiVersion: v1
appVersion: 0.2.0
- created: "2021-10-11T02:43:43.593729003Z"
+ created: "2021-12-04T14:04:56.613650365Z"
description: Azure File Container Storage Interface (CSI) Storage Plugin
digest: fde4b97c4a57b1aa6a9345e25c29eccc6b83da7746b465813473a955a4fe9896
name: azurefile-csi-driver
urls:
- https://raw.githubusercontent.com/kubernetes-sigs/azurefile-csi-driver/master/charts/v0.2.0/azurefile-csi-driver-0.2.0.tgz
version: 0.2.0
-generated: "2021-10-11T02:43:43.589402669Z"
+generated: "2021-12-04T14:04:56.610484045Z"
diff --git a/charts/latest/azurefile-csi-driver-v1.7.0.tgz b/charts/latest/azurefile-csi-driver-v1.7.0.tgz
deleted file mode 100644
index f84cd4b082..0000000000
Binary files a/charts/latest/azurefile-csi-driver-v1.7.0.tgz and /dev/null differ
diff --git a/charts/latest/azurefile-csi-driver-v1.8.0.tgz b/charts/latest/azurefile-csi-driver-v1.8.0.tgz
new file mode 100644
index 0000000000..fcbe1b4665
Binary files /dev/null and b/charts/latest/azurefile-csi-driver-v1.8.0.tgz differ
diff --git a/charts/latest/azurefile-csi-driver/Chart.yaml b/charts/latest/azurefile-csi-driver/Chart.yaml
index 50d139cd1e..a2a09561c8 100755
--- a/charts/latest/azurefile-csi-driver/Chart.yaml
+++ b/charts/latest/azurefile-csi-driver/Chart.yaml
@@ -2,4 +2,4 @@ apiVersion: v1
appVersion: latest
description: Azure File Container Storage Interface (CSI) Storage Plugin
name: azurefile-csi-driver
-version: v1.7.0
+version: v1.8.0
diff --git a/charts/latest/azurefile-csi-driver/templates/_helpers.tpl b/charts/latest/azurefile-csi-driver/templates/_helpers.tpl
index b570b72f9f..b1bf4dc1b6 100644
--- a/charts/latest/azurefile-csi-driver/templates/_helpers.tpl
+++ b/charts/latest/azurefile-csi-driver/templates/_helpers.tpl
@@ -5,16 +5,39 @@
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
-{{/* labels for helm resources */}}
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "azurefile.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Common selectors.
+*/}}
+{{- define "azurefile.selectorLabels" -}}
+app.kubernetes.io/name: {{ template "azurefile.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end -}}
+
+{{/*
+Common labels.
+*/}}
{{- define "azurefile.labels" -}}
-labels:
- app.kubernetes.io/instance: "{{ .Release.Name }}"
- app.kubernetes.io/managed-by: "{{ .Release.Service }}"
- app.kubernetes.io/name: "{{ template "azurefile.name" . }}"
- app.kubernetes.io/version: "{{ .Chart.AppVersion }}"
- helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
+{{- include "azurefile.selectorLabels" . }}
+app.kubernetes.io/component: csi-driver
+app.kubernetes.io/part-of: {{ template "azurefile.name" . }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+helm.sh/chart: {{ template "azurefile.chart" . }}
+{{- if .Values.customLabels }}
+{{ toYaml .Values.customLabels }}
+{{- end }}
{{- end -}}
+
{{/* pull secrets for containers */}}
{{- define "azurefile.pullSecrets" -}}
{{- if .Values.imagePullSecrets }}
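For a release installed as, say, `helm install azurefile-csi-driver ...` (release name assumed), the refactored `azurefile.labels` helper in the latest chart renders to roughly:

```yaml
# Approximate output of {{ include "azurefile.labels" . }}; the instance
# value tracks the (hypothetical) release name, appVersion is "latest"
# per the latest Chart.yaml, and any customLabels entries are appended
# verbatim at the end.
app.kubernetes.io/name: azurefile-csi-driver
app.kubernetes.io/instance: azurefile-csi-driver
app.kubernetes.io/component: csi-driver
app.kubernetes.io/part-of: azurefile-csi-driver
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/version: "latest"
helm.sh/chart: azurefile-csi-driver-v1.8.0
```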
diff --git a/charts/latest/azurefile-csi-driver/templates/csi-azurefile-controller.yaml b/charts/latest/azurefile-csi-driver/templates/csi-azurefile-controller.yaml
index e603a654ee..f83267e863 100644
--- a/charts/latest/azurefile-csi-driver/templates/csi-azurefile-controller.yaml
+++ b/charts/latest/azurefile-csi-driver/templates/csi-azurefile-controller.yaml
@@ -3,15 +3,19 @@ apiVersion: apps/v1
metadata:
name: {{ .Values.controller.name }}
namespace: {{ .Release.Namespace }}
-{{ include "azurefile.labels" . | indent 2 }}
+ labels:
+ app: {{ .Values.controller.name }}
+ {{- include "azurefile.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.controller.replicas }}
selector:
matchLabels:
+ {{- include "azurefile.selectorLabels" . | nindent 6 }}
app: {{ .Values.controller.name }}
template:
metadata:
-{{ include "azurefile.labels" . | indent 6 }}
+ labels:
+ {{- include "azurefile.labels" . | nindent 8 }}
app: {{ .Values.controller.name }}
spec:
hostNetwork: {{ .Values.controller.hostNetwork }}
@@ -54,13 +58,7 @@ spec:
volumeMounts:
- mountPath: /csi
name: socket-dir
- resources:
- limits:
- cpu: 100m
- memory: 300Mi
- requests:
- cpu: 10m
- memory: 20Mi
+ resources: {{- toYaml .Values.controller.resources.csiProvisioner | nindent 12 }}
- name: csi-attacher
{{- if hasPrefix "/" .Values.image.csiAttacher.repository }}
image: "{{ .Values.image.baseRepo }}{{ .Values.image.csiAttacher.repository }}:{{ .Values.image.csiAttacher.tag }}"
@@ -79,13 +77,7 @@ spec:
volumeMounts:
- mountPath: /csi
name: socket-dir
- resources:
- limits:
- cpu: 100m
- memory: 200Mi
- requests:
- cpu: 10m
- memory: 20Mi
+ resources: {{- toYaml .Values.controller.resources.csiAttacher | nindent 12 }}
- name: csi-snapshotter
{{- if eq .Values.snapshot.apiVersion "beta" }}
{{- if hasPrefix "/" .Values.snapshot.image.csiSnapshotter.repository }}
@@ -110,13 +102,7 @@ spec:
volumeMounts:
- name: socket-dir
mountPath: /csi
- resources:
- limits:
- cpu: 100m
- memory: 100Mi
- requests:
- cpu: 10m
- memory: 20Mi
+ resources: {{- toYaml .Values.controller.resources.csiSnapshotter | nindent 12 }}
- name: csi-resizer
{{- if hasPrefix "/" .Values.image.csiResizer.repository }}
image: "{{ .Values.image.baseRepo }}{{ .Values.image.csiResizer.repository }}:{{ .Values.image.csiResizer.tag }}"
@@ -135,13 +121,7 @@ spec:
volumeMounts:
- name: socket-dir
mountPath: /csi
- resources:
- limits:
- cpu: 100m
- memory: 300Mi
- requests:
- cpu: 10m
- memory: 20Mi
+ resources: {{- toYaml .Values.controller.resources.csiResizer | nindent 12 }}
- name: liveness-probe
{{- if hasPrefix "/" .Values.image.livenessProbe.repository }}
image: "{{ .Values.image.baseRepo }}{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}"
@@ -157,13 +137,7 @@ spec:
volumeMounts:
- name: socket-dir
mountPath: /csi
- resources:
- limits:
- cpu: 100m
- memory: 100Mi
- requests:
- cpu: 10m
- memory: 20Mi
+ resources: {{- toYaml .Values.controller.resources.livenessProbe | nindent 12 }}
- name: azurefile
{{- if hasPrefix "/" .Values.image.azurefile.repository }}
image: "{{ .Values.image.baseRepo }}{{ .Values.image.azurefile.repository }}:{{ .Values.image.azurefile.tag }}"
@@ -180,6 +154,7 @@ spec:
- "--cloud-config-secret-namespace={{ .Values.controller.cloudConfigSecretNamespace }}"
- "--custom-user-agent={{ .Values.driver.customUserAgent }}"
- "--user-agent-suffix={{ .Values.driver.userAgentSuffix }}"
+ - "--allow-empty-cloud-config={{ .Values.controller.allowEmptyCloudConfig }}"
ports:
- containerPort: {{ .Values.controller.livenessProbe.healthPort }}
name: healthz
@@ -218,13 +193,7 @@ spec:
mountPath: /etc/pki/ca-trust/extracted
readOnly: true
{{- end }}
- resources:
- limits:
- cpu: 200m
- memory: 200Mi
- requests:
- cpu: 10m
- memory: 20Mi
+ resources: {{- toYaml .Values.controller.resources.azurefile | nindent 12 }}
volumes:
- name: socket-dir
emptyDir: {}
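Each `resources: {{- toYaml ... | nindent 12 }}` line above swaps the hard-coded requests/limits for the corresponding block in values.yaml, so with the chart defaults the csi-provisioner container renders as:

```yaml
# Rendered container fragment using the default
# controller.resources.csiProvisioner values from values.yaml.
resources:
  limits:
    cpu: 1
    memory: 500Mi
  requests:
    cpu: 10m
    memory: 20Mi
```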
diff --git a/charts/latest/azurefile-csi-driver/templates/csi-azurefile-driver.yaml b/charts/latest/azurefile-csi-driver/templates/csi-azurefile-driver.yaml
index 87a73a98d9..416d716e34 100644
--- a/charts/latest/azurefile-csi-driver/templates/csi-azurefile-driver.yaml
+++ b/charts/latest/azurefile-csi-driver/templates/csi-azurefile-driver.yaml
@@ -3,6 +3,8 @@ apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
name: {{ .Values.driver.name }}
+ labels:
+ {{- include "azurefile.labels" . | nindent 4 }}
annotations:
csiDriver: "{{ .Values.image.azurefile.tag }}"
{{- if eq .Values.snapshot.apiVersion "beta" }}
diff --git a/charts/latest/azurefile-csi-driver/templates/csi-azurefile-node-windows.yaml b/charts/latest/azurefile-csi-driver/templates/csi-azurefile-node-windows.yaml
index de81f25865..cdc45e7691 100644
--- a/charts/latest/azurefile-csi-driver/templates/csi-azurefile-node-windows.yaml
+++ b/charts/latest/azurefile-csi-driver/templates/csi-azurefile-node-windows.yaml
@@ -4,7 +4,9 @@ apiVersion: apps/v1
metadata:
name: {{ .Values.windows.dsName }}
namespace: {{ .Release.Namespace }}
-{{ include "azurefile.labels" . | indent 2 }}
+ labels:
+ app: {{ .Values.windows.dsName }}
+ {{- include "azurefile.labels" . | nindent 4 }}
spec:
updateStrategy:
rollingUpdate:
@@ -13,10 +15,12 @@ spec:
selector:
matchLabels:
app: {{ .Values.windows.dsName }}
+ {{- include "azurefile.selectorLabels" . | nindent 6 }}
template:
metadata:
-{{ include "azurefile.labels" . | indent 6 }}
+ labels:
app: {{ .Values.windows.dsName }}
+ {{- include "azurefile.labels" . | nindent 8 }}
spec:
serviceAccountName: {{ .Values.serviceAccount.node }}
{{- with .Values.windows.tolerations }}
@@ -52,13 +56,7 @@ spec:
- name: CSI_ENDPOINT
value: unix://C:\\csi\\csi.sock
imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }}
- resources:
- limits:
- cpu: 200m
- memory: 200Mi
- requests:
- cpu: 10m
- memory: 20Mi
+ resources: {{- toYaml .Values.windows.resources.livenessProbe | nindent 12 }}
- name: node-driver-registrar
{{- if hasPrefix "/" .Values.image.nodeDriverRegistrar.repository }}
image: "{{ .Values.image.baseRepo }}{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}"
@@ -75,8 +73,8 @@ spec:
- /csi-node-driver-registrar.exe
- --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
- --mode=kubelet-registration-probe
- initialDelaySeconds: 30
- timeoutSeconds: 15
+ initialDelaySeconds: 60
+ timeoutSeconds: 30
env:
- name: CSI_ENDPOINT
value: unix://C:\\csi\\csi.sock
@@ -94,13 +92,7 @@ spec:
mountPath: C:\csi
- name: registration-dir
mountPath: C:\registration
- resources:
- limits:
- cpu: 200m
- memory: 200Mi
- requests:
- cpu: 10m
- memory: 20Mi
+ resources: {{- toYaml .Values.windows.resources.nodeDriverRegistrar | nindent 12 }}
- name: azurefile
{{- if hasPrefix "/" .Values.image.azurefile.repository }}
image: "{{ .Values.image.baseRepo }}{{ .Values.image.azurefile.repository }}:{{ .Values.image.azurefile.tag }}"
@@ -118,6 +110,8 @@ spec:
- "--cloud-config-secret-namespace={{ .Values.node.cloudConfigSecretNamespace }}"
- "--custom-user-agent={{ .Values.driver.customUserAgent }}"
- "--user-agent-suffix={{ .Values.driver.userAgentSuffix }}"
+ - "--allow-empty-cloud-config={{ .Values.node.allowEmptyCloudConfig }}"
+ - "--enable-get-volume-stats={{ .Values.feature.enableGetVolumeStats }}"
ports:
- containerPort: {{ .Values.node.livenessProbe.healthPort }}
name: healthz
@@ -162,13 +156,7 @@ spec:
mountPath: \\.\pipe\csi-proxy-filesystem-v1beta1
- name: csi-proxy-smb-pipe-v1beta1
mountPath: \\.\pipe\csi-proxy-smb-v1beta1
- resources:
- limits:
- cpu: 400m
- memory: 400Mi
- requests:
- cpu: 10m
- memory: 20Mi
+ resources: {{- toYaml .Values.windows.resources.azurefile | nindent 12 }}
volumes:
- name: csi-proxy-fs-pipe-v1
hostPath:
diff --git a/charts/latest/azurefile-csi-driver/templates/csi-azurefile-node.yaml b/charts/latest/azurefile-csi-driver/templates/csi-azurefile-node.yaml
index 57906d4726..4f6c8e21f4 100644
--- a/charts/latest/azurefile-csi-driver/templates/csi-azurefile-node.yaml
+++ b/charts/latest/azurefile-csi-driver/templates/csi-azurefile-node.yaml
@@ -4,7 +4,9 @@ apiVersion: apps/v1
metadata:
name: {{ .Values.linux.dsName }}
namespace: {{ .Release.Namespace }}
-{{ include "azurefile.labels" . | indent 2 }}
+ labels:
+ app: {{ .Values.linux.dsName }}
+ {{- include "azurefile.labels" . | nindent 4 }}
spec:
updateStrategy:
rollingUpdate:
@@ -13,10 +15,12 @@ spec:
selector:
matchLabels:
app: {{ .Values.linux.dsName }}
+ {{- include "azurefile.selectorLabels" . | nindent 6 }}
template:
metadata:
-{{ include "azurefile.labels" . | indent 6 }}
+ labels:
app: {{ .Values.linux.dsName }}
+ {{- include "azurefile.labels" . | nindent 8 }}
spec:
hostNetwork: true
dnsPolicy: {{ .Values.linux.dnsPolicy }}
@@ -51,13 +55,7 @@ spec:
- --health-port={{ .Values.node.livenessProbe.healthPort }}
- --v=2
imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }}
- resources:
- limits:
- cpu: 100m
- memory: 100Mi
- requests:
- cpu: 10m
- memory: 20Mi
+ resources: {{- toYaml .Values.linux.resources.livenessProbe | nindent 12 }}
- name: node-driver-registrar
{{- if hasPrefix "/" .Values.image.nodeDriverRegistrar.repository }}
image: "{{ .Values.image.baseRepo }}{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}"
@@ -68,10 +66,6 @@ spec:
- --csi-address=$(ADDRESS)
- --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
- --v=2
- lifecycle:
- preStop:
- exec:
- command: ["/bin/sh", "-c", "rm -rf /registration/{{ .Values.driver.name }}-reg.sock /csi/csi.sock"]
livenessProbe:
exec:
command:
@@ -91,13 +85,7 @@ spec:
mountPath: /csi
- name: registration-dir
mountPath: /registration
- resources:
- limits:
- cpu: 100m
- memory: 100Mi
- requests:
- cpu: 10m
- memory: 20Mi
+ resources: {{- toYaml .Values.linux.resources.nodeDriverRegistrar | nindent 12 }}
- name: azurefile
{{- if hasPrefix "/" .Values.image.azurefile.repository }}
image: "{{ .Values.image.baseRepo }}{{ .Values.image.azurefile.repository }}:{{ .Values.image.azurefile.tag }}"
@@ -115,6 +103,8 @@ spec:
- "--cloud-config-secret-namespace={{ .Values.node.cloudConfigSecretNamespace }}"
- "--custom-user-agent={{ .Values.driver.customUserAgent }}"
- "--user-agent-suffix={{ .Values.driver.userAgentSuffix }}"
+ - "--allow-empty-cloud-config={{ .Values.node.allowEmptyCloudConfig }}"
+ - "--enable-get-volume-stats={{ .Values.feature.enableGetVolumeStats }}"
ports:
- containerPort: {{ .Values.node.livenessProbe.healthPort }}
name: healthz
@@ -162,13 +152,7 @@ spec:
mountPath: /etc/pki/ca-trust/extracted
readOnly: true
{{- end }}
- resources:
- limits:
- cpu: 400m
- memory: 300Mi
- requests:
- cpu: 10m
- memory: 20Mi
+ resources: {{- toYaml .Values.linux.resources.azurefile | nindent 12 }}
volumes:
- hostPath:
path: {{ .Values.linux.kubelet }}/plugins/{{ .Values.driver.name }}
diff --git a/charts/latest/azurefile-csi-driver/templates/csi-snapshot-controller.yaml b/charts/latest/azurefile-csi-driver/templates/csi-snapshot-controller.yaml
index 5e79c8dbf9..33fe68dc42 100644
--- a/charts/latest/azurefile-csi-driver/templates/csi-snapshot-controller.yaml
+++ b/charts/latest/azurefile-csi-driver/templates/csi-snapshot-controller.yaml
@@ -4,16 +4,20 @@ apiVersion: apps/v1
metadata:
name: {{ .Values.snapshot.snapshotController.name}}
namespace: {{ .Release.Namespace }}
-{{ include "azurefile.labels" . | indent 2 }}
+ labels:
+ app: {{ .Values.snapshot.snapshotController.name}}
+ {{- include "azurefile.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.snapshot.snapshotController.replicas }}
selector:
matchLabels:
app: {{ .Values.snapshot.snapshotController.name}}
+ {{- include "azurefile.selectorLabels" . | nindent 6 }}
template:
metadata:
-{{ include "azurefile.labels" . | indent 6 }}
+ labels:
app: {{ .Values.snapshot.snapshotController.name}}
+ {{- include "azurefile.labels" . | nindent 8 }}
spec:
serviceAccountName: {{ .Values.serviceAccount.snapshotController }}
nodeSelector:
@@ -48,13 +52,7 @@ spec:
{{- end }}
args:
- "--v=2"
- - "--leader-election=false"
- resources:
- limits:
- cpu: 100m
- memory: 100Mi
- requests:
- cpu: 10m
- memory: 20Mi
+ - "--leader-election=true"
+ resources: {{- toYaml .Values.snapshot.snapshotController.resources | nindent 12 }}
imagePullPolicy: {{ .Values.snapshot.image.csiSnapshotController.pullPolicy }}
{{- end -}}
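Turning on `--leader-election=true` pairs with the replica bump to 2 in values.yaml: both snapshot-controller pods run, but only the elected leader reconciles, using the `coordination.k8s.io` leases granted in rbac-csi-snapshot-controller.yaml. With the chart defaults the container args render as:

```yaml
# Rendered snapshot-controller args (chart defaults).
args:
  - "--v=2"
  - "--leader-election=true"
```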
diff --git a/charts/latest/azurefile-csi-driver/templates/rbac-csi-azurefile-controller.yaml b/charts/latest/azurefile-csi-driver/templates/rbac-csi-azurefile-controller.yaml
index 99b5e942af..3b3513c8f9 100644
--- a/charts/latest/azurefile-csi-driver/templates/rbac-csi-azurefile-controller.yaml
+++ b/charts/latest/azurefile-csi-driver/templates/rbac-csi-azurefile-controller.yaml
@@ -3,7 +3,8 @@ kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ .Values.rbac.name }}-external-provisioner-role
-{{ include "azurefile.labels" . | indent 2 }}
+ labels:
+ {{- include "azurefile.labels" . | nindent 4 }}
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
@@ -39,7 +40,8 @@ kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ .Values.rbac.name }}-csi-provisioner-binding
-{{ include "azurefile.labels" . | indent 2 }}
+ labels:
+ {{- include "azurefile.labels" . | nindent 4 }}
subjects:
- kind: ServiceAccount
name: {{ .Values.serviceAccount.controller }}
@@ -55,7 +57,8 @@ kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ .Values.rbac.name }}-external-attacher-role
-{{ include "azurefile.labels" . | indent 2 }}
+ labels:
+ {{- include "azurefile.labels" . | nindent 4 }}
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
@@ -81,7 +84,8 @@ kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ .Values.rbac.name }}-csi-attacher-binding
-{{ include "azurefile.labels" . | indent 2 }}
+ labels:
+ {{- include "azurefile.labels" . | nindent 4 }}
subjects:
- kind: ServiceAccount
name: {{ .Values.serviceAccount.controller }}
@@ -97,7 +101,8 @@ kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ .Values.rbac.name }}-external-snapshotter-role
-{{ include "azurefile.labels" . | indent 2 }}
+ labels:
+ {{- include "azurefile.labels" . | nindent 4 }}
rules:
- apiGroups: [""]
resources: ["events"]
@@ -126,7 +131,8 @@ kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ .Values.rbac.name }}-csi-snapshotter-binding
-{{ include "azurefile.labels" . | indent 2 }}
+ labels:
+ {{- include "azurefile.labels" . | nindent 4 }}
subjects:
- kind: ServiceAccount
name: {{ .Values.serviceAccount.controller }}
@@ -141,7 +147,8 @@ kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ .Values.rbac.name }}-external-resizer-role
-{{ include "azurefile.labels" . | indent 2 }}
+ labels:
+ {{- include "azurefile.labels" . | nindent 4 }}
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
@@ -163,7 +170,8 @@ kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ .Values.rbac.name }}-csi-resizer-role
-{{ include "azurefile.labels" . | indent 2 }}
+ labels:
+ {{- include "azurefile.labels" . | nindent 4 }}
subjects:
- kind: ServiceAccount
name: {{ .Values.serviceAccount.controller }}
@@ -178,6 +186,8 @@ kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-{{ .Values.rbac.name }}-controller-secret-role
+ labels:
+ {{- include "azurefile.labels" . | nindent 4 }}
rules:
- apiGroups: [""]
resources: ["secrets"]
@@ -188,6 +198,8 @@ kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-{{ .Values.rbac.name }}-controller-secret-binding
+ labels:
+ {{- include "azurefile.labels" . | nindent 4 }}
subjects:
- kind: ServiceAccount
name: {{ .Values.serviceAccount.controller }}
diff --git a/charts/latest/azurefile-csi-driver/templates/rbac-csi-azurefile-node.yaml b/charts/latest/azurefile-csi-driver/templates/rbac-csi-azurefile-node.yaml
index b4a30373ce..485556144a 100644
--- a/charts/latest/azurefile-csi-driver/templates/rbac-csi-azurefile-node.yaml
+++ b/charts/latest/azurefile-csi-driver/templates/rbac-csi-azurefile-node.yaml
@@ -4,6 +4,8 @@ kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-{{ .Values.rbac.name }}-node-secret-role
+ labels:
+ {{- include "azurefile.labels" . | nindent 4 }}
rules:
- apiGroups: [""]
resources: ["secrets"]
@@ -14,6 +16,8 @@ kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-{{ .Values.rbac.name }}-node-secret-binding
+ labels:
+ {{- include "azurefile.labels" . | nindent 4 }}
subjects:
- kind: ServiceAccount
name: {{ .Values.serviceAccount.node }}
diff --git a/charts/latest/azurefile-csi-driver/templates/rbac-csi-snapshot-controller.yaml b/charts/latest/azurefile-csi-driver/templates/rbac-csi-snapshot-controller.yaml
index bbca06976f..58600f1211 100644
--- a/charts/latest/azurefile-csi-driver/templates/rbac-csi-snapshot-controller.yaml
+++ b/charts/latest/azurefile-csi-driver/templates/rbac-csi-snapshot-controller.yaml
@@ -3,6 +3,8 @@ kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-snapshot-controller-role
+ labels:
+ {{- include "azurefile.labels" . | nindent 4 }}
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
@@ -34,6 +36,8 @@ kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-snapshot-controller-binding
+ labels:
+ {{- include "azurefile.labels" . | nindent 4 }}
subjects:
- kind: ServiceAccount
name: {{ .Values.serviceAccount.snapshotController }}
@@ -48,6 +52,8 @@ kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-snapshot-controller-leaderelection-role
+ labels:
+ {{- include "azurefile.labels" . | nindent 4 }}
rules:
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
@@ -58,6 +64,8 @@ kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-snapshot-controller-leaderelection-binding
+ labels:
+ {{- include "azurefile.labels" . | nindent 4 }}
subjects:
- kind: ServiceAccount
name: {{ .Values.serviceAccount.snapshotController }}
diff --git a/charts/latest/azurefile-csi-driver/templates/serviceaccount-csi-azurefile-controller.yaml b/charts/latest/azurefile-csi-driver/templates/serviceaccount-csi-azurefile-controller.yaml
index 49ba3bf970..66e0726acb 100644
--- a/charts/latest/azurefile-csi-driver/templates/serviceaccount-csi-azurefile-controller.yaml
+++ b/charts/latest/azurefile-csi-driver/templates/serviceaccount-csi-azurefile-controller.yaml
@@ -4,5 +4,6 @@ kind: ServiceAccount
metadata:
name: {{ .Values.serviceAccount.controller }}
namespace: {{ .Release.Namespace }}
-{{ include "azurefile.labels" . | indent 2 }}
+ labels:
+ {{- include "azurefile.labels" . | nindent 4 }}
{{- end -}}
diff --git a/charts/latest/azurefile-csi-driver/templates/serviceaccount-csi-azurefile-node.yaml b/charts/latest/azurefile-csi-driver/templates/serviceaccount-csi-azurefile-node.yaml
index af58cf09b9..697b8db390 100644
--- a/charts/latest/azurefile-csi-driver/templates/serviceaccount-csi-azurefile-node.yaml
+++ b/charts/latest/azurefile-csi-driver/templates/serviceaccount-csi-azurefile-node.yaml
@@ -4,5 +4,6 @@ kind: ServiceAccount
metadata:
name: {{ .Values.serviceAccount.node }}
namespace: {{ .Release.Namespace }}
-{{ include "azurefile.labels" . | indent 2 }}
+ labels:
+ {{- include "azurefile.labels" . | nindent 4 }}
{{- end -}}
diff --git a/charts/latest/azurefile-csi-driver/templates/serviceaccount-csi-snapshot-controller.yaml b/charts/latest/azurefile-csi-driver/templates/serviceaccount-csi-snapshot-controller.yaml
index 7cdaad0b4f..e77ef8f991 100644
--- a/charts/latest/azurefile-csi-driver/templates/serviceaccount-csi-snapshot-controller.yaml
+++ b/charts/latest/azurefile-csi-driver/templates/serviceaccount-csi-snapshot-controller.yaml
@@ -4,4 +4,6 @@ kind: ServiceAccount
metadata:
name: {{ .Values.serviceAccount.snapshotController }}
namespace: {{ .Release.Namespace }}
+ labels:
+ {{- include "azurefile.labels" . | nindent 4 }}
{{- end -}}
diff --git a/charts/latest/azurefile-csi-driver/values.yaml b/charts/latest/azurefile-csi-driver/values.yaml
index 67cfcd24c5..6c0870c7f1 100755
--- a/charts/latest/azurefile-csi-driver/values.yaml
+++ b/charts/latest/azurefile-csi-driver/values.yaml
@@ -2,7 +2,7 @@ image:
baseRepo: mcr.microsoft.com
azurefile:
repository: /k8s/csi/azurefile-csi
- tag: latest
+ tag: v1.8.0
pullPolicy: IfNotPresent
csiProvisioner:
repository: /oss/kubernetes-csi/csi-provisioner
@@ -18,11 +18,11 @@ image:
pullPolicy: IfNotPresent
livenessProbe:
repository: /oss/kubernetes-csi/livenessprobe
- tag: v2.4.0
+ tag: v2.5.0
pullPolicy: IfNotPresent
nodeDriverRegistrar:
repository: /oss/kubernetes-csi/csi-node-driver-registrar
- tag: v2.3.0
+ tag: v2.4.0
pullPolicy: IfNotPresent
## Reference to one or more secrets to be used when pulling images
@@ -30,6 +30,10 @@ image:
imagePullSecrets: []
# - name: myRegistryKeySecretName
+# -- Custom labels to add to resource metadata
+customLabels: {}
+ # k8s-app: azurefile-csi-driver
+
serviceAccount:
create: true # When true, service accounts will be created for you. Set to false if you want to use your own.
controller: csi-azurefile-controller-sa # Name of Service Account to be created or used
@@ -44,6 +48,7 @@ controller:
name: csi-azurefile-controller
cloudConfigSecretName: azure-cloud-provider
cloudConfigSecretNamespace: kube-system
+ allowEmptyCloudConfig: true
replicas: 2
hostNetwork: true # this setting could be disabled if controller does not depend on MSI setting
metricsPort: 29614
@@ -52,6 +57,49 @@ controller:
runOnMaster: false
attachRequired: false
logLevel: 5
+ resources:
+ csiProvisioner:
+ limits:
+ cpu: 1
+ memory: 500Mi
+ requests:
+ cpu: 10m
+ memory: 20Mi
+ csiAttacher:
+ limits:
+ cpu: 1
+ memory: 500Mi
+ requests:
+ cpu: 10m
+ memory: 20Mi
+ csiResizer:
+ limits:
+ cpu: 1
+ memory: 500Mi
+ requests:
+ cpu: 10m
+ memory: 20Mi
+ csiSnapshotter:
+ limits:
+ cpu: 1
+ memory: 100Mi
+ requests:
+ cpu: 10m
+ memory: 20Mi
+ livenessProbe:
+ limits:
+ cpu: 1
+ memory: 100Mi
+ requests:
+ cpu: 10m
+ memory: 20Mi
+ azurefile:
+ limits:
+ cpu: 1
+ memory: 200Mi
+ requests:
+ cpu: 10m
+ memory: 20Mi
kubeconfig: ""
tolerations:
- key: "node-role.kubernetes.io/master"
@@ -64,6 +112,7 @@ controller:
node:
cloudConfigSecretName: azure-cloud-provider
cloudConfigSecretNamespace: kube-system
+ allowEmptyCloudConfig: true
metricsPort: 29615
livenessProbe:
healthPort: 29613
@@ -85,10 +134,18 @@ snapshot:
pullPolicy: IfNotPresent
snapshotController:
name: csi-snapshot-controller
- replicas: 1
+ replicas: 2
+ resources:
+ limits:
+ cpu: 1
+ memory: 100Mi
+ requests:
+ cpu: 10m
+ memory: 20Mi
feature:
enableFSGroupPolicy: false
+ enableGetVolumeStats: false
driver:
name: file.csi.azure.com
@@ -102,6 +159,28 @@ linux:
kubelet: /var/lib/kubelet
kubeconfig: ""
distro: debian # available values: debian, fedora
+ resources:
+ livenessProbe:
+ limits:
+ cpu: 1
+ memory: 100Mi
+ requests:
+ cpu: 10m
+ memory: 20Mi
+ nodeDriverRegistrar:
+ limits:
+ cpu: 1
+ memory: 100Mi
+ requests:
+ cpu: 10m
+ memory: 20Mi
+ azurefile:
+ limits:
+ cpu: 1
+ memory: 300Mi
+ requests:
+ cpu: 10m
+ memory: 20Mi
tolerations:
- operator: "Exists"
nodeAffinity:
@@ -118,6 +197,28 @@ windows:
dsName: csi-azurefile-node-win # daemonset name
kubelet: 'C:\var\lib\kubelet'
kubeconfig: 'C:\\k\\config'
+ resources:
+ livenessProbe:
+ limits:
+ cpu: 1
+ memory: 200Mi
+ requests:
+ cpu: 10m
+ memory: 20Mi
+ nodeDriverRegistrar:
+ limits:
+ cpu: 1
+ memory: 200Mi
+ requests:
+ cpu: 10m
+ memory: 20Mi
+ azurefile:
+ limits:
+ cpu: 1
+ memory: 400Mi
+ requests:
+ cpu: 10m
+ memory: 20Mi
tolerations:
- key: "node.kubernetes.io/os"
operator: "Exists"
diff --git a/charts/v1.8.0/azurefile-csi-driver-v1.8.0.tgz b/charts/v1.8.0/azurefile-csi-driver-v1.8.0.tgz
new file mode 100644
index 0000000000..518245fb21
Binary files /dev/null and b/charts/v1.8.0/azurefile-csi-driver-v1.8.0.tgz differ
diff --git a/charts/v1.8.0/azurefile-csi-driver/Chart.yaml b/charts/v1.8.0/azurefile-csi-driver/Chart.yaml
new file mode 100755
index 0000000000..644338a198
--- /dev/null
+++ b/charts/v1.8.0/azurefile-csi-driver/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+appVersion: v1.8.0
+description: Azure File Container Storage Interface (CSI) Storage Plugin
+name: azurefile-csi-driver
+version: v1.8.0
diff --git a/charts/v1.8.0/azurefile-csi-driver/templates/NOTES.txt b/charts/v1.8.0/azurefile-csi-driver/templates/NOTES.txt
new file mode 100644
index 0000000000..3fadd8ad36
--- /dev/null
+++ b/charts/v1.8.0/azurefile-csi-driver/templates/NOTES.txt
@@ -0,0 +1,5 @@
+The Azure File CSI Driver is being deployed to your cluster.
+
+To check the Azure File CSI Driver pod status, please run:
+
+  kubectl --namespace={{ .Release.Namespace }} get pods --selector="app.kubernetes.io/instance={{ .Release.Name }}" --watch
diff --git a/charts/v1.8.0/azurefile-csi-driver/templates/_helpers.tpl b/charts/v1.8.0/azurefile-csi-driver/templates/_helpers.tpl
new file mode 100644
index 0000000000..b1bf4dc1b6
--- /dev/null
+++ b/charts/v1.8.0/azurefile-csi-driver/templates/_helpers.tpl
@@ -0,0 +1,49 @@
+{{/* vim: set filetype=mustache: */}}
+
+{{/* Expand the name of the chart.*/}}
+{{- define "azurefile.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "azurefile.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Common selectors.
+*/}}
+{{- define "azurefile.selectorLabels" -}}
+app.kubernetes.io/name: {{ template "azurefile.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end -}}
+
+{{/*
+Common labels.
+*/}}
+{{- define "azurefile.labels" -}}
+{{- include "azurefile.selectorLabels" . }}
+app.kubernetes.io/component: csi-driver
+app.kubernetes.io/part-of: {{ template "azurefile.name" . }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+helm.sh/chart: {{ template "azurefile.chart" . }}
+{{- if .Values.customLabels }}
+{{ toYaml .Values.customLabels }}
+{{- end }}
+{{- end -}}
+
+
+{{/* pull secrets for containers */}}
+{{- define "azurefile.pullSecrets" -}}
+{{- if .Values.imagePullSecrets }}
+imagePullSecrets:
+{{- range .Values.imagePullSecrets }}
+ - name: {{ . }}
+{{- end }}
+{{- end }}
+{{- end -}}
diff --git a/charts/v1.8.0/azurefile-csi-driver/templates/crd-csi-snapshot-ga.yaml b/charts/v1.8.0/azurefile-csi-driver/templates/crd-csi-snapshot-ga.yaml
new file mode 100644
index 0000000000..efbb46b1da
--- /dev/null
+++ b/charts/v1.8.0/azurefile-csi-driver/templates/crd-csi-snapshot-ga.yaml
@@ -0,0 +1,655 @@
+{{- if .Values.snapshot.enabled -}}
+{{- if eq .Values.snapshot.apiVersion "ga" }}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.4.0
+ api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/419"
+ creationTimestamp: null
+ name: volumesnapshots.snapshot.storage.k8s.io
+spec:
+ group: snapshot.storage.k8s.io
+ names:
+ kind: VolumeSnapshot
+ listKind: VolumeSnapshotList
+ plural: volumesnapshots
+ singular: volumesnapshot
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: Indicates if the snapshot is ready to be used to restore a volume.
+ jsonPath: .status.readyToUse
+ name: ReadyToUse
+ type: boolean
+ - description: If a new snapshot needs to be created, this contains the name of the source PVC from which this snapshot was (or will be) created.
+ jsonPath: .spec.source.persistentVolumeClaimName
+ name: SourcePVC
+ type: string
+ - description: If a snapshot already exists, this contains the name of the existing VolumeSnapshotContent object representing the existing snapshot.
+ jsonPath: .spec.source.volumeSnapshotContentName
+ name: SourceSnapshotContent
+ type: string
+ - description: Represents the minimum size of volume required to rehydrate from this snapshot.
+ jsonPath: .status.restoreSize
+ name: RestoreSize
+ type: string
+ - description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot.
+ jsonPath: .spec.volumeSnapshotClassName
+ name: SnapshotClass
+ type: string
+ - description: Name of the VolumeSnapshotContent object to which the VolumeSnapshot object intends to bind to. Please note that verification of binding actually requires checking both VolumeSnapshot and VolumeSnapshotContent to ensure both are pointing at each other. Binding MUST be verified prior to usage of this object.
+ jsonPath: .status.boundVolumeSnapshotContentName
+ name: SnapshotContent
+ type: string
+ - description: Timestamp when the point-in-time snapshot was taken by the underlying storage system.
+ jsonPath: .status.creationTime
+ name: CreationTime
+ type: date
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: VolumeSnapshot is a user's request for either creating a point-in-time snapshot of a persistent volume, or binding to a pre-existing snapshot.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ spec:
+ description: 'spec defines the desired characteristics of a snapshot requested by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots Required.'
+ properties:
+ source:
+ description: source specifies where a snapshot will be created from. This field is immutable after creation. Required.
+ properties:
+ persistentVolumeClaimName:
+                    description: persistentVolumeClaimName specifies the name of the PersistentVolumeClaim object representing the volume from which a snapshot should be created. This PVC is assumed to be in the same namespace as the VolumeSnapshot object. This field should be set if the snapshot does not exist and needs to be created. This field is immutable.
+ type: string
+ volumeSnapshotContentName:
+ description: volumeSnapshotContentName specifies the name of a pre-existing VolumeSnapshotContent object representing an existing volume snapshot. This field should be set if the snapshot already exists and only needs a representation in Kubernetes. This field is immutable.
+ type: string
+ type: object
+ oneOf:
+ - required: ["persistentVolumeClaimName"]
+ - required: ["volumeSnapshotContentName"]
+ volumeSnapshotClassName:
+ description: 'VolumeSnapshotClassName is the name of the VolumeSnapshotClass requested by the VolumeSnapshot. VolumeSnapshotClassName may be left nil to indicate that the default SnapshotClass should be used. A given cluster may have multiple default Volume SnapshotClasses: one default per CSI Driver. If a VolumeSnapshot does not specify a SnapshotClass, VolumeSnapshotSource will be checked to figure out what the associated CSI Driver is, and the default VolumeSnapshotClass associated with that CSI Driver will be used. If more than one VolumeSnapshotClass exist for a given CSI Driver and more than one have been marked as default, CreateSnapshot will fail and generate an event. Empty string is not allowed for this field.'
+ type: string
+ required:
+ - source
+ type: object
+ status:
+ description: status represents the current information of a snapshot. Consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object.
+ properties:
+ boundVolumeSnapshotContentName:
+ description: 'boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent object to which this VolumeSnapshot object intends to bind to. If not specified, it indicates that the VolumeSnapshot object has not been successfully bound to a VolumeSnapshotContent object yet. NOTE: To avoid possible security issues, consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object.'
+ type: string
+ creationTime:
+ description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it may indicate that the creation time of the snapshot is unknown.
+ format: date-time
+ type: string
+ error:
+                description: error is the last observed error during snapshot creation, if any. This field could be helpful to upper level controllers (i.e., an application controller) to decide whether they should continue waiting for the snapshot to be created based on the type of error reported. The snapshot controller will keep retrying when an error occurs during snapshot creation. Upon success, this error field will be cleared.
+ properties:
+ message:
+ description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.'
+ type: string
+ time:
+ description: time is the timestamp when the error was encountered.
+ format: date-time
+ type: string
+ type: object
+ readyToUse:
+ description: readyToUse indicates if the snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown.
+ type: boolean
+ restoreSize:
+ type: string
+ description: restoreSize represents the minimum size of volume required to create a volume from this snapshot. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown.
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ required:
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+ - additionalPrinterColumns:
+ - description: Indicates if the snapshot is ready to be used to restore a volume.
+ jsonPath: .status.readyToUse
+ name: ReadyToUse
+ type: boolean
+ - description: If a new snapshot needs to be created, this contains the name of the source PVC from which this snapshot was (or will be) created.
+ jsonPath: .spec.source.persistentVolumeClaimName
+ name: SourcePVC
+ type: string
+ - description: If a snapshot already exists, this contains the name of the existing VolumeSnapshotContent object representing the existing snapshot.
+ jsonPath: .spec.source.volumeSnapshotContentName
+ name: SourceSnapshotContent
+ type: string
+ - description: Represents the minimum size of volume required to rehydrate from this snapshot.
+ jsonPath: .status.restoreSize
+ name: RestoreSize
+ type: string
+ - description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot.
+ jsonPath: .spec.volumeSnapshotClassName
+ name: SnapshotClass
+ type: string
+    - description: Name of the VolumeSnapshotContent object to which the VolumeSnapshot object intends to bind. Please note that verification of binding actually requires checking both VolumeSnapshot and VolumeSnapshotContent to ensure both are pointing at each other. Binding MUST be verified prior to usage of this object.
+ jsonPath: .status.boundVolumeSnapshotContentName
+ name: SnapshotContent
+ type: string
+ - description: Timestamp when the point-in-time snapshot was taken by the underlying storage system.
+ jsonPath: .status.creationTime
+ name: CreationTime
+ type: date
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta1
+ # This indicates the v1beta1 version of the custom resource is deprecated.
+ # API requests to this version receive a warning in the server response.
+ deprecated: true
+ # This overrides the default warning returned to clients making v1beta1 API requests.
+ deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshot is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshot"
+ schema:
+ openAPIV3Schema:
+ description: VolumeSnapshot is a user's request for either creating a point-in-time snapshot of a persistent volume, or binding to a pre-existing snapshot.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ spec:
+ description: 'spec defines the desired characteristics of a snapshot requested by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots Required.'
+ properties:
+ source:
+ description: source specifies where a snapshot will be created from. This field is immutable after creation. Required.
+ properties:
+ persistentVolumeClaimName:
+                    description: persistentVolumeClaimName specifies the name of the PersistentVolumeClaim object representing the volume from which a snapshot should be created. This PVC is assumed to be in the same namespace as the VolumeSnapshot object. This field should be set if the snapshot does not exist and needs to be created. This field is immutable.
+ type: string
+ volumeSnapshotContentName:
+ description: volumeSnapshotContentName specifies the name of a pre-existing VolumeSnapshotContent object representing an existing volume snapshot. This field should be set if the snapshot already exists and only needs a representation in Kubernetes. This field is immutable.
+ type: string
+ type: object
+ volumeSnapshotClassName:
+                description: 'VolumeSnapshotClassName is the name of the VolumeSnapshotClass requested by the VolumeSnapshot. VolumeSnapshotClassName may be left nil to indicate that the default SnapshotClass should be used. A given cluster may have multiple default VolumeSnapshotClasses: one default per CSI Driver. If a VolumeSnapshot does not specify a SnapshotClass, VolumeSnapshotSource will be checked to figure out what the associated CSI Driver is, and the default VolumeSnapshotClass associated with that CSI Driver will be used. If more than one VolumeSnapshotClass exists for a given CSI Driver and more than one has been marked as default, CreateSnapshot will fail and generate an event. Empty string is not allowed for this field.'
+ type: string
+ required:
+ - source
+ type: object
+ status:
+ description: status represents the current information of a snapshot. Consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object.
+ properties:
+ boundVolumeSnapshotContentName:
+                description: 'boundVolumeSnapshotContentName is the name of the VolumeSnapshotContent object to which this VolumeSnapshot object intends to bind. If not specified, it indicates that the VolumeSnapshot object has not been successfully bound to a VolumeSnapshotContent object yet. NOTE: To avoid possible security issues, consumers must verify binding between VolumeSnapshot and VolumeSnapshotContent objects is successful (by validating that both VolumeSnapshot and VolumeSnapshotContent point at each other) before using this object.'
+ type: string
+ creationTime:
+ description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it may indicate that the creation time of the snapshot is unknown.
+ format: date-time
+ type: string
+ error:
+                description: error is the last observed error during snapshot creation, if any. This field could be helpful to upper level controllers (i.e., an application controller) to decide whether they should continue waiting for the snapshot to be created based on the type of error reported. The snapshot controller will keep retrying when an error occurs during snapshot creation. Upon success, this error field will be cleared.
+ properties:
+ message:
+ description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.'
+ type: string
+ time:
+ description: time is the timestamp when the error was encountered.
+ format: date-time
+ type: string
+ type: object
+ readyToUse:
+ description: readyToUse indicates if the snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown.
+ type: boolean
+ restoreSize:
+ type: string
+ description: restoreSize represents the minimum size of volume required to create a volume from this snapshot. In dynamic snapshot creation case, this field will be filled in by the snapshot controller with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown.
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ required:
+ - spec
+ type: object
+ served: true
+ storage: false
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+
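As the oneOf constraint on spec.source above enforces, a VolumeSnapshot names exactly one of persistentVolumeClaimName or volumeSnapshotContentName. A minimal sketch of a dynamically provisioned snapshot against this v1 schema, with all object names as hypothetical placeholders:

apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshot
metadata:
  name: azurefile-snapshot                     # hypothetical name
  namespace: default
spec:
  volumeSnapshotClassName: csi-azurefile-vsc   # hypothetical class; omit to fall back to the default class
  source:
    persistentVolumeClaimName: pvc-azurefile   # exactly one source field may be set (oneOf)

Once the snapshot is bound, status.restoreSize is reported as a Kubernetes quantity (for example 100Gi), matching the int-or-string pattern declared above.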
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.4.0
+ api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/419"
+ creationTimestamp: null
+ name: volumesnapshotclasses.snapshot.storage.k8s.io
+spec:
+ group: snapshot.storage.k8s.io
+ names:
+ kind: VolumeSnapshotClass
+ listKind: VolumeSnapshotClassList
+ plural: volumesnapshotclasses
+ singular: volumesnapshotclass
+ scope: Cluster
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .driver
+ name: Driver
+ type: string
+ - description: Determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted.
+ jsonPath: .deletionPolicy
+ name: DeletionPolicy
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1
+ schema:
+ openAPIV3Schema:
+        description: VolumeSnapshotClass specifies parameters that an underlying storage system uses when creating a volume snapshot. A specific VolumeSnapshotClass is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses are non-namespaced.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ deletionPolicy:
+ description: deletionPolicy determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. Required.
+ enum:
+ - Delete
+ - Retain
+ type: string
+ driver:
+ description: driver is the name of the storage driver that handles this VolumeSnapshotClass. Required.
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ parameters:
+ additionalProperties:
+ type: string
+ description: parameters is a key-value map with storage driver specific parameters for creating snapshots. These values are opaque to Kubernetes.
+ type: object
+ required:
+ - deletionPolicy
+ - driver
+ type: object
+ served: true
+ storage: true
+ subresources: {}
+ - additionalPrinterColumns:
+ - jsonPath: .driver
+ name: Driver
+ type: string
+ - description: Determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted.
+ jsonPath: .deletionPolicy
+ name: DeletionPolicy
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta1
+ # This indicates the v1beta1 version of the custom resource is deprecated.
+ # API requests to this version receive a warning in the server response.
+ deprecated: true
+ # This overrides the default warning returned to clients making v1beta1 API requests.
+ deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshotClass is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshotClass"
+ schema:
+ openAPIV3Schema:
+        description: VolumeSnapshotClass specifies parameters that an underlying storage system uses when creating a volume snapshot. A specific VolumeSnapshotClass is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses are non-namespaced.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ deletionPolicy:
+ description: deletionPolicy determines whether a VolumeSnapshotContent created through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. Required.
+ enum:
+ - Delete
+ - Retain
+ type: string
+ driver:
+ description: driver is the name of the storage driver that handles this VolumeSnapshotClass. Required.
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ parameters:
+ additionalProperties:
+ type: string
+ description: parameters is a key-value map with storage driver specific parameters for creating snapshots. These values are opaque to Kubernetes.
+ type: object
+ required:
+ - deletionPolicy
+ - driver
+ type: object
+ served: true
+ storage: false
+ subresources: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+
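Because driver and deletionPolicy are the only required fields, and they sit at the top level of the object rather than under a spec, a working VolumeSnapshotClass is short. A sketch, where the driver name file.csi.azure.com is an assumption and the class name is a placeholder:

apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshotClass
metadata:
  name: csi-azurefile-vsc    # hypothetical name
driver: file.csi.azure.com   # assumed driver name; must match the driver's GetPluginName()
deletionPolicy: Delete       # per the enum above: Delete or Retain
parameters: {}               # optional, driver-specific, opaque to Kubernetes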
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.4.0
+ api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/419"
+ creationTimestamp: null
+ name: volumesnapshotcontents.snapshot.storage.k8s.io
+spec:
+ group: snapshot.storage.k8s.io
+ names:
+ kind: VolumeSnapshotContent
+ listKind: VolumeSnapshotContentList
+ plural: volumesnapshotcontents
+ singular: volumesnapshotcontent
+ scope: Cluster
+ versions:
+ - additionalPrinterColumns:
+ - description: Indicates if the snapshot is ready to be used to restore a volume.
+ jsonPath: .status.readyToUse
+ name: ReadyToUse
+ type: boolean
+ - description: Represents the complete size of the snapshot in bytes
+ jsonPath: .status.restoreSize
+ name: RestoreSize
+ type: integer
+ - description: Determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted.
+ jsonPath: .spec.deletionPolicy
+ name: DeletionPolicy
+ type: string
+ - description: Name of the CSI driver used to create the physical snapshot on the underlying storage system.
+ jsonPath: .spec.driver
+ name: Driver
+ type: string
+ - description: Name of the VolumeSnapshotClass to which this snapshot belongs.
+ jsonPath: .spec.volumeSnapshotClassName
+ name: VolumeSnapshotClass
+ type: string
+ - description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound.
+ jsonPath: .spec.volumeSnapshotRef.name
+ name: VolumeSnapshot
+ type: string
+ - description: Namespace of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound.
+ jsonPath: .spec.volumeSnapshotRef.namespace
+ name: VolumeSnapshotNamespace
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: VolumeSnapshotContent represents the actual "on-disk" snapshot object in the underlying storage system
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ spec:
+ description: spec defines properties of a VolumeSnapshotContent created by the underlying storage system. Required.
+ properties:
+ deletionPolicy:
+ description: deletionPolicy determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. For dynamically provisioned snapshots, this field will automatically be filled in by the CSI snapshotter sidecar with the "DeletionPolicy" field defined in the corresponding VolumeSnapshotClass. For pre-existing snapshots, users MUST specify this field when creating the VolumeSnapshotContent object. Required.
+ enum:
+ - Delete
+ - Retain
+ type: string
+ driver:
+ description: driver is the name of the CSI driver used to create the physical snapshot on the underlying storage system. This MUST be the same as the name returned by the CSI GetPluginName() call for that driver. Required.
+ type: string
+ source:
+ description: source specifies whether the snapshot is (or should be) dynamically provisioned or already exists, and just requires a Kubernetes object representation. This field is immutable after creation. Required.
+ properties:
+ snapshotHandle:
+ description: snapshotHandle specifies the CSI "snapshot_id" of a pre-existing snapshot on the underlying storage system for which a Kubernetes object representation was (or should be) created. This field is immutable.
+ type: string
+ volumeHandle:
+                    description: volumeHandle specifies the CSI "volume_id" of the volume from which a snapshot should be dynamically taken. This field is immutable.
+ type: string
+ type: object
+ oneOf:
+ - required: ["snapshotHandle"]
+ - required: ["volumeHandle"]
+ volumeSnapshotClassName:
+                description: name of the VolumeSnapshotClass from which this snapshot was (or will be) created. Note that after provisioning, the VolumeSnapshotClass may be deleted or recreated with a different set of values, and as such, should not be referenced post-snapshot creation.
+ type: string
+ volumeSnapshotRef:
+                description: volumeSnapshotRef specifies the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName field must reference this VolumeSnapshotContent's name for the bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent object, name and namespace of the VolumeSnapshot object MUST be provided for binding to happen. This field is immutable after creation. Required.
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ fieldPath:
+ description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.'
+ type: string
+ kind:
+ description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ namespace:
+ description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
+ type: string
+ resourceVersion:
+ description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
+ type: string
+ uid:
+ description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
+ type: string
+ type: object
+ required:
+ - deletionPolicy
+ - driver
+ - source
+ - volumeSnapshotRef
+ type: object
+ status:
+ description: status represents the current information of a snapshot.
+ properties:
+ creationTime:
+ description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it indicates the creation time is unknown. The format of this field is a Unix nanoseconds time encoded as an int64. On Unix, the command `date +%s%N` returns the current time in nanoseconds since 1970-01-01 00:00:00 UTC.
+ format: int64
+ type: integer
+ error:
+ description: error is the last observed error during snapshot creation, if any. Upon success after retry, this error field will be cleared.
+ properties:
+ message:
+ description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.'
+ type: string
+ time:
+ description: time is the timestamp when the error was encountered.
+ format: date-time
+ type: string
+ type: object
+ readyToUse:
+ description: readyToUse indicates if a snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown.
+ type: boolean
+ restoreSize:
+ description: restoreSize represents the complete size of the snapshot in bytes. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown.
+ format: int64
+ minimum: 0
+ type: integer
+ snapshotHandle:
+ description: snapshotHandle is the CSI "snapshot_id" of a snapshot on the underlying storage system. If not specified, it indicates that dynamic snapshot creation has either failed or it is still in progress.
+ type: string
+ type: object
+ required:
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+ - additionalPrinterColumns:
+ - description: Indicates if the snapshot is ready to be used to restore a volume.
+ jsonPath: .status.readyToUse
+ name: ReadyToUse
+ type: boolean
+ - description: Represents the complete size of the snapshot in bytes
+ jsonPath: .status.restoreSize
+ name: RestoreSize
+ type: integer
+ - description: Determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted.
+ jsonPath: .spec.deletionPolicy
+ name: DeletionPolicy
+ type: string
+ - description: Name of the CSI driver used to create the physical snapshot on the underlying storage system.
+ jsonPath: .spec.driver
+ name: Driver
+ type: string
+ - description: Name of the VolumeSnapshotClass to which this snapshot belongs.
+ jsonPath: .spec.volumeSnapshotClassName
+ name: VolumeSnapshotClass
+ type: string
+ - description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound.
+ jsonPath: .spec.volumeSnapshotRef.name
+ name: VolumeSnapshot
+ type: string
+ - description: Namespace of the VolumeSnapshot object to which this VolumeSnapshotContent object is bound.
+ jsonPath: .spec.volumeSnapshotRef.namespace
+ name: VolumeSnapshotNamespace
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta1
+ # This indicates the v1beta1 version of the custom resource is deprecated.
+ # API requests to this version receive a warning in the server response.
+ deprecated: true
+ # This overrides the default warning returned to clients making v1beta1 API requests.
+ deprecationWarning: "snapshot.storage.k8s.io/v1beta1 VolumeSnapshotContent is deprecated; use snapshot.storage.k8s.io/v1 VolumeSnapshotContent"
+ schema:
+ openAPIV3Schema:
+ description: VolumeSnapshotContent represents the actual "on-disk" snapshot object in the underlying storage system
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ spec:
+ description: spec defines properties of a VolumeSnapshotContent created by the underlying storage system. Required.
+ properties:
+ deletionPolicy:
+ description: deletionPolicy determines whether this VolumeSnapshotContent and its physical snapshot on the underlying storage system should be deleted when its bound VolumeSnapshot is deleted. Supported values are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent and its physical snapshot on underlying storage system are deleted. For dynamically provisioned snapshots, this field will automatically be filled in by the CSI snapshotter sidecar with the "DeletionPolicy" field defined in the corresponding VolumeSnapshotClass. For pre-existing snapshots, users MUST specify this field when creating the VolumeSnapshotContent object. Required.
+ enum:
+ - Delete
+ - Retain
+ type: string
+ driver:
+ description: driver is the name of the CSI driver used to create the physical snapshot on the underlying storage system. This MUST be the same as the name returned by the CSI GetPluginName() call for that driver. Required.
+ type: string
+ source:
+ description: source specifies whether the snapshot is (or should be) dynamically provisioned or already exists, and just requires a Kubernetes object representation. This field is immutable after creation. Required.
+ properties:
+ snapshotHandle:
+ description: snapshotHandle specifies the CSI "snapshot_id" of a pre-existing snapshot on the underlying storage system for which a Kubernetes object representation was (or should be) created. This field is immutable.
+ type: string
+ volumeHandle:
+                    description: volumeHandle specifies the CSI "volume_id" of the volume from which a snapshot should be dynamically taken. This field is immutable.
+ type: string
+ type: object
+ volumeSnapshotClassName:
+                description: name of the VolumeSnapshotClass from which this snapshot was (or will be) created. Note that after provisioning, the VolumeSnapshotClass may be deleted or recreated with a different set of values, and as such, should not be referenced post-snapshot creation.
+ type: string
+ volumeSnapshotRef:
+                description: volumeSnapshotRef specifies the VolumeSnapshot object to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName field must reference this VolumeSnapshotContent's name for the bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent object, name and namespace of the VolumeSnapshot object MUST be provided for binding to happen. This field is immutable after creation. Required.
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ fieldPath:
+ description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.'
+ type: string
+ kind:
+ description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ namespace:
+ description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
+ type: string
+ resourceVersion:
+ description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
+ type: string
+ uid:
+ description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
+ type: string
+ type: object
+ required:
+ - deletionPolicy
+ - driver
+ - source
+ - volumeSnapshotRef
+ type: object
+ status:
+ description: status represents the current information of a snapshot.
+ properties:
+ creationTime:
+ description: creationTime is the timestamp when the point-in-time snapshot is taken by the underlying storage system. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "creation_time" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "creation_time" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. If not specified, it indicates the creation time is unknown. The format of this field is a Unix nanoseconds time encoded as an int64. On Unix, the command `date +%s%N` returns the current time in nanoseconds since 1970-01-01 00:00:00 UTC.
+ format: int64
+ type: integer
+ error:
+ description: error is the last observed error during snapshot creation, if any. Upon success after retry, this error field will be cleared.
+ properties:
+ message:
+ description: 'message is a string detailing the encountered error during snapshot creation if specified. NOTE: message may be logged, and it should not contain sensitive information.'
+ type: string
+ time:
+ description: time is the timestamp when the error was encountered.
+ format: date-time
+ type: string
+ type: object
+ readyToUse:
+ description: readyToUse indicates if a snapshot is ready to be used to restore a volume. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "ready_to_use" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "ready_to_use" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, this field will be set to "True". If not specified, it means the readiness of a snapshot is unknown.
+ type: boolean
+ restoreSize:
+ description: restoreSize represents the complete size of the snapshot in bytes. In dynamic snapshot creation case, this field will be filled in by the CSI snapshotter sidecar with the "size_bytes" value returned from CSI "CreateSnapshot" gRPC call. For a pre-existing snapshot, this field will be filled with the "size_bytes" value returned from the CSI "ListSnapshots" gRPC call if the driver supports it. When restoring a volume from this snapshot, the size of the volume MUST NOT be smaller than the restoreSize if it is specified, otherwise the restoration will fail. If not specified, it indicates that the size is unknown.
+ format: int64
+ minimum: 0
+ type: integer
+ snapshotHandle:
+ description: snapshotHandle is the CSI "snapshot_id" of a snapshot on the underlying storage system. If not specified, it indicates that dynamic snapshot creation has either failed or it is still in progress.
+ type: string
+ type: object
+ required:
+ - spec
+ type: object
+ served: true
+ storage: false
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+{{- end -}}
+{{- end -}}
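The volumeSnapshotRef description above makes binding bidirectional: a pre-provisioned VolumeSnapshotContent must name its VolumeSnapshot, and that VolumeSnapshot must point back through spec.source.volumeSnapshotContentName. A sketch of such a pair, with the driver name assumed and all handles and names as placeholders:

apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshotContent
metadata:
  name: imported-content        # hypothetical
spec:
  deletionPolicy: Retain        # MUST be set by the user for pre-existing snapshots
  driver: file.csi.azure.com    # assumed driver name
  source:
    snapshotHandle: snapshot-id-on-the-storage-system   # placeholder CSI "snapshot_id"
  volumeSnapshotRef:
    name: imported-snapshot     # hypothetical VolumeSnapshot
    namespace: default
---
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshot
metadata:
  name: imported-snapshot
  namespace: default
spec:
  source:
    volumeSnapshotContentName: imported-content   # back-reference that completes the binding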
diff --git a/charts/v1.8.0/azurefile-csi-driver/templates/crd-csi-snapshot.yaml b/charts/v1.8.0/azurefile-csi-driver/templates/crd-csi-snapshot.yaml
new file mode 100644
index 0000000000..2ff433a70a
--- /dev/null
+++ b/charts/v1.8.0/azurefile-csi-driver/templates/crd-csi-snapshot.yaml
@@ -0,0 +1,504 @@
+{{- if .Values.snapshot.enabled -}}
+{{- if eq .Values.snapshot.apiVersion "beta" }}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.3.0
+ api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/139"
+ creationTimestamp: null
+ name: volumesnapshots.snapshot.storage.k8s.io
+spec:
+ group: snapshot.storage.k8s.io
+ names:
+ kind: VolumeSnapshot
+ listKind: VolumeSnapshotList
+ plural: volumesnapshots
+ singular: volumesnapshot
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: Indicates if a snapshot is ready to be used to restore a volume.
+ jsonPath: .status.readyToUse
+ name: ReadyToUse
+ type: boolean
+    - description: Name of the source PVC from which a dynamically taken snapshot
+ will be created.
+ jsonPath: .spec.source.persistentVolumeClaimName
+ name: SourcePVC
+ type: string
+ - description: Name of the VolumeSnapshotContent which represents a pre-provisioned
+ snapshot.
+ jsonPath: .spec.source.volumeSnapshotContentName
+ name: SourceSnapshotContent
+ type: string
+ - description: Represents the complete size of the snapshot.
+ jsonPath: .status.restoreSize
+ name: RestoreSize
+ type: string
+ - description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot.
+ jsonPath: .spec.volumeSnapshotClassName
+ name: SnapshotClass
+ type: string
+ - description: The name of the VolumeSnapshotContent to which this VolumeSnapshot
+ is bound.
+ jsonPath: .status.boundVolumeSnapshotContentName
+ name: SnapshotContent
+ type: string
+ - description: Timestamp when the point-in-time snapshot is taken by the underlying
+ storage system.
+ jsonPath: .status.creationTime
+ name: CreationTime
+ type: date
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: VolumeSnapshot is a user's request for either creating a point-in-time
+ snapshot of a persistent volume, or binding to a pre-existing snapshot.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ spec:
+ description: 'spec defines the desired characteristics of a snapshot requested
+ by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots
+ Required.'
+ properties:
+ source:
+ description: source specifies where a snapshot will be created from.
+ This field is immutable after creation. Required.
+ properties:
+ persistentVolumeClaimName:
+ description: persistentVolumeClaimName specifies the name of the
+ PersistentVolumeClaim object in the same namespace as the VolumeSnapshot
+                      object from which the snapshot should be dynamically taken.
+ This field is immutable.
+ type: string
+ volumeSnapshotContentName:
+ description: volumeSnapshotContentName specifies the name of a
+ pre-existing VolumeSnapshotContent object. This field is immutable.
+ type: string
+ type: object
+ volumeSnapshotClassName:
+ description: 'volumeSnapshotClassName is the name of the VolumeSnapshotClass
+ requested by the VolumeSnapshot. If not specified, the default snapshot
+ class will be used if one exists. If not specified, and there is
+ no default snapshot class, dynamic snapshot creation will fail.
+ Empty string is not allowed for this field. TODO(xiangqian): a webhook
+ validation on empty string. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshot-classes'
+ type: string
+ required:
+ - source
+ type: object
+ status:
+ description: 'status represents the current information of a snapshot.
+ NOTE: status can be modified by sources other than system controllers,
+ and must not be depended upon for accuracy. Controllers should only
+ use information from the VolumeSnapshotContent object after verifying
+ that the binding is accurate and complete.'
+ properties:
+ boundVolumeSnapshotContentName:
+ description: 'boundVolumeSnapshotContentName represents the name of
+ the VolumeSnapshotContent object to which the VolumeSnapshot object
+ is bound. If not specified, it indicates that the VolumeSnapshot
+ object has not been successfully bound to a VolumeSnapshotContent
+ object yet. NOTE: Specified boundVolumeSnapshotContentName alone
+ does not mean binding is valid. Controllers MUST always verify
+ bidirectional binding between VolumeSnapshot and VolumeSnapshotContent
+ to avoid possible security issues.'
+ type: string
+ creationTime:
+ description: creationTime is the timestamp when the point-in-time
+ snapshot is taken by the underlying storage system. In dynamic snapshot
+ creation case, this field will be filled in with the "creation_time"
+ value returned from CSI "CreateSnapshotRequest" gRPC call. For a
+ pre-existing snapshot, this field will be filled with the "creation_time"
+ value returned from the CSI "ListSnapshots" gRPC call if the driver
+ supports it. If not specified, it indicates that the creation time
+ of the snapshot is unknown.
+ format: date-time
+ type: string
+ error:
+ description: error is the last observed error during snapshot creation,
+                  if any. This field could be helpful to upper level controllers (i.e.,
+                  an application controller) to decide whether they should continue
+ waiting for the snapshot to be created based on the type of error
+ reported.
+ properties:
+ message:
+ description: 'message is a string detailing the encountered error
+ during snapshot creation if specified. NOTE: message may be
+ logged, and it should not contain sensitive information.'
+ type: string
+ time:
+ description: time is the timestamp when the error was encountered.
+ format: date-time
+ type: string
+ type: object
+ readyToUse:
+ description: readyToUse indicates if a snapshot is ready to be used
+ to restore a volume. In dynamic snapshot creation case, this field
+ will be filled in with the "ready_to_use" value returned from CSI
+ "CreateSnapshotRequest" gRPC call. For a pre-existing snapshot,
+ this field will be filled with the "ready_to_use" value returned
+ from the CSI "ListSnapshots" gRPC call if the driver supports it,
+ otherwise, this field will be set to "True". If not specified, it
+ means the readiness of a snapshot is unknown.
+ type: boolean
+ restoreSize:
+ type: string
+ description: restoreSize represents the complete size of the snapshot
+ in bytes. In dynamic snapshot creation case, this field will be
+ filled in with the "size_bytes" value returned from CSI "CreateSnapshotRequest"
+ gRPC call. For a pre-existing snapshot, this field will be filled
+ with the "size_bytes" value returned from the CSI "ListSnapshots"
+ gRPC call if the driver supports it. When restoring a volume from
+ this snapshot, the size of the volume MUST NOT be smaller than the
+ restoreSize if it is specified, otherwise the restoration will fail.
+ If not specified, it indicates that the size is unknown.
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ required:
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.3.0
+ api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/139"
+ creationTimestamp: null
+ name: volumesnapshotclasses.snapshot.storage.k8s.io
+spec:
+ group: snapshot.storage.k8s.io
+ names:
+ kind: VolumeSnapshotClass
+ listKind: VolumeSnapshotClassList
+ plural: volumesnapshotclasses
+ singular: volumesnapshotclass
+ scope: Cluster
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .driver
+ name: Driver
+ type: string
+ - description: Determines whether a VolumeSnapshotContent created through the
+ VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted.
+ jsonPath: .deletionPolicy
+ name: DeletionPolicy
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+        description: VolumeSnapshotClass specifies parameters that an underlying storage
+ system uses when creating a volume snapshot. A specific VolumeSnapshotClass
+ is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses
+ are non-namespaced
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ deletionPolicy:
+ description: deletionPolicy determines whether a VolumeSnapshotContent
+ created through the VolumeSnapshotClass should be deleted when its bound
+ VolumeSnapshot is deleted. Supported values are "Retain" and "Delete".
+ "Retain" means that the VolumeSnapshotContent and its physical snapshot
+ on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent
+ and its physical snapshot on underlying storage system are deleted.
+ Required.
+ enum:
+ - Delete
+ - Retain
+ type: string
+ driver:
+ description: driver is the name of the storage driver that handles this
+ VolumeSnapshotClass. Required.
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ parameters:
+ additionalProperties:
+ type: string
+ description: parameters is a key-value map with storage driver specific
+ parameters for creating snapshots. These values are opaque to Kubernetes.
+ type: object
+ required:
+ - deletionPolicy
+ - driver
+ type: object
+ served: true
+ storage: true
+ subresources: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.3.0
+ api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/139"
+ creationTimestamp: null
+ name: volumesnapshotcontents.snapshot.storage.k8s.io
+spec:
+ group: snapshot.storage.k8s.io
+ names:
+ kind: VolumeSnapshotContent
+ listKind: VolumeSnapshotContentList
+ plural: volumesnapshotcontents
+ singular: volumesnapshotcontent
+ scope: Cluster
+ versions:
+ - additionalPrinterColumns:
+ - description: Indicates if a snapshot is ready to be used to restore a volume.
+ jsonPath: .status.readyToUse
+ name: ReadyToUse
+ type: boolean
+ - description: Represents the complete size of the snapshot in bytes
+ jsonPath: .status.restoreSize
+ name: RestoreSize
+ type: integer
+ - description: Determines whether this VolumeSnapshotContent and its physical
+ snapshot on the underlying storage system should be deleted when its bound
+ VolumeSnapshot is deleted.
+ jsonPath: .spec.deletionPolicy
+ name: DeletionPolicy
+ type: string
+ - description: Name of the CSI driver used to create the physical snapshot on
+ the underlying storage system.
+ jsonPath: .spec.driver
+ name: Driver
+ type: string
+ - description: Name of the VolumeSnapshotClass to which this snapshot belongs.
+ jsonPath: .spec.volumeSnapshotClassName
+ name: VolumeSnapshotClass
+ type: string
+ - description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent
+ object is bound.
+ jsonPath: .spec.volumeSnapshotRef.name
+ name: VolumeSnapshot
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: VolumeSnapshotContent represents the actual "on-disk" snapshot
+ object in the underlying storage system
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ spec:
+ description: spec defines properties of a VolumeSnapshotContent created
+ by the underlying storage system. Required.
+ properties:
+ deletionPolicy:
+ description: deletionPolicy determines whether this VolumeSnapshotContent
+ and its physical snapshot on the underlying storage system should
+ be deleted when its bound VolumeSnapshot is deleted. Supported values
+ are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent
+ and its physical snapshot on underlying storage system are kept.
+ "Delete" means that the VolumeSnapshotContent and its physical snapshot
+ on underlying storage system are deleted. In dynamic snapshot creation
+ case, this field will be filled in with the "DeletionPolicy" field
+ defined in the VolumeSnapshotClass the VolumeSnapshot refers to.
+ For pre-existing snapshots, users MUST specify this field when creating
+ the VolumeSnapshotContent object. Required.
+ enum:
+ - Delete
+ - Retain
+ type: string
+ driver:
+ description: driver is the name of the CSI driver used to create the
+ physical snapshot on the underlying storage system. This MUST be
+ the same as the name returned by the CSI GetPluginName() call for
+ that driver. Required.
+ type: string
+ source:
+ description: source specifies from where a snapshot will be created.
+ This field is immutable after creation. Required.
+ properties:
+ snapshotHandle:
+ description: snapshotHandle specifies the CSI "snapshot_id" of
+ a pre-existing snapshot on the underlying storage system. This
+ field is immutable.
+ type: string
+ volumeHandle:
+ description: volumeHandle specifies the CSI "volume_id" of the
+                      volume from which a snapshot should be dynamically taken.
+ This field is immutable.
+ type: string
+ type: object
+ volumeSnapshotClassName:
+ description: name of the VolumeSnapshotClass to which this snapshot
+ belongs.
+ type: string
+ volumeSnapshotRef:
+ description: volumeSnapshotRef specifies the VolumeSnapshot object
+ to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName
+                  field must reference this VolumeSnapshotContent's name for the
+ bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent
+ object, name and namespace of the VolumeSnapshot object MUST be
+ provided for binding to happen. This field is immutable after creation.
+ Required.
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ fieldPath:
+ description: 'If referring to a piece of an object instead of
+ an entire object, this string should contain a valid JSON/Go
+ field access statement, such as desiredState.manifest.containers[2].
+ For example, if the object reference is to a container within
+ a pod, this would take on a value like: "spec.containers{name}"
+ (where "name" refers to the name of the container that triggered
+ the event) or if no container name is specified "spec.containers[2]"
+ (container with index 2 in this pod). This syntax is chosen
+ only to have some well-defined way of referencing a part of
+ an object. TODO: this design is not final and this field is
+ subject to change in the future.'
+ type: string
+ kind:
+ description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ namespace:
+ description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
+ type: string
+ resourceVersion:
+ description: 'Specific resourceVersion to which this reference
+ is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
+ type: string
+ uid:
+ description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
+ type: string
+ type: object
+ required:
+ - deletionPolicy
+ - driver
+ - source
+ - volumeSnapshotRef
+ type: object
+ status:
+ description: status represents the current information of a snapshot.
+ properties:
+ creationTime:
+ description: creationTime is the timestamp when the point-in-time
+ snapshot is taken by the underlying storage system. In dynamic snapshot
+ creation case, this field will be filled in with the "creation_time"
+ value returned from CSI "CreateSnapshotRequest" gRPC call. For a
+ pre-existing snapshot, this field will be filled with the "creation_time"
+ value returned from the CSI "ListSnapshots" gRPC call if the driver
+ supports it. If not specified, it indicates the creation time is
+ unknown. The format of this field is a Unix nanoseconds time encoded
+ as an int64. On Unix, the command `date +%s%N` returns the current
+ time in nanoseconds since 1970-01-01 00:00:00 UTC.
+ format: int64
+ type: integer
+ error:
+ description: error is the latest observed error during snapshot creation,
+ if any.
+ properties:
+ message:
+ description: 'message is a string detailing the encountered error
+ during snapshot creation if specified. NOTE: message may be
+ logged, and it should not contain sensitive information.'
+ type: string
+ time:
+ description: time is the timestamp when the error was encountered.
+ format: date-time
+ type: string
+ type: object
+ readyToUse:
+ description: readyToUse indicates if a snapshot is ready to be used
+ to restore a volume. In dynamic snapshot creation case, this field
+ will be filled in with the "ready_to_use" value returned from CSI
+ "CreateSnapshotRequest" gRPC call. For a pre-existing snapshot,
+ this field will be filled with the "ready_to_use" value returned
+ from the CSI "ListSnapshots" gRPC call if the driver supports it,
+ otherwise, this field will be set to "True". If not specified, it
+ means the readiness of a snapshot is unknown.
+ type: boolean
+ restoreSize:
+ description: restoreSize represents the complete size of the snapshot
+ in bytes. In dynamic snapshot creation case, this field will be
+ filled in with the "size_bytes" value returned from CSI "CreateSnapshotRequest"
+ gRPC call. For a pre-existing snapshot, this field will be filled
+ with the "size_bytes" value returned from the CSI "ListSnapshots"
+ gRPC call if the driver supports it. When restoring a volume from
+ this snapshot, the size of the volume MUST NOT be smaller than the
+ restoreSize if it is specified, otherwise the restoration will fail.
+ If not specified, it indicates that the size is unknown.
+ format: int64
+ minimum: 0
+ type: integer
+ snapshotHandle:
+ description: snapshotHandle is the CSI "snapshot_id" of a snapshot
+ on the underlying storage system. If not specified, it indicates
+ that dynamic snapshot creation has either failed or it is still
+ in progress.
+ type: string
+ type: object
+ required:
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+{{- end -}}
+{{- end -}}
\ No newline at end of file
diff --git a/charts/v1.8.0/azurefile-csi-driver/templates/csi-azurefile-controller.yaml b/charts/v1.8.0/azurefile-csi-driver/templates/csi-azurefile-controller.yaml
new file mode 100644
index 0000000000..f83267e863
--- /dev/null
+++ b/charts/v1.8.0/azurefile-csi-driver/templates/csi-azurefile-controller.yaml
@@ -0,0 +1,211 @@
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+ name: {{ .Values.controller.name }}
+ namespace: {{ .Release.Namespace }}
+ labels:
+ app: {{ .Values.controller.name }}
+ {{- include "azurefile.labels" . | nindent 4 }}
+spec:
+ replicas: {{ .Values.controller.replicas }}
+ selector:
+ matchLabels:
+ {{- include "azurefile.selectorLabels" . | nindent 6 }}
+ app: {{ .Values.controller.name }}
+ template:
+ metadata:
+ labels:
+ {{- include "azurefile.labels" . | nindent 8 }}
+ app: {{ .Values.controller.name }}
+ spec:
+ hostNetwork: {{ .Values.controller.hostNetwork }}
+ serviceAccountName: {{ .Values.serviceAccount.controller }}
+ nodeSelector:
+ kubernetes.io/os: linux
+ {{- if .Values.controller.runOnMaster}}
+ kubernetes.io/role: master
+ {{- end}}
+ priorityClassName: system-cluster-critical
+{{- with .Values.controller.tolerations }}
+ tolerations:
+{{ toYaml . | indent 8 }}
+{{- end }}
+{{- with .Values.controller.affinity }}
+ affinity:
+{{ toYaml . | indent 8 }}
+{{- end }}
+ {{- if .Values.imagePullSecrets }}
+ imagePullSecrets:
+{{ toYaml .Values.imagePullSecrets | indent 8 }}
+ {{- end }}
+ containers:
+ - name: csi-provisioner
+{{- if hasPrefix "/" .Values.image.csiProvisioner.repository }}
+ image: "{{ .Values.image.baseRepo }}{{ .Values.image.csiProvisioner.repository }}:{{ .Values.image.csiProvisioner.tag }}"
+{{- else }}
+ image: "{{ .Values.image.csiProvisioner.repository }}:{{ .Values.image.csiProvisioner.tag }}"
+{{- end }}
+ args:
+ - "-v=2"
+ - "--csi-address=$(ADDRESS)"
+ - "--leader-election"
+ - "--timeout=300s"
+ - "--extra-create-metadata=true"
+ env:
+ - name: ADDRESS
+ value: /csi/csi.sock
+ imagePullPolicy: {{ .Values.image.csiProvisioner.pullPolicy }}
+ volumeMounts:
+ - mountPath: /csi
+ name: socket-dir
+ resources: {{- toYaml .Values.controller.resources.csiProvisioner | nindent 12 }}
+ - name: csi-attacher
+{{- if hasPrefix "/" .Values.image.csiAttacher.repository }}
+ image: "{{ .Values.image.baseRepo }}{{ .Values.image.csiAttacher.repository }}:{{ .Values.image.csiAttacher.tag }}"
+{{- else }}
+ image: "{{ .Values.image.csiAttacher.repository }}:{{ .Values.image.csiAttacher.tag }}"
+{{- end }}
+ args:
+ - "-v=2"
+ - "-csi-address=$(ADDRESS)"
+ - "-timeout=120s"
+ - "-leader-election"
+ env:
+ - name: ADDRESS
+ value: /csi/csi.sock
+ imagePullPolicy: {{ .Values.image.csiAttacher.pullPolicy }}
+ volumeMounts:
+ - mountPath: /csi
+ name: socket-dir
+ resources: {{- toYaml .Values.controller.resources.csiAttacher | nindent 12 }}
+ - name: csi-snapshotter
+{{- if eq .Values.snapshot.apiVersion "beta" }}
+ {{- if hasPrefix "/" .Values.snapshot.image.csiSnapshotter.repository }}
+ image: "{{ .Values.image.baseRepo }}{{ .Values.snapshot.image.csiSnapshotter.repository }}:{{ .Values.snapshot.image.csiSnapshotter.tag }}"
+ {{- else }}
+ image: "{{ .Values.snapshot.image.csiSnapshotter.repository }}:{{ .Values.snapshot.image.csiSnapshotter.tag }}"
+ {{- end }}
+{{- else }}
+ {{- if hasPrefix "/" .Values.snapshot.image.csiSnapshotter.repository }}
+ image: "{{ .Values.image.baseRepo }}{{ .Values.snapshot.image.csiSnapshotter.repository }}:{{ .Values.snapshot.image.csiSnapshotter.gaTag }}"
+ {{- else }}
+ image: "{{ .Values.snapshot.image.csiSnapshotter.repository }}:{{ .Values.snapshot.image.csiSnapshotter.gaTag }}"
+ {{- end }}
+{{- end }}
+ args:
+ - "-csi-address=$(ADDRESS)"
+ - "-leader-election"
+ - "-v=2"
+ env:
+ - name: ADDRESS
+ value: /csi/csi.sock
+ volumeMounts:
+ - name: socket-dir
+ mountPath: /csi
+ resources: {{- toYaml .Values.controller.resources.csiSnapshotter | nindent 12 }}
+ - name: csi-resizer
+{{- if hasPrefix "/" .Values.image.csiResizer.repository }}
+ image: "{{ .Values.image.baseRepo }}{{ .Values.image.csiResizer.repository }}:{{ .Values.image.csiResizer.tag }}"
+{{- else }}
+ image: "{{ .Values.image.csiResizer.repository }}:{{ .Values.image.csiResizer.tag }}"
+{{- end }}
+ args:
+ - "-csi-address=$(ADDRESS)"
+ - "-v=2"
+ - "-leader-election"
+ - '-handle-volume-inuse-error=false'
+ env:
+ - name: ADDRESS
+ value: /csi/csi.sock
+ imagePullPolicy: {{ .Values.image.csiResizer.pullPolicy }}
+ volumeMounts:
+ - name: socket-dir
+ mountPath: /csi
+ resources: {{- toYaml .Values.controller.resources.csiResizer | nindent 12 }}
+ - name: liveness-probe
+{{- if hasPrefix "/" .Values.image.livenessProbe.repository }}
+ image: "{{ .Values.image.baseRepo }}{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}"
+{{- else }}
+ image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}"
+{{- end }}
+ args:
+ - --csi-address=/csi/csi.sock
+ - --probe-timeout=3s
+ - --health-port={{ .Values.controller.livenessProbe.healthPort }}
+ - --v=2
+ imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }}
+ volumeMounts:
+ - name: socket-dir
+ mountPath: /csi
+ resources: {{- toYaml .Values.controller.resources.livenessProbe | nindent 12 }}
+ - name: azurefile
+{{- if hasPrefix "/" .Values.image.azurefile.repository }}
+ image: "{{ .Values.image.baseRepo }}{{ .Values.image.azurefile.repository }}:{{ .Values.image.azurefile.tag }}"
+{{- else }}
+ image: "{{ .Values.image.azurefile.repository }}:{{ .Values.image.azurefile.tag }}"
+{{- end }}
+ args:
+ - "--v={{ .Values.controller.logLevel }}"
+ - "--endpoint=$(CSI_ENDPOINT)"
+ - "--metrics-address=0.0.0.0:{{ .Values.controller.metricsPort }}"
+ - "--kubeconfig={{ .Values.controller.kubeconfig }}"
+ - "--drivername={{ .Values.driver.name }}"
+ - "--cloud-config-secret-name={{ .Values.controller.cloudConfigSecretName }}"
+ - "--cloud-config-secret-namespace={{ .Values.controller.cloudConfigSecretNamespace }}"
+ - "--custom-user-agent={{ .Values.driver.customUserAgent }}"
+ - "--user-agent-suffix={{ .Values.driver.userAgentSuffix }}"
+ - "--allow-empty-cloud-config={{ .Values.controller.allowEmptyCloudConfig }}"
+ ports:
+ - containerPort: {{ .Values.controller.livenessProbe.healthPort }}
+ name: healthz
+ protocol: TCP
+ - containerPort: {{ .Values.controller.metricsPort }}
+ name: metrics
+ protocol: TCP
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 30
+ timeoutSeconds: 10
+ periodSeconds: 30
+ env:
+ - name: AZURE_CREDENTIAL_FILE
+ valueFrom:
+ configMapKeyRef:
+ name: azure-cred-file
+ key: path
+ optional: true
+ - name: CSI_ENDPOINT
+ value: unix:///csi/csi.sock
+ imagePullPolicy: {{ .Values.image.azurefile.pullPolicy }}
+ volumeMounts:
+ - mountPath: /csi
+ name: socket-dir
+ - mountPath: /etc/kubernetes/
+ name: azure-cred
+ {{- if eq .Values.linux.distro "fedora" }}
+ - name: ssl
+ mountPath: /etc/ssl/certs
+ readOnly: true
+ - name: ssl-pki
+ mountPath: /etc/pki/ca-trust/extracted
+ readOnly: true
+ {{- end }}
+ resources: {{- toYaml .Values.controller.resources.azurefile | nindent 12 }}
+ volumes:
+ - name: socket-dir
+ emptyDir: {}
+ - name: azure-cred
+ hostPath:
+ path: /etc/kubernetes/
+ type: DirectoryOrCreate
+ {{- if eq .Values.linux.distro "fedora" }}
+ - name: ssl
+ hostPath:
+ path: /etc/ssl/certs
+ - name: ssl-pki
+ hostPath:
+ path: /etc/pki/ca-trust/extracted
+ {{- end }}
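After the chart renders this Deployment, a quick way to confirm the controller came up (assuming the default `controller.name` of `csi-azurefile-controller` and the `kube-system` namespace) is:

```console
kubectl -n kube-system get pods -l app=csi-azurefile-controller -o wide
kubectl -n kube-system logs deploy/csi-azurefile-controller -c azurefile
```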
diff --git a/charts/v1.8.0/azurefile-csi-driver/templates/csi-azurefile-driver.yaml b/charts/v1.8.0/azurefile-csi-driver/templates/csi-azurefile-driver.yaml
new file mode 100644
index 0000000000..416d716e34
--- /dev/null
+++ b/charts/v1.8.0/azurefile-csi-driver/templates/csi-azurefile-driver.yaml
@@ -0,0 +1,23 @@
+---
+apiVersion: storage.k8s.io/v1
+kind: CSIDriver
+metadata:
+ name: {{ .Values.driver.name }}
+ labels:
+ {{- include "azurefile.labels" . | nindent 4 }}
+ annotations:
+ csiDriver: "{{ .Values.image.azurefile.tag }}"
+{{- if eq .Values.snapshot.apiVersion "beta" }}
+ snapshot: "{{ .Values.snapshot.image.csiSnapshotter.tag }}"
+{{- else }}
+ snapshot: "{{ .Values.snapshot.image.csiSnapshotter.gaTag }}"
+{{- end }}
+spec:
+ attachRequired: {{ .Values.controller.attachRequired }}
+ podInfoOnMount: true
+ volumeLifecycleModes:
+ - Persistent
+ - Ephemeral
+ {{- if .Values.feature.enableFSGroupPolicy}}
+ fsGroupPolicy: File
+ {{- end}}
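The annotations on this CSIDriver object record which driver and snapshotter tags a release was rendered with, so they are a convenient place to check what is actually installed; with the default driver name this would be something like:

```console
kubectl get csidriver file.csi.azure.com -o yaml
```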
diff --git a/charts/v1.8.0/azurefile-csi-driver/templates/csi-azurefile-node-windows.yaml b/charts/v1.8.0/azurefile-csi-driver/templates/csi-azurefile-node-windows.yaml
new file mode 100644
index 0000000000..cdc45e7691
--- /dev/null
+++ b/charts/v1.8.0/azurefile-csi-driver/templates/csi-azurefile-node-windows.yaml
@@ -0,0 +1,195 @@
+{{- if .Values.windows.enabled}}
+kind: DaemonSet
+apiVersion: apps/v1
+metadata:
+ name: {{ .Values.windows.dsName }}
+ namespace: {{ .Release.Namespace }}
+ labels:
+ app: {{ .Values.windows.dsName }}
+ {{- include "azurefile.labels" . | nindent 4 }}
+spec:
+ updateStrategy:
+ rollingUpdate:
+ maxUnavailable: {{ .Values.node.maxUnavailable }}
+ type: RollingUpdate
+ selector:
+ matchLabels:
+ app: {{ .Values.windows.dsName }}
+ {{- include "azurefile.selectorLabels" . | nindent 6 }}
+ template:
+ metadata:
+ labels:
+ app: {{ .Values.windows.dsName }}
+ {{- include "azurefile.labels" . | nindent 8 }}
+ spec:
+ serviceAccountName: {{ .Values.serviceAccount.node }}
+{{- with .Values.windows.tolerations }}
+ tolerations:
+{{ toYaml . | indent 8 }}
+{{- end }}
+ nodeSelector:
+ kubernetes.io/os: windows
+ affinity:
+ nodeAffinity:
+{{ toYaml .Values.windows.nodeAffinity | indent 10 }}
+ priorityClassName: system-node-critical
+ {{- if .Values.imagePullSecrets }}
+ imagePullSecrets:
+{{ toYaml .Values.imagePullSecrets | indent 8 }}
+ {{- end }}
+ containers:
+ - name: liveness-probe
+ volumeMounts:
+ - mountPath: C:\csi
+ name: plugin-dir
+{{- if hasPrefix "/" .Values.image.livenessProbe.repository }}
+ image: "{{ .Values.image.baseRepo }}{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}"
+{{- else }}
+ image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}"
+{{- end }}
+ args:
+ - "--csi-address=$(CSI_ENDPOINT)"
+ - "--probe-timeout=3s"
+ - "--health-port={{ .Values.node.livenessProbe.healthPort }}"
+ - "--v=2"
+ env:
+ - name: CSI_ENDPOINT
+ value: unix://C:\\csi\\csi.sock
+ imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }}
+ resources: {{- toYaml .Values.windows.resources.livenessProbe | nindent 12 }}
+ - name: node-driver-registrar
+{{- if hasPrefix "/" .Values.image.nodeDriverRegistrar.repository }}
+ image: "{{ .Values.image.baseRepo }}{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}"
+{{- else }}
+ image: "{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}"
+{{- end }}
+ args:
+ - "--csi-address=$(CSI_ENDPOINT)"
+ - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)"
+ - "--v=2"
+ livenessProbe:
+ exec:
+ command:
+ - /csi-node-driver-registrar.exe
+ - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
+ - --mode=kubelet-registration-probe
+ initialDelaySeconds: 60
+ timeoutSeconds: 30
+ env:
+ - name: CSI_ENDPOINT
+ value: unix://C:\\csi\\csi.sock
+ - name: DRIVER_REG_SOCK_PATH
+ value: C:\\var\\lib\\kubelet\\plugins\\{{ .Values.driver.name }}\\csi.sock
+ - name: KUBE_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ imagePullPolicy: {{ .Values.image.nodeDriverRegistrar.pullPolicy }}
+ volumeMounts:
+ - name: kubelet-dir
+ mountPath: "C:\\var\\lib\\kubelet"
+ - name: plugin-dir
+ mountPath: C:\csi
+ - name: registration-dir
+ mountPath: C:\registration
+ resources: {{- toYaml .Values.windows.resources.nodeDriverRegistrar | nindent 12 }}
+ - name: azurefile
+{{- if hasPrefix "/" .Values.image.azurefile.repository }}
+ image: "{{ .Values.image.baseRepo }}{{ .Values.image.azurefile.repository }}:{{ .Values.image.azurefile.tag }}"
+{{- else }}
+ image: "{{ .Values.image.azurefile.repository }}:{{ .Values.image.azurefile.tag }}"
+{{- end }}
+ args:
+ - "--v={{ .Values.node.logLevel }}"
+ - "--endpoint=$(CSI_ENDPOINT)"
+ - "--nodeid=$(KUBE_NODE_NAME)"
+ - "--metrics-address=0.0.0.0:{{ .Values.node.metricsPort }}"
+ - "--kubeconfig={{ .Values.windows.kubeconfig }}"
+ - "--drivername={{ .Values.driver.name }}"
+ - "--cloud-config-secret-name={{ .Values.node.cloudConfigSecretName }}"
+ - "--cloud-config-secret-namespace={{ .Values.node.cloudConfigSecretNamespace }}"
+ - "--custom-user-agent={{ .Values.driver.customUserAgent }}"
+ - "--user-agent-suffix={{ .Values.driver.userAgentSuffix }}"
+ - "--allow-empty-cloud-config={{ .Values.node.allowEmptyCloudConfig }}"
+ - "--enable-get-volume-stats={{ .Values.feature.enableGetVolumeStats }}"
+ ports:
+ - containerPort: {{ .Values.node.livenessProbe.healthPort }}
+ name: healthz
+ protocol: TCP
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 30
+ timeoutSeconds: 10
+ periodSeconds: 30
+ env:
+ - name: AZURE_CREDENTIAL_FILE
+ valueFrom:
+ configMapKeyRef:
+ name: azure-cred-file
+ key: path-windows
+ optional: true
+ - name: CSI_ENDPOINT
+ value: unix://C:\\csi\\csi.sock
+ - name: KUBE_NODE_NAME
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: spec.nodeName
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ volumeMounts:
+ - name: kubelet-dir
+ mountPath: "C:\\var\\lib\\kubelet"
+ - name: plugin-dir
+ mountPath: C:\csi
+ - name: azure-config
+ mountPath: C:\k
+ - name: csi-proxy-fs-pipe-v1
+ mountPath: \\.\pipe\csi-proxy-filesystem-v1
+ - name: csi-proxy-smb-pipe-v1
+ mountPath: \\.\pipe\csi-proxy-smb-v1
+          # these paths are still included for compatibility; they're used
+          # only if the node still has the beta version of the CSI proxy
+ - name: csi-proxy-fs-pipe-v1beta1
+ mountPath: \\.\pipe\csi-proxy-filesystem-v1beta1
+ - name: csi-proxy-smb-pipe-v1beta1
+ mountPath: \\.\pipe\csi-proxy-smb-v1beta1
+ resources: {{- toYaml .Values.windows.resources.azurefile | nindent 12 }}
+ volumes:
+ - name: csi-proxy-fs-pipe-v1
+ hostPath:
+ path: \\.\pipe\csi-proxy-filesystem-v1
+ type: ""
+ - name: csi-proxy-smb-pipe-v1
+ hostPath:
+ path: \\.\pipe\csi-proxy-smb-v1
+ type: ""
+      # these paths are still included for compatibility; they're used
+      # only if the node still has the beta version of the CSI proxy
+ - name: csi-proxy-fs-pipe-v1beta1
+ hostPath:
+ path: \\.\pipe\csi-proxy-filesystem-v1beta1
+ type: ""
+ - name: csi-proxy-smb-pipe-v1beta1
+ hostPath:
+ path: \\.\pipe\csi-proxy-smb-v1beta1
+ type: ""
+ - name: registration-dir
+ hostPath:
+ path: {{ .Values.windows.kubelet }}\plugins_registry\
+ type: Directory
+ - name: kubelet-dir
+ hostPath:
+ path: {{ .Values.windows.kubelet }}\
+ type: Directory
+ - name: plugin-dir
+ hostPath:
+ path: {{ .Values.windows.kubelet }}\plugins\{{ .Values.driver.name }}\
+ type: DirectoryOrCreate
+ - name: azure-config
+ hostPath:
+ path: C:\k
+ type: Directory
+{{- end -}}
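The Windows DaemonSet above talks to csi-proxy through the v1 named pipes, falling back to the v1beta1 pipes on older nodes, so csi-proxy must already be running on each Windows node. If the pods do not come up, the mounts and events can be inspected with, for example:

```console
kubectl -n kube-system get pods -l app=csi-azurefile-node-win -o wide
kubectl -n kube-system describe pod -l app=csi-azurefile-node-win
```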
diff --git a/charts/v1.8.0/azurefile-csi-driver/templates/csi-azurefile-node.yaml b/charts/v1.8.0/azurefile-csi-driver/templates/csi-azurefile-node.yaml
new file mode 100644
index 0000000000..4f6c8e21f4
--- /dev/null
+++ b/charts/v1.8.0/azurefile-csi-driver/templates/csi-azurefile-node.yaml
@@ -0,0 +1,185 @@
+{{- if .Values.linux.enabled}}
+kind: DaemonSet
+apiVersion: apps/v1
+metadata:
+ name: {{ .Values.linux.dsName }}
+ namespace: {{ .Release.Namespace }}
+ labels:
+ app: {{ .Values.linux.dsName }}
+ {{- include "azurefile.labels" . | nindent 4 }}
+spec:
+ updateStrategy:
+ rollingUpdate:
+ maxUnavailable: {{ .Values.node.maxUnavailable }}
+ type: RollingUpdate
+ selector:
+ matchLabels:
+ app: {{ .Values.linux.dsName }}
+ {{- include "azurefile.selectorLabels" . | nindent 6 }}
+ template:
+ metadata:
+ labels:
+ app: {{ .Values.linux.dsName }}
+ {{- include "azurefile.labels" . | nindent 8 }}
+ spec:
+ hostNetwork: true
+ dnsPolicy: {{ .Values.linux.dnsPolicy }}
+ serviceAccountName: {{ .Values.serviceAccount.node }}
+ nodeSelector:
+ kubernetes.io/os: linux
+ affinity:
+ nodeAffinity:
+{{ toYaml .Values.linux.nodeAffinity | indent 10 }}
+ priorityClassName: system-node-critical
+{{- with .Values.linux.tolerations }}
+ tolerations:
+{{ toYaml . | indent 8 }}
+{{- end }}
+ {{- if .Values.imagePullSecrets }}
+ imagePullSecrets:
+{{ toYaml .Values.imagePullSecrets | indent 8 }}
+ {{- end }}
+ containers:
+ - name: liveness-probe
+ volumeMounts:
+ - mountPath: /csi
+ name: socket-dir
+{{- if hasPrefix "/" .Values.image.livenessProbe.repository }}
+ image: "{{ .Values.image.baseRepo }}{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}"
+{{- else }}
+ image: "{{ .Values.image.livenessProbe.repository }}:{{ .Values.image.livenessProbe.tag }}"
+{{- end }}
+ args:
+ - --csi-address=/csi/csi.sock
+ - --probe-timeout=3s
+ - --health-port={{ .Values.node.livenessProbe.healthPort }}
+ - --v=2
+ imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }}
+ resources: {{- toYaml .Values.linux.resources.livenessProbe | nindent 12 }}
+ - name: node-driver-registrar
+{{- if hasPrefix "/" .Values.image.nodeDriverRegistrar.repository }}
+ image: "{{ .Values.image.baseRepo }}{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}"
+{{- else }}
+ image: "{{ .Values.image.nodeDriverRegistrar.repository }}:{{ .Values.image.nodeDriverRegistrar.tag }}"
+{{- end }}
+ args:
+ - --csi-address=$(ADDRESS)
+ - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
+ - --v=2
+ livenessProbe:
+ exec:
+ command:
+ - /csi-node-driver-registrar
+ - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
+ - --mode=kubelet-registration-probe
+ initialDelaySeconds: 30
+ timeoutSeconds: 15
+ env:
+ - name: ADDRESS
+ value: /csi/csi.sock
+ - name: DRIVER_REG_SOCK_PATH
+ value: {{ .Values.linux.kubelet }}/plugins/{{ .Values.driver.name }}/csi.sock
+ imagePullPolicy: {{ .Values.image.nodeDriverRegistrar.pullPolicy }}
+ volumeMounts:
+ - name: socket-dir
+ mountPath: /csi
+ - name: registration-dir
+ mountPath: /registration
+ resources: {{- toYaml .Values.linux.resources.nodeDriverRegistrar | nindent 12 }}
+ - name: azurefile
+{{- if hasPrefix "/" .Values.image.azurefile.repository }}
+ image: "{{ .Values.image.baseRepo }}{{ .Values.image.azurefile.repository }}:{{ .Values.image.azurefile.tag }}"
+{{- else }}
+ image: "{{ .Values.image.azurefile.repository }}:{{ .Values.image.azurefile.tag }}"
+{{- end }}
+ args:
+ - "--v={{ .Values.node.logLevel }}"
+ - "--endpoint=$(CSI_ENDPOINT)"
+ - "--nodeid=$(KUBE_NODE_NAME)"
+ - "--metrics-address=0.0.0.0:{{ .Values.node.metricsPort }}"
+ - "--kubeconfig={{ .Values.linux.kubeconfig }}"
+ - "--drivername={{ .Values.driver.name }}"
+ - "--cloud-config-secret-name={{ .Values.node.cloudConfigSecretName }}"
+ - "--cloud-config-secret-namespace={{ .Values.node.cloudConfigSecretNamespace }}"
+ - "--custom-user-agent={{ .Values.driver.customUserAgent }}"
+ - "--user-agent-suffix={{ .Values.driver.userAgentSuffix }}"
+ - "--allow-empty-cloud-config={{ .Values.node.allowEmptyCloudConfig }}"
+ - "--enable-get-volume-stats={{ .Values.feature.enableGetVolumeStats }}"
+ ports:
+ - containerPort: {{ .Values.node.livenessProbe.healthPort }}
+ name: healthz
+ protocol: TCP
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 30
+ timeoutSeconds: 10
+ periodSeconds: 30
+ env:
+ - name: AZURE_CREDENTIAL_FILE
+ valueFrom:
+ configMapKeyRef:
+ name: azure-cred-file
+ key: path
+ optional: true
+ - name: CSI_ENDPOINT
+ value: unix:///csi/csi.sock
+ - name: KUBE_NODE_NAME
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: spec.nodeName
+ imagePullPolicy: {{ .Values.image.azurefile.pullPolicy }}
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - mountPath: /csi
+ name: socket-dir
+ - mountPath: {{ .Values.linux.kubelet }}/
+ mountPropagation: Bidirectional
+ name: mountpoint-dir
+ - mountPath: /etc/kubernetes/
+ name: azure-cred
+ - mountPath: /dev
+ name: device-dir
+ {{- if eq .Values.linux.distro "fedora" }}
+ - name: ssl
+ mountPath: /etc/ssl/certs
+ readOnly: true
+ - name: ssl-pki
+ mountPath: /etc/pki/ca-trust/extracted
+ readOnly: true
+ {{- end }}
+ resources: {{- toYaml .Values.linux.resources.azurefile | nindent 12 }}
+ volumes:
+ - hostPath:
+ path: {{ .Values.linux.kubelet }}/plugins/{{ .Values.driver.name }}
+ type: DirectoryOrCreate
+ name: socket-dir
+ - hostPath:
+ path: {{ .Values.linux.kubelet }}/
+ type: DirectoryOrCreate
+ name: mountpoint-dir
+ - hostPath:
+ path: {{ .Values.linux.kubelet }}/plugins_registry/
+ type: DirectoryOrCreate
+ name: registration-dir
+ - hostPath:
+ path: /etc/kubernetes/
+ type: DirectoryOrCreate
+ name: azure-cred
+ - hostPath:
+ path: /dev
+ type: Directory
+ name: device-dir
+ {{- if eq .Values.linux.distro "fedora" }}
+ - name: ssl
+ hostPath:
+ path: /etc/ssl/certs
+ - name: ssl-pki
+ hostPath:
+ path: /etc/pki/ca-trust/extracted
+ {{- end }}
+{{- end -}}
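The `linux.distro` value referenced above only toggles the extra SSL certificate mounts for Fedora-style hosts; switching it at install time is a one-flag override (a sketch, assuming the chart repo from this repository is already added):

```console
helm upgrade -i azurefile-csi-driver azurefile-csi-driver/azurefile-csi-driver \
  -n kube-system --set linux.distro=fedora
```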
diff --git a/charts/v1.8.0/azurefile-csi-driver/templates/csi-snapshot-controller.yaml b/charts/v1.8.0/azurefile-csi-driver/templates/csi-snapshot-controller.yaml
new file mode 100644
index 0000000000..33fe68dc42
--- /dev/null
+++ b/charts/v1.8.0/azurefile-csi-driver/templates/csi-snapshot-controller.yaml
@@ -0,0 +1,58 @@
+{{- if .Values.snapshot.enabled -}}
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+ name: {{ .Values.snapshot.snapshotController.name}}
+ namespace: {{ .Release.Namespace }}
+ labels:
+ app: {{ .Values.snapshot.snapshotController.name}}
+ {{- include "azurefile.labels" . | nindent 4 }}
+spec:
+ replicas: {{ .Values.snapshot.snapshotController.replicas }}
+ selector:
+ matchLabels:
+ app: {{ .Values.snapshot.snapshotController.name}}
+ {{- include "azurefile.selectorLabels" . | nindent 6 }}
+ template:
+ metadata:
+ labels:
+ app: {{ .Values.snapshot.snapshotController.name}}
+ {{- include "azurefile.labels" . | nindent 8 }}
+ spec:
+ serviceAccountName: {{ .Values.serviceAccount.snapshotController }}
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-cluster-critical
+{{- with .Values.controller.tolerations }}
+ tolerations:
+{{ toYaml . | indent 8 }}
+{{- end }}
+{{- with .Values.controller.affinity }}
+ affinity:
+{{ toYaml . | indent 8 }}
+{{- end }}
+ {{- if .Values.imagePullSecrets }}
+ imagePullSecrets:
+{{ toYaml .Values.imagePullSecrets | indent 8 }}
+ {{- end }}
+ containers:
+ - name: {{ .Values.snapshot.snapshotController.name}}
+{{- if eq .Values.snapshot.apiVersion "beta" }}
+ {{- if hasPrefix "/" .Values.snapshot.image.csiSnapshotController.repository }}
+ image: "{{ .Values.image.baseRepo }}{{ .Values.snapshot.image.csiSnapshotController.repository }}:{{ .Values.snapshot.image.csiSnapshotController.tag }}"
+ {{- else }}
+ image: "{{ .Values.snapshot.image.csiSnapshotController.repository }}:{{ .Values.snapshot.image.csiSnapshotController.tag }}"
+ {{- end }}
+{{- else }}
+ {{- if hasPrefix "/" .Values.snapshot.image.csiSnapshotController.repository }}
+ image: "{{ .Values.image.baseRepo }}{{ .Values.snapshot.image.csiSnapshotController.repository }}:{{ .Values.snapshot.image.csiSnapshotController.gaTag }}"
+ {{- else }}
+ image: "{{ .Values.snapshot.image.csiSnapshotController.repository }}:{{ .Values.snapshot.image.csiSnapshotController.gaTag }}"
+ {{- end }}
+{{- end }}
+ args:
+ - "--v=2"
+ - "--leader-election=true"
+ resources: {{- toYaml .Values.snapshot.snapshotController.resources | nindent 12 }}
+ imagePullPolicy: {{ .Values.snapshot.image.csiSnapshotController.pullPolicy }}
+{{- end -}}
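Both this snapshot-controller and the csi-snapshotter sidecar in the controller Deployment choose between `tag` and `gaTag` based on `snapshot.apiVersion`. Since snapshot support ships disabled, enabling it against the v1 (ga) snapshot API might look like:

```console
helm upgrade -i azurefile-csi-driver azurefile-csi-driver/azurefile-csi-driver \
  -n kube-system --set snapshot.enabled=true --set snapshot.apiVersion=ga
```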
diff --git a/charts/v1.8.0/azurefile-csi-driver/templates/rbac-csi-azurefile-controller.yaml b/charts/v1.8.0/azurefile-csi-driver/templates/rbac-csi-azurefile-controller.yaml
new file mode 100644
index 0000000000..3b3513c8f9
--- /dev/null
+++ b/charts/v1.8.0/azurefile-csi-driver/templates/rbac-csi-azurefile-controller.yaml
@@ -0,0 +1,211 @@
+{{- if .Values.rbac.create -}}
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: {{ .Values.rbac.name }}-external-provisioner-role
+ labels:
+ {{- include "azurefile.labels" . | nindent 4 }}
+rules:
+ - apiGroups: [""]
+ resources: ["persistentvolumes"]
+ verbs: ["get", "list", "watch", "create", "delete"]
+ - apiGroups: [""]
+ resources: ["persistentvolumeclaims"]
+ verbs: ["get", "list", "watch", "update"]
+ - apiGroups: ["storage.k8s.io"]
+ resources: ["storageclasses"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["get", "list", "watch", "create", "update", "patch"]
+ - apiGroups: ["storage.k8s.io"]
+ resources: ["csinodes"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["nodes"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["coordination.k8s.io"]
+ resources: ["leases"]
+ verbs: ["get", "list", "watch", "create", "update", "patch"]
+ - apiGroups: ["snapshot.storage.k8s.io"]
+ resources: ["volumesnapshots"]
+ verbs: ["get", "list"]
+ - apiGroups: ["snapshot.storage.k8s.io"]
+ resources: ["volumesnapshotcontents"]
+ verbs: ["get", "list"]
+
+---
+
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: {{ .Values.rbac.name }}-csi-provisioner-binding
+ labels:
+ {{- include "azurefile.labels" . | nindent 4 }}
+subjects:
+ - kind: ServiceAccount
+ name: {{ .Values.serviceAccount.controller }}
+ namespace: {{ .Release.Namespace }}
+roleRef:
+ kind: ClusterRole
+ name: {{ .Values.rbac.name }}-external-provisioner-role
+ apiGroup: rbac.authorization.k8s.io
+
+---
+
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: {{ .Values.rbac.name }}-external-attacher-role
+ labels:
+ {{- include "azurefile.labels" . | nindent 4 }}
+rules:
+ - apiGroups: [""]
+ resources: ["persistentvolumes"]
+ verbs: ["get", "list", "watch", "update"]
+ - apiGroups: [""]
+ resources: ["nodes"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["csi.storage.k8s.io"]
+ resources: ["csinodeinfos"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["storage.k8s.io"]
+ resources: ["volumeattachments"]
+ verbs: ["get", "list", "watch", "update", "patch"]
+ - apiGroups: ["storage.k8s.io"]
+ resources: ["volumeattachments/status"]
+ verbs: ["get", "list", "watch", "update", "patch"]
+ - apiGroups: ["coordination.k8s.io"]
+ resources: ["leases"]
+ verbs: ["get", "list", "watch", "create", "update", "patch"]
+---
+
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: {{ .Values.rbac.name }}-csi-attacher-binding
+ labels:
+ {{- include "azurefile.labels" . | nindent 4 }}
+subjects:
+ - kind: ServiceAccount
+ name: {{ .Values.serviceAccount.controller }}
+ namespace: {{ .Release.Namespace }}
+roleRef:
+ kind: ClusterRole
+ name: {{ .Values.rbac.name }}-external-attacher-role
+ apiGroup: rbac.authorization.k8s.io
+
+---
+
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: {{ .Values.rbac.name }}-external-snapshotter-role
+ labels:
+ {{- include "azurefile.labels" . | nindent 4 }}
+rules:
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["list", "watch", "create", "update", "patch"]
+ - apiGroups: [""]
+ resources: ["secrets"]
+ verbs: ["get", "list"]
+ - apiGroups: ["snapshot.storage.k8s.io"]
+ resources: ["volumesnapshotclasses"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["snapshot.storage.k8s.io"]
+ resources: ["volumesnapshotcontents"]
+ verbs: ["create", "get", "list", "watch", "update", "delete"]
+ - apiGroups: ["apiextensions.k8s.io"]
+ resources: ["customresourcedefinitions"]
+ verbs: ["create", "list", "watch", "delete"]
+ - apiGroups: ["coordination.k8s.io"]
+ resources: ["leases"]
+ verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
+ - apiGroups: ["snapshot.storage.k8s.io"]
+ resources: ["volumesnapshotcontents/status"]
+ verbs: ["update"]
+---
+
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: {{ .Values.rbac.name }}-csi-snapshotter-binding
+ labels:
+ {{- include "azurefile.labels" . | nindent 4 }}
+subjects:
+ - kind: ServiceAccount
+ name: {{ .Values.serviceAccount.controller }}
+ namespace: {{ .Release.Namespace }}
+roleRef:
+ kind: ClusterRole
+ name: {{ .Values.rbac.name }}-external-snapshotter-role
+ apiGroup: rbac.authorization.k8s.io
+
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: {{ .Values.rbac.name }}-external-resizer-role
+ labels:
+ {{- include "azurefile.labels" . | nindent 4 }}
+rules:
+ - apiGroups: [""]
+ resources: ["persistentvolumes"]
+ verbs: ["get", "list", "watch", "update", "patch"]
+ - apiGroups: [""]
+ resources: ["persistentvolumeclaims"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["persistentvolumeclaims/status"]
+ verbs: ["update", "patch"]
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["list", "watch", "create", "update", "patch"]
+ - apiGroups: ["coordination.k8s.io"]
+ resources: ["leases"]
+ verbs: ["get", "list", "watch", "create", "update", "patch"]
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: {{ .Values.rbac.name }}-csi-resizer-role
+ labels:
+ {{- include "azurefile.labels" . | nindent 4 }}
+subjects:
+ - kind: ServiceAccount
+ name: {{ .Values.serviceAccount.controller }}
+ namespace: {{ .Release.Namespace }}
+roleRef:
+ kind: ClusterRole
+ name: {{ .Values.rbac.name }}-external-resizer-role
+ apiGroup: rbac.authorization.k8s.io
+
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: csi-{{ .Values.rbac.name }}-controller-secret-role
+ labels:
+ {{- include "azurefile.labels" . | nindent 4 }}
+rules:
+ - apiGroups: [""]
+ resources: ["secrets"]
+ verbs: ["get", "list", "create"]
+
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: csi-{{ .Values.rbac.name }}-controller-secret-binding
+ labels:
+ {{- include "azurefile.labels" . | nindent 4 }}
+subjects:
+ - kind: ServiceAccount
+ name: {{ .Values.serviceAccount.controller }}
+ namespace: {{ .Release.Namespace }}
+roleRef:
+ kind: ClusterRole
+ name: csi-{{ .Values.rbac.name }}-controller-secret-role
+ apiGroup: rbac.authorization.k8s.io
+{{ end }}
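One way to sanity-check these bindings after install is to impersonate the controller service account and probe a couple of the verbs granted above (the service account name here is the chart default):

```console
kubectl auth can-i create persistentvolumes \
  --as=system:serviceaccount:kube-system:csi-azurefile-controller-sa
kubectl auth can-i update volumeattachments \
  --as=system:serviceaccount:kube-system:csi-azurefile-controller-sa
```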
diff --git a/charts/v1.8.0/azurefile-csi-driver/templates/rbac-csi-azurefile-node.yaml b/charts/v1.8.0/azurefile-csi-driver/templates/rbac-csi-azurefile-node.yaml
new file mode 100644
index 0000000000..485556144a
--- /dev/null
+++ b/charts/v1.8.0/azurefile-csi-driver/templates/rbac-csi-azurefile-node.yaml
@@ -0,0 +1,29 @@
+{{- if .Values.rbac.create -}}
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: csi-{{ .Values.rbac.name }}-node-secret-role
+ labels:
+ {{- include "azurefile.labels" . | nindent 4 }}
+rules:
+ - apiGroups: [""]
+ resources: ["secrets"]
+ verbs: ["get", "list"]
+
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: csi-{{ .Values.rbac.name }}-node-secret-binding
+ labels:
+ {{- include "azurefile.labels" . | nindent 4 }}
+subjects:
+ - kind: ServiceAccount
+ name: {{ .Values.serviceAccount.node }}
+ namespace: {{ .Release.Namespace }}
+roleRef:
+ kind: ClusterRole
+ name: csi-{{ .Values.rbac.name }}-node-secret-role
+ apiGroup: rbac.authorization.k8s.io
+{{ end }}
diff --git a/charts/v1.8.0/azurefile-csi-driver/templates/rbac-csi-snapshot-controller.yaml b/charts/v1.8.0/azurefile-csi-driver/templates/rbac-csi-snapshot-controller.yaml
new file mode 100644
index 0000000000..58600f1211
--- /dev/null
+++ b/charts/v1.8.0/azurefile-csi-driver/templates/rbac-csi-snapshot-controller.yaml
@@ -0,0 +1,77 @@
+{{- if and .Values.snapshot.enabled .Values.rbac.create -}}
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: csi-snapshot-controller-role
+ labels:
+ {{- include "azurefile.labels" . | nindent 4 }}
+rules:
+ - apiGroups: [""]
+ resources: ["persistentvolumes"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["persistentvolumeclaims"]
+ verbs: ["get", "list", "watch", "update"]
+ - apiGroups: ["storage.k8s.io"]
+ resources: ["storageclasses"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["list", "watch", "create", "update", "patch"]
+ - apiGroups: ["snapshot.storage.k8s.io"]
+ resources: ["volumesnapshotclasses"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["snapshot.storage.k8s.io"]
+ resources: ["volumesnapshotcontents"]
+ verbs: ["create", "get", "list", "watch", "update", "delete"]
+ - apiGroups: ["snapshot.storage.k8s.io"]
+ resources: ["volumesnapshots"]
+ verbs: ["get", "list", "watch", "update"]
+ - apiGroups: ["snapshot.storage.k8s.io"]
+ resources: ["volumesnapshots/status"]
+ verbs: ["update"]
+
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: csi-snapshot-controller-binding
+ labels:
+ {{- include "azurefile.labels" . | nindent 4 }}
+subjects:
+ - kind: ServiceAccount
+ name: {{ .Values.serviceAccount.snapshotController }}
+ namespace: {{ .Release.Namespace }}
+roleRef:
+ kind: ClusterRole
+ name: csi-snapshot-controller-role
+ apiGroup: rbac.authorization.k8s.io
+
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: csi-snapshot-controller-leaderelection-role
+ labels:
+ {{- include "azurefile.labels" . | nindent 4 }}
+rules:
+ - apiGroups: ["coordination.k8s.io"]
+ resources: ["leases"]
+ verbs: ["get", "watch", "list", "delete", "update", "create"]
+
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: csi-snapshot-controller-leaderelection-binding
+ labels:
+ {{- include "azurefile.labels" . | nindent 4 }}
+subjects:
+ - kind: ServiceAccount
+ name: {{ .Values.serviceAccount.snapshotController }}
+ namespace: {{ .Release.Namespace }}
+roleRef:
+ kind: ClusterRole
+ name: csi-snapshot-controller-leaderelection-role
+ apiGroup: rbac.authorization.k8s.io
+{{ end }}
diff --git a/charts/v1.8.0/azurefile-csi-driver/templates/serviceaccount-csi-azurefile-controller.yaml b/charts/v1.8.0/azurefile-csi-driver/templates/serviceaccount-csi-azurefile-controller.yaml
new file mode 100644
index 0000000000..66e0726acb
--- /dev/null
+++ b/charts/v1.8.0/azurefile-csi-driver/templates/serviceaccount-csi-azurefile-controller.yaml
@@ -0,0 +1,9 @@
+{{- if .Values.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ .Values.serviceAccount.controller }}
+ namespace: {{ .Release.Namespace }}
+ labels:
+ {{- include "azurefile.labels" . | nindent 4 }}
+{{- end -}}
diff --git a/charts/v1.8.0/azurefile-csi-driver/templates/serviceaccount-csi-azurefile-node.yaml b/charts/v1.8.0/azurefile-csi-driver/templates/serviceaccount-csi-azurefile-node.yaml
new file mode 100644
index 0000000000..697b8db390
--- /dev/null
+++ b/charts/v1.8.0/azurefile-csi-driver/templates/serviceaccount-csi-azurefile-node.yaml
@@ -0,0 +1,9 @@
+{{- if .Values.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ .Values.serviceAccount.node }}
+ namespace: {{ .Release.Namespace }}
+ labels:
+ {{- include "azurefile.labels" . | nindent 4 }}
+{{- end -}}
diff --git a/charts/v1.8.0/azurefile-csi-driver/templates/serviceaccount-csi-snapshot-controller.yaml b/charts/v1.8.0/azurefile-csi-driver/templates/serviceaccount-csi-snapshot-controller.yaml
new file mode 100644
index 0000000000..e77ef8f991
--- /dev/null
+++ b/charts/v1.8.0/azurefile-csi-driver/templates/serviceaccount-csi-snapshot-controller.yaml
@@ -0,0 +1,9 @@
+{{- if and .Values.snapshot.enabled .Values.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ .Values.serviceAccount.snapshotController }}
+ namespace: {{ .Release.Namespace }}
+ labels:
+ {{- include "azurefile.labels" . | nindent 4 }}
+{{- end -}}
diff --git a/charts/v1.8.0/azurefile-csi-driver/values.yaml b/charts/v1.8.0/azurefile-csi-driver/values.yaml
new file mode 100755
index 0000000000..6c0870c7f1
--- /dev/null
+++ b/charts/v1.8.0/azurefile-csi-driver/values.yaml
@@ -0,0 +1,233 @@
+image:
+ baseRepo: mcr.microsoft.com
+ azurefile:
+ repository: /k8s/csi/azurefile-csi
+ tag: v1.8.0
+ pullPolicy: IfNotPresent
+ csiProvisioner:
+ repository: /oss/kubernetes-csi/csi-provisioner
+ tag: v2.2.2
+ pullPolicy: IfNotPresent
+ csiAttacher:
+ repository: /oss/kubernetes-csi/csi-attacher
+ tag: v3.3.0
+ pullPolicy: IfNotPresent
+ csiResizer:
+ repository: /oss/kubernetes-csi/csi-resizer
+ tag: v1.3.0
+ pullPolicy: IfNotPresent
+ livenessProbe:
+ repository: /oss/kubernetes-csi/livenessprobe
+ tag: v2.5.0
+ pullPolicy: IfNotPresent
+ nodeDriverRegistrar:
+ repository: /oss/kubernetes-csi/csi-node-driver-registrar
+ tag: v2.4.0
+ pullPolicy: IfNotPresent
+
+## Reference to one or more secrets to be used when pulling images
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+imagePullSecrets: []
+# - name: myRegistryKeySecretName
+
+# -- Custom labels to add into metadata
+customLabels: {}
+ # k8s-app: azurefile-csi-driver
+
+serviceAccount:
+ create: true # When true, service accounts will be created for you. Set to false if you want to use your own.
+ controller: csi-azurefile-controller-sa # Name of Service Account to be created or used
+ node: csi-azurefile-node-sa # Name of Service Account to be created or used
+ snapshotController: csi-snapshot-controller-sa # Name of Service Account to be created or used
+
+rbac:
+ create: true
+ name: azurefile
+
+controller:
+ name: csi-azurefile-controller
+ cloudConfigSecretName: azure-cloud-provider
+ cloudConfigSecretNamespace: kube-system
+ allowEmptyCloudConfig: true
+ replicas: 2
+  hostNetwork: true # this setting can be disabled if the controller does not depend on MSI
+ metricsPort: 29614
+ livenessProbe:
+ healthPort: 29612
+ runOnMaster: false
+ attachRequired: false
+ logLevel: 5
+ resources:
+ csiProvisioner:
+ limits:
+ cpu: 1
+ memory: 500Mi
+ requests:
+ cpu: 10m
+ memory: 20Mi
+ csiAttacher:
+ limits:
+ cpu: 1
+ memory: 500Mi
+ requests:
+ cpu: 10m
+ memory: 20Mi
+ csiResizer:
+ limits:
+ cpu: 1
+ memory: 500Mi
+ requests:
+ cpu: 10m
+ memory: 20Mi
+ csiSnapshotter:
+ limits:
+ cpu: 1
+ memory: 100Mi
+ requests:
+ cpu: 10m
+ memory: 20Mi
+ livenessProbe:
+ limits:
+ cpu: 1
+ memory: 100Mi
+ requests:
+ cpu: 10m
+ memory: 20Mi
+ azurefile:
+ limits:
+ cpu: 1
+ memory: 200Mi
+ requests:
+ cpu: 10m
+ memory: 20Mi
+ kubeconfig: ""
+ tolerations:
+ - key: "node-role.kubernetes.io/master"
+ operator: "Exists"
+ effect: "NoSchedule"
+ - key: "node-role.kubernetes.io/controlplane"
+ operator: "Exists"
+ effect: "NoSchedule"
+
+node:
+ cloudConfigSecretName: azure-cloud-provider
+ cloudConfigSecretNamespace: kube-system
+ allowEmptyCloudConfig: true
+ metricsPort: 29615
+ livenessProbe:
+ healthPort: 29613
+ logLevel: 5
+
+snapshot:
+ enabled: false
+ apiVersion: beta # available values: beta, ga
+ image:
+ csiSnapshotter:
+ repository: /oss/kubernetes-csi/csi-snapshotter
+ tag: v3.0.3
+ gaTag: v4.2.1
+ pullPolicy: IfNotPresent
+ csiSnapshotController:
+ repository: /oss/kubernetes-csi/snapshot-controller
+ tag: v3.0.3
+ gaTag: v4.2.1
+ pullPolicy: IfNotPresent
+ snapshotController:
+ name: csi-snapshot-controller
+ replicas: 2
+ resources:
+ limits:
+ cpu: 1
+ memory: 100Mi
+ requests:
+ cpu: 10m
+ memory: 20Mi
+
+feature:
+ enableFSGroupPolicy: false
+ enableGetVolumeStats: false
+
+driver:
+ name: file.csi.azure.com
+ customUserAgent: ""
+ userAgentSuffix: "OSS-helm"
+
+linux:
+ enabled: true
+ dsName: csi-azurefile-node # daemonset name
+ dnsPolicy: Default # available values: Default, ClusterFirst, ClusterFirstWithHostNet, None
+ kubelet: /var/lib/kubelet
+ kubeconfig: ""
+ distro: debian # available values: debian, fedora
+ resources:
+ livenessProbe:
+ limits:
+ cpu: 1
+ memory: 100Mi
+ requests:
+ cpu: 10m
+ memory: 20Mi
+ nodeDriverRegistrar:
+ limits:
+ cpu: 1
+ memory: 100Mi
+ requests:
+ cpu: 10m
+ memory: 20Mi
+ azurefile:
+ limits:
+ cpu: 1
+ memory: 300Mi
+ requests:
+ cpu: 10m
+ memory: 20Mi
+ tolerations:
+ - operator: "Exists"
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: type
+ operator: NotIn
+ values:
+ - virtual-kubelet
+
+windows:
+ enabled: true
+ dsName: csi-azurefile-node-win # daemonset name
+ kubelet: 'C:\var\lib\kubelet'
+ kubeconfig: 'C:\\k\\config'
+ resources:
+ livenessProbe:
+ limits:
+ cpu: 1
+ memory: 200Mi
+ requests:
+ cpu: 10m
+ memory: 20Mi
+ nodeDriverRegistrar:
+ limits:
+ cpu: 1
+ memory: 200Mi
+ requests:
+ cpu: 10m
+ memory: 20Mi
+ azurefile:
+ limits:
+ cpu: 1
+ memory: 400Mi
+ requests:
+ cpu: 10m
+ memory: 20Mi
+ tolerations:
+ - key: "node.kubernetes.io/os"
+ operator: "Exists"
+ effect: "NoSchedule"
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: type
+ operator: NotIn
+ values:
+ - virtual-kubelet
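For anything beyond one or two overrides, these defaults are easier to manage in a values file than on the command line; a minimal sketch (file name hypothetical, values merge over the defaults above):

```console
cat <<EOF > azurefile-values.yaml
controller:
  replicas: 1
node:
  logLevel: 3
EOF
helm upgrade -i azurefile-csi-driver azurefile-csi-driver/azurefile-csi-driver \
  -n kube-system -f azurefile-values.yaml
```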
diff --git a/deploy/csi-azurefile-controller.yaml b/deploy/csi-azurefile-controller.yaml
index 8ee5d67d79..de7f11f2b6 100644
--- a/deploy/csi-azurefile-controller.yaml
+++ b/deploy/csi-azurefile-controller.yaml
@@ -43,8 +43,8 @@ spec:
name: socket-dir
resources:
limits:
- cpu: 100m
- memory: 300Mi
+ cpu: 1
+ memory: 500Mi
requests:
cpu: 10m
memory: 20Mi
@@ -63,8 +63,8 @@ spec:
name: socket-dir
resources:
limits:
- cpu: 100m
- memory: 200Mi
+ cpu: 1
+ memory: 500Mi
requests:
cpu: 10m
memory: 20Mi
@@ -82,7 +82,7 @@ spec:
mountPath: /csi
resources:
limits:
- cpu: 100m
+ cpu: 1
memory: 100Mi
requests:
cpu: 10m
@@ -102,13 +102,13 @@ spec:
mountPath: /csi
resources:
limits:
- cpu: 100m
- memory: 300Mi
+ cpu: 1
+ memory: 500Mi
requests:
cpu: 10m
memory: 20Mi
- name: liveness-probe
- image: mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.4.0
+ image: mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.5.0
args:
- --csi-address=/csi/csi.sock
- --probe-timeout=3s
@@ -119,13 +119,13 @@ spec:
mountPath: /csi
resources:
limits:
- cpu: 100m
+ cpu: 1
memory: 100Mi
requests:
cpu: 10m
memory: 20Mi
- name: azurefile
- image: mcr.microsoft.com/k8s/csi/azurefile-csi:v1.7.0
+ image: mcr.microsoft.com/k8s/csi/azurefile-csi:v1.8.0
imagePullPolicy: IfNotPresent
args:
- "--v=5"
@@ -163,7 +163,7 @@ spec:
name: azure-cred
resources:
limits:
- cpu: 200m
+ cpu: 1
memory: 200Mi
requests:
cpu: 10m
diff --git a/deploy/csi-azurefile-node-windows.yaml b/deploy/csi-azurefile-node-windows.yaml
index c45c056363..ccda117245 100644
--- a/deploy/csi-azurefile-node-windows.yaml
+++ b/deploy/csi-azurefile-node-windows.yaml
@@ -39,7 +39,7 @@ spec:
volumeMounts:
- mountPath: C:\csi
name: plugin-dir
- image: mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.4.0
+ image: mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.5.0
args:
- --csi-address=$(CSI_ENDPOINT)
- --probe-timeout=3s
@@ -50,13 +50,13 @@ spec:
value: unix://C:\\csi\\csi.sock
resources:
limits:
- cpu: 200m
+ cpu: 1
memory: 200Mi
requests:
cpu: 10m
memory: 20Mi
- name: node-driver-registrar
- image: mcr.microsoft.com/oss/kubernetes-csi/csi-node-driver-registrar:v2.3.0
+ image: mcr.microsoft.com/oss/kubernetes-csi/csi-node-driver-registrar:v2.4.0
args:
- --v=2
- --csi-address=$(CSI_ENDPOINT)
@@ -67,8 +67,8 @@ spec:
- /csi-node-driver-registrar.exe
- --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
- --mode=kubelet-registration-probe
- initialDelaySeconds: 30
- timeoutSeconds: 15
+ initialDelaySeconds: 60
+ timeoutSeconds: 30
env:
- name: CSI_ENDPOINT
value: unix://C:\\csi\\csi.sock
@@ -87,13 +87,13 @@ spec:
mountPath: C:\registration
resources:
limits:
- cpu: 200m
+ cpu: 1
memory: 200Mi
requests:
cpu: 10m
memory: 20Mi
- name: azurefile
- image: mcr.microsoft.com/k8s/csi/azurefile-csi:v1.7.0
+ image: mcr.microsoft.com/k8s/csi/azurefile-csi:v1.8.0
imagePullPolicy: IfNotPresent
args:
- --v=5
@@ -146,7 +146,7 @@ spec:
mountPath: \\.\pipe\csi-proxy-smb-v1beta1
resources:
limits:
- cpu: 400m
+ cpu: 1
memory: 400Mi
requests:
cpu: 10m
diff --git a/deploy/csi-azurefile-node.yaml b/deploy/csi-azurefile-node.yaml
index 928e72d6bb..8007d4e227 100644
--- a/deploy/csi-azurefile-node.yaml
+++ b/deploy/csi-azurefile-node.yaml
@@ -39,7 +39,7 @@ spec:
volumeMounts:
- mountPath: /csi
name: socket-dir
- image: mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.4.0
+ image: mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.5.0
args:
- --csi-address=/csi/csi.sock
- --probe-timeout=3s
@@ -47,21 +47,17 @@ spec:
- --v=2
resources:
limits:
- cpu: 100m
+ cpu: 1
memory: 100Mi
requests:
cpu: 10m
memory: 20Mi
- name: node-driver-registrar
- image: mcr.microsoft.com/oss/kubernetes-csi/csi-node-driver-registrar:v2.3.0
+ image: mcr.microsoft.com/oss/kubernetes-csi/csi-node-driver-registrar:v2.4.0
args:
- --csi-address=$(ADDRESS)
- --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
- --v=2
- lifecycle:
- preStop:
- exec:
- command: ["/bin/sh", "-c", "rm -rf /registration/file.csi.azure.com-reg.sock /csi/csi.sock"]
livenessProbe:
exec:
command:
@@ -82,13 +78,13 @@ spec:
mountPath: /registration
resources:
limits:
- cpu: 100m
+ cpu: 1
memory: 100Mi
requests:
cpu: 10m
memory: 20Mi
- name: azurefile
- image: mcr.microsoft.com/k8s/csi/azurefile-csi:v1.7.0
+ image: mcr.microsoft.com/k8s/csi/azurefile-csi:v1.8.0
imagePullPolicy: IfNotPresent
args:
- "--v=5"
@@ -135,7 +131,7 @@ spec:
name: device-dir
resources:
limits:
- cpu: 400m
+ cpu: 1
memory: 300Mi
requests:
cpu: 10m
diff --git a/deploy/csi-snapshot-controller.yaml b/deploy/csi-snapshot-controller.yaml
index b8ca154c6f..28b03abafd 100644
--- a/deploy/csi-snapshot-controller.yaml
+++ b/deploy/csi-snapshot-controller.yaml
@@ -5,7 +5,7 @@ metadata:
name: csi-snapshot-controller
namespace: kube-system
spec:
- replicas: 1
+ replicas: 2
selector:
matchLabels:
app: csi-snapshot-controller
@@ -32,10 +32,10 @@ spec:
image: mcr.microsoft.com/oss/kubernetes-csi/snapshot-controller:v3.0.3
args:
- "--v=2"
- - "--leader-election=false"
+ - "--leader-election=true"
resources:
limits:
- cpu: 100m
+ cpu: 200m
memory: 100Mi
requests:
cpu: 10m
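With two replicas, `--leader-election=true` is what prevents the snapshot-controller pods from reconciling concurrently: only the current lease holder acts. The election state is kept in a coordination.k8s.io Lease, which can be located with something like:

```console
kubectl -n kube-system get leases | grep -i snapshot
```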
diff --git a/deploy/example/nfs/README.md b/deploy/example/nfs/README.md
index 838fa4566e..7cf9f501f4 100644
--- a/deploy/example/nfs/README.md
+++ b/deploy/example/nfs/README.md
@@ -1,28 +1,16 @@
## NFS support
-[NFS 4.1 support for Azure Files](https://azure.microsoft.com/en-us/blog/nfs-41-support-for-azure-files-is-now-in-preview/preview/) is now in Public Preview. This service is optimized for random access workloads with in-place data updates and provides full POSIX file system support. This page shows how to use NFS feature by Azure File CSI driver on Azure Kubernetes cluster.
+[NFS 4.1 support for Azure Files](https://docs.microsoft.com/en-us/azure/storage/files/files-nfs-protocol) is optimized for random access workloads with in-place data updates and provides full POSIX file system support. This page shows how to use the NFS feature of the Azure File CSI driver on an Azure Kubernetes cluster.
- supported OS: Linux
-#### Supported CSI driver version: `v0.9.0`
-> storage account dynamic provisioning is supported from `v0.10.0`.
-
-#### [Available regions](https://aka.ms/azurefiles/nfs/preview/regions)
-We are continually adding more regions. Latest region list is available on [Azure NFS documentation](https://aka.ms/azurefiles/nfs/preview/regions)
-
#### Prerequisite
- - [Install CSI driver](https://github.com/kubernetes-sigs/azurefile-csi-driver/blob/master/docs/install-csi-driver-master.md)
- - Register `AllowNfsFileShares` feature under your subscription
-```console
-az feature register --name AllowNfsFileShares --namespace Microsoft.Storage
-az feature list -o table --query "[?contains(name, 'Microsoft.Storage/AllowNfsFileShares')].{Name:name,State:properties.state}"
-az provider register --namespace Microsoft.Storage
-```
- - [Optional] Create a `Premium_LRS` Azure storage account with following configurations to support NFS share
+ - When using the AKS managed CSI driver, make sure the cluster `Control plane` identity (which has the same name as the AKS cluster) has `Contributor` permission on the vnet resource group
+ - [Optional] Create a `Premium_LRS` or `Premium_ZRS` Azure storage account with the following configuration to support NFS shares
+ > The `Premium_ZRS` account type is only available in a [limited set of regions](https://docs.microsoft.com/en-us/azure/storage/common/storage-redundancy#zone-redundant-storage)
- account kind: `FileStorage`
- secure transfer required(enable HTTPS traffic only): `false`
- select virtual network of agent nodes in `Firewalls and virtual networks`
- specify `storageAccount` in below storage class `parameters`
- - [Optional] If cluster identity is Managed Service Identity(MSI), make sure user assigned identity has `Contributor` role on node resource group
#### How to use NFS feature
- Create an Azure File storage class
@@ -36,6 +24,7 @@ metadata:
provisioner: file.csi.azure.com
parameters:
protocol: nfs
+ skuName: Premium_LRS # available values: Premium_LRS, Premium_ZRS
```
run following commands to create a storage class:
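The create command itself sits outside this hunk; assuming the example manifest that accompanies this README, it would be along the lines of:

```console
kubectl create -f storageclass-azurefile-nfs.yaml
```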
diff --git a/deploy/v1.8.0/crd-csi-snapshot.yaml b/deploy/v1.8.0/crd-csi-snapshot.yaml
new file mode 100644
index 0000000000..e812e99fcf
--- /dev/null
+++ b/deploy/v1.8.0/crd-csi-snapshot.yaml
@@ -0,0 +1,501 @@
+
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.3.0
+ api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/139"
+ creationTimestamp: null
+ name: volumesnapshots.snapshot.storage.k8s.io
+spec:
+ group: snapshot.storage.k8s.io
+ names:
+ kind: VolumeSnapshot
+ listKind: VolumeSnapshotList
+ plural: volumesnapshots
+ singular: volumesnapshot
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - description: Indicates if a snapshot is ready to be used to restore a volume.
+ jsonPath: .status.readyToUse
+ name: ReadyToUse
+ type: boolean
+ - description: Name of the source PVC from where a dynamically taken snapshot
+ will be created.
+ jsonPath: .spec.source.persistentVolumeClaimName
+ name: SourcePVC
+ type: string
+ - description: Name of the VolumeSnapshotContent which represents a pre-provisioned
+ snapshot.
+ jsonPath: .spec.source.volumeSnapshotContentName
+ name: SourceSnapshotContent
+ type: string
+ - description: Represents the complete size of the snapshot.
+ jsonPath: .status.restoreSize
+ name: RestoreSize
+ type: string
+ - description: The name of the VolumeSnapshotClass requested by the VolumeSnapshot.
+ jsonPath: .spec.volumeSnapshotClassName
+ name: SnapshotClass
+ type: string
+ - description: The name of the VolumeSnapshotContent to which this VolumeSnapshot
+ is bound.
+ jsonPath: .status.boundVolumeSnapshotContentName
+ name: SnapshotContent
+ type: string
+ - description: Timestamp when the point-in-time snapshot is taken by the underlying
+ storage system.
+ jsonPath: .status.creationTime
+ name: CreationTime
+ type: date
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: VolumeSnapshot is a user's request for either creating a point-in-time
+ snapshot of a persistent volume, or binding to a pre-existing snapshot.
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ spec:
+ description: 'spec defines the desired characteristics of a snapshot requested
+ by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots
+ Required.'
+ properties:
+ source:
+ description: source specifies where a snapshot will be created from.
+ This field is immutable after creation. Required.
+ properties:
+ persistentVolumeClaimName:
+ description: persistentVolumeClaimName specifies the name of the
+ PersistentVolumeClaim object in the same namespace as the VolumeSnapshot
+ object where the snapshot should be dynamically taken from.
+ This field is immutable.
+ type: string
+ volumeSnapshotContentName:
+ description: volumeSnapshotContentName specifies the name of a
+ pre-existing VolumeSnapshotContent object. This field is immutable.
+ type: string
+ type: object
+ volumeSnapshotClassName:
+ description: 'volumeSnapshotClassName is the name of the VolumeSnapshotClass
+ requested by the VolumeSnapshot. If not specified, the default snapshot
+ class will be used if one exists. If not specified, and there is
+ no default snapshot class, dynamic snapshot creation will fail.
+ Empty string is not allowed for this field. TODO(xiangqian): a webhook
+ validation on empty string. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshot-classes'
+ type: string
+ required:
+ - source
+ type: object
+ status:
+ description: 'status represents the current information of a snapshot.
+ NOTE: status can be modified by sources other than system controllers,
+ and must not be depended upon for accuracy. Controllers should only
+ use information from the VolumeSnapshotContent object after verifying
+ that the binding is accurate and complete.'
+ properties:
+ boundVolumeSnapshotContentName:
+ description: 'boundVolumeSnapshotContentName represents the name of
+ the VolumeSnapshotContent object to which the VolumeSnapshot object
+ is bound. If not specified, it indicates that the VolumeSnapshot
+ object has not been successfully bound to a VolumeSnapshotContent
+ object yet. NOTE: Specified boundVolumeSnapshotContentName alone
+ does not mean binding is valid. Controllers MUST always verify
+ bidirectional binding between VolumeSnapshot and VolumeSnapshotContent
+ to avoid possible security issues.'
+ type: string
+ creationTime:
+ description: creationTime is the timestamp when the point-in-time
+ snapshot is taken by the underlying storage system. In dynamic snapshot
+ creation case, this field will be filled in with the "creation_time"
+ value returned from CSI "CreateSnapshotRequest" gRPC call. For a
+ pre-existing snapshot, this field will be filled with the "creation_time"
+ value returned from the CSI "ListSnapshots" gRPC call if the driver
+ supports it. If not specified, it indicates that the creation time
+ of the snapshot is unknown.
+ format: date-time
+ type: string
+ error:
+ description: error is the last observed error during snapshot creation,
+ if any. This field could be helpful to upper level controllers(i.e.,
+ application controller) to decide whether they should continue on
+ waiting for the snapshot to be created based on the type of error
+ reported.
+ properties:
+ message:
+ description: 'message is a string detailing the encountered error
+ during snapshot creation if specified. NOTE: message may be
+ logged, and it should not contain sensitive information.'
+ type: string
+ time:
+ description: time is the timestamp when the error was encountered.
+ format: date-time
+ type: string
+ type: object
+ readyToUse:
+ description: readyToUse indicates if a snapshot is ready to be used
+ to restore a volume. In dynamic snapshot creation case, this field
+ will be filled in with the "ready_to_use" value returned from CSI
+ "CreateSnapshotRequest" gRPC call. For a pre-existing snapshot,
+ this field will be filled with the "ready_to_use" value returned
+ from the CSI "ListSnapshots" gRPC call if the driver supports it,
+ otherwise, this field will be set to "True". If not specified, it
+ means the readiness of a snapshot is unknown.
+ type: boolean
+ restoreSize:
+ type: string
+ description: restoreSize represents the complete size of the snapshot
+ in bytes. In dynamic snapshot creation case, this field will be
+ filled in with the "size_bytes" value returned from CSI "CreateSnapshotRequest"
+ gRPC call. For a pre-existing snapshot, this field will be filled
+ with the "size_bytes" value returned from the CSI "ListSnapshots"
+ gRPC call if the driver supports it. When restoring a volume from
+ this snapshot, the size of the volume MUST NOT be smaller than the
+ restoreSize if it is specified, otherwise the restoration will fail.
+ If not specified, it indicates that the size is unknown.
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ required:
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.3.0
+ api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/139"
+ creationTimestamp: null
+ name: volumesnapshotclasses.snapshot.storage.k8s.io
+spec:
+ group: snapshot.storage.k8s.io
+ names:
+ kind: VolumeSnapshotClass
+ listKind: VolumeSnapshotClassList
+ plural: volumesnapshotclasses
+ singular: volumesnapshotclass
+ scope: Cluster
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .driver
+ name: Driver
+ type: string
+ - description: Determines whether a VolumeSnapshotContent created through the
+ VolumeSnapshotClass should be deleted when its bound VolumeSnapshot is deleted.
+ jsonPath: .deletionPolicy
+ name: DeletionPolicy
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+      description: VolumeSnapshotClass specifies parameters that an underlying storage
+ system uses when creating a volume snapshot. A specific VolumeSnapshotClass
+ is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses
+ are non-namespaced
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ deletionPolicy:
+ description: deletionPolicy determines whether a VolumeSnapshotContent
+ created through the VolumeSnapshotClass should be deleted when its bound
+ VolumeSnapshot is deleted. Supported values are "Retain" and "Delete".
+ "Retain" means that the VolumeSnapshotContent and its physical snapshot
+ on underlying storage system are kept. "Delete" means that the VolumeSnapshotContent
+ and its physical snapshot on underlying storage system are deleted.
+ Required.
+ enum:
+ - Delete
+ - Retain
+ type: string
+ driver:
+ description: driver is the name of the storage driver that handles this
+ VolumeSnapshotClass. Required.
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ parameters:
+ additionalProperties:
+ type: string
+ description: parameters is a key-value map with storage driver specific
+ parameters for creating snapshots. These values are opaque to Kubernetes.
+ type: object
+ required:
+ - deletionPolicy
+ - driver
+ type: object
+ served: true
+ storage: true
+ subresources: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.3.0
+ api-approved.kubernetes.io: "https://github.com/kubernetes-csi/external-snapshotter/pull/139"
+ creationTimestamp: null
+ name: volumesnapshotcontents.snapshot.storage.k8s.io
+spec:
+ group: snapshot.storage.k8s.io
+ names:
+ kind: VolumeSnapshotContent
+ listKind: VolumeSnapshotContentList
+ plural: volumesnapshotcontents
+ singular: volumesnapshotcontent
+ scope: Cluster
+ versions:
+ - additionalPrinterColumns:
+ - description: Indicates if a snapshot is ready to be used to restore a volume.
+ jsonPath: .status.readyToUse
+ name: ReadyToUse
+ type: boolean
+ - description: Represents the complete size of the snapshot in bytes
+ jsonPath: .status.restoreSize
+ name: RestoreSize
+ type: integer
+ - description: Determines whether this VolumeSnapshotContent and its physical
+ snapshot on the underlying storage system should be deleted when its bound
+ VolumeSnapshot is deleted.
+ jsonPath: .spec.deletionPolicy
+ name: DeletionPolicy
+ type: string
+ - description: Name of the CSI driver used to create the physical snapshot on
+ the underlying storage system.
+ jsonPath: .spec.driver
+ name: Driver
+ type: string
+ - description: Name of the VolumeSnapshotClass to which this snapshot belongs.
+ jsonPath: .spec.volumeSnapshotClassName
+ name: VolumeSnapshotClass
+ type: string
+ - description: Name of the VolumeSnapshot object to which this VolumeSnapshotContent
+ object is bound.
+ jsonPath: .spec.volumeSnapshotRef.name
+ name: VolumeSnapshot
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: VolumeSnapshotContent represents the actual "on-disk" snapshot
+ object in the underlying storage system
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ spec:
+ description: spec defines properties of a VolumeSnapshotContent created
+ by the underlying storage system. Required.
+ properties:
+ deletionPolicy:
+ description: deletionPolicy determines whether this VolumeSnapshotContent
+ and its physical snapshot on the underlying storage system should
+ be deleted when its bound VolumeSnapshot is deleted. Supported values
+ are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent
+ and its physical snapshot on underlying storage system are kept.
+ "Delete" means that the VolumeSnapshotContent and its physical snapshot
+ on underlying storage system are deleted. In dynamic snapshot creation
+ case, this field will be filled in with the "DeletionPolicy" field
+ defined in the VolumeSnapshotClass the VolumeSnapshot refers to.
+ For pre-existing snapshots, users MUST specify this field when creating
+ the VolumeSnapshotContent object. Required.
+ enum:
+ - Delete
+ - Retain
+ type: string
+ driver:
+ description: driver is the name of the CSI driver used to create the
+ physical snapshot on the underlying storage system. This MUST be
+ the same as the name returned by the CSI GetPluginName() call for
+ that driver. Required.
+ type: string
+ source:
+ description: source specifies from where a snapshot will be created.
+ This field is immutable after creation. Required.
+ properties:
+ snapshotHandle:
+ description: snapshotHandle specifies the CSI "snapshot_id" of
+ a pre-existing snapshot on the underlying storage system. This
+ field is immutable.
+ type: string
+ volumeHandle:
+ description: volumeHandle specifies the CSI "volume_id" of the
+                volume from which a snapshot should be dynamically taken.
+ This field is immutable.
+ type: string
+ type: object
+ volumeSnapshotClassName:
+ description: name of the VolumeSnapshotClass to which this snapshot
+ belongs.
+ type: string
+ volumeSnapshotRef:
+ description: volumeSnapshotRef specifies the VolumeSnapshot object
+ to which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName
+ field must reference to this VolumeSnapshotContent's name for the
+ bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent
+ object, name and namespace of the VolumeSnapshot object MUST be
+ provided for binding to happen. This field is immutable after creation.
+ Required.
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ fieldPath:
+ description: 'If referring to a piece of an object instead of
+ an entire object, this string should contain a valid JSON/Go
+ field access statement, such as desiredState.manifest.containers[2].
+ For example, if the object reference is to a container within
+ a pod, this would take on a value like: "spec.containers{name}"
+ (where "name" refers to the name of the container that triggered
+ the event) or if no container name is specified "spec.containers[2]"
+ (container with index 2 in this pod). This syntax is chosen
+ only to have some well-defined way of referencing a part of
+ an object. TODO: this design is not final and this field is
+ subject to change in the future.'
+ type: string
+ kind:
+ description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ namespace:
+ description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
+ type: string
+ resourceVersion:
+ description: 'Specific resourceVersion to which this reference
+ is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
+ type: string
+ uid:
+ description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
+ type: string
+ type: object
+ required:
+ - deletionPolicy
+ - driver
+ - source
+ - volumeSnapshotRef
+ type: object
+ status:
+ description: status represents the current information of a snapshot.
+ properties:
+ creationTime:
+ description: creationTime is the timestamp when the point-in-time
+ snapshot is taken by the underlying storage system. In dynamic snapshot
+ creation case, this field will be filled in with the "creation_time"
+ value returned from CSI "CreateSnapshotRequest" gRPC call. For a
+ pre-existing snapshot, this field will be filled with the "creation_time"
+ value returned from the CSI "ListSnapshots" gRPC call if the driver
+ supports it. If not specified, it indicates the creation time is
+ unknown. The format of this field is a Unix nanoseconds time encoded
+ as an int64. On Unix, the command `date +%s%N` returns the current
+ time in nanoseconds since 1970-01-01 00:00:00 UTC.
+ format: int64
+ type: integer
+ error:
+ description: error is the latest observed error during snapshot creation,
+ if any.
+ properties:
+ message:
+ description: 'message is a string detailing the encountered error
+ during snapshot creation if specified. NOTE: message may be
+ logged, and it should not contain sensitive information.'
+ type: string
+ time:
+ description: time is the timestamp when the error was encountered.
+ format: date-time
+ type: string
+ type: object
+ readyToUse:
+ description: readyToUse indicates if a snapshot is ready to be used
+ to restore a volume. In dynamic snapshot creation case, this field
+ will be filled in with the "ready_to_use" value returned from CSI
+ "CreateSnapshotRequest" gRPC call. For a pre-existing snapshot,
+ this field will be filled with the "ready_to_use" value returned
+ from the CSI "ListSnapshots" gRPC call if the driver supports it,
+ otherwise, this field will be set to "True". If not specified, it
+ means the readiness of a snapshot is unknown.
+ type: boolean
+ restoreSize:
+ description: restoreSize represents the complete size of the snapshot
+ in bytes. In dynamic snapshot creation case, this field will be
+ filled in with the "size_bytes" value returned from CSI "CreateSnapshotRequest"
+ gRPC call. For a pre-existing snapshot, this field will be filled
+ with the "size_bytes" value returned from the CSI "ListSnapshots"
+ gRPC call if the driver supports it. When restoring a volume from
+ this snapshot, the size of the volume MUST NOT be smaller than the
+ restoreSize if it is specified, otherwise the restoration will fail.
+ If not specified, it indicates that the size is unknown.
+ format: int64
+ minimum: 0
+ type: integer
+ snapshotHandle:
+ description: snapshotHandle is the CSI "snapshot_id" of a snapshot
+ on the underlying storage system. If not specified, it indicates
+ that dynamic snapshot creation has either failed or it is still
+ in progress.
+ type: string
+ type: object
+ required:
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
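
For orientation, the v1beta1 API surface installed by these CRDs is consumed roughly as sketched below — a VolumeSnapshotClass selecting the driver, and a VolumeSnapshot pointing at a PVC. This is a minimal illustration, not part of the diff; the class, snapshot, and PVC names are hypothetical placeholders.

```yaml
# Hypothetical sketch of how the CRDs above are used with this driver.
apiVersion: snapshot.storage.k8s.io/v1beta1
kind: VolumeSnapshotClass
metadata:
  name: csi-azurefile-vsc          # placeholder name
driver: file.csi.azure.com         # required: the CSI driver handling this class
deletionPolicy: Delete             # required: Delete or Retain, per the enum above
---
apiVersion: snapshot.storage.k8s.io/v1beta1
kind: VolumeSnapshot
metadata:
  name: azurefile-snapshot         # placeholder name
spec:
  volumeSnapshotClassName: csi-azurefile-vsc
  source:
    persistentVolumeClaimName: pvc-azurefile   # placeholder PVC in the same namespace
```

Once the external-snapshotter binds it, `status.boundVolumeSnapshotContentName` and `status.readyToUse` are populated as described in the schema above.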
diff --git a/deploy/v1.8.0/csi-azurefile-controller.yaml b/deploy/v1.8.0/csi-azurefile-controller.yaml
new file mode 100644
index 0000000000..de7f11f2b6
--- /dev/null
+++ b/deploy/v1.8.0/csi-azurefile-controller.yaml
@@ -0,0 +1,177 @@
+---
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+ name: csi-azurefile-controller
+ namespace: kube-system
+spec:
+ replicas: 2
+ selector:
+ matchLabels:
+ app: csi-azurefile-controller
+ template:
+ metadata:
+ labels:
+ app: csi-azurefile-controller
+ spec:
+ hostNetwork: true # only required for MSI enabled cluster
+ serviceAccountName: csi-azurefile-controller-sa
+ nodeSelector:
+ kubernetes.io/os: linux # add "kubernetes.io/role: master" to run controller on master node
+ priorityClassName: system-cluster-critical
+ tolerations:
+ - key: "node-role.kubernetes.io/master"
+ operator: "Exists"
+ effect: "NoSchedule"
+ - key: "node-role.kubernetes.io/controlplane"
+ operator: "Exists"
+ effect: "NoSchedule"
+ containers:
+ - name: csi-provisioner
+ image: mcr.microsoft.com/oss/kubernetes-csi/csi-provisioner:v2.2.2
+ args:
+ - "-v=2"
+ - "--csi-address=$(ADDRESS)"
+ - "--leader-election"
+ - "--timeout=300s"
+ - "--extra-create-metadata=true"
+ env:
+ - name: ADDRESS
+ value: /csi/csi.sock
+ volumeMounts:
+ - mountPath: /csi
+ name: socket-dir
+ resources:
+ limits:
+ cpu: 1
+ memory: 500Mi
+ requests:
+ cpu: 10m
+ memory: 20Mi
+ - name: csi-attacher
+ image: mcr.microsoft.com/oss/kubernetes-csi/csi-attacher:v3.3.0
+ args:
+ - "-v=2"
+ - "-csi-address=$(ADDRESS)"
+ - "-timeout=120s"
+ - "-leader-election"
+ env:
+ - name: ADDRESS
+ value: /csi/csi.sock
+ volumeMounts:
+ - mountPath: /csi
+ name: socket-dir
+ resources:
+ limits:
+ cpu: 1
+ memory: 500Mi
+ requests:
+ cpu: 10m
+ memory: 20Mi
+ - name: csi-snapshotter
+ image: mcr.microsoft.com/oss/kubernetes-csi/csi-snapshotter:v3.0.3
+ args:
+ - "-v=2"
+ - "-csi-address=$(ADDRESS)"
+ - "-leader-election"
+ env:
+ - name: ADDRESS
+ value: /csi/csi.sock
+ volumeMounts:
+ - name: socket-dir
+ mountPath: /csi
+ resources:
+ limits:
+ cpu: 1
+ memory: 100Mi
+ requests:
+ cpu: 10m
+ memory: 20Mi
+ - name: csi-resizer
+ image: mcr.microsoft.com/oss/kubernetes-csi/csi-resizer:v1.3.0
+ args:
+ - "-csi-address=$(ADDRESS)"
+ - "-v=2"
+ - "-leader-election"
+ - '-handle-volume-inuse-error=false'
+ env:
+ - name: ADDRESS
+ value: /csi/csi.sock
+ volumeMounts:
+ - name: socket-dir
+ mountPath: /csi
+ resources:
+ limits:
+ cpu: 1
+ memory: 500Mi
+ requests:
+ cpu: 10m
+ memory: 20Mi
+ - name: liveness-probe
+ image: mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.5.0
+ args:
+ - --csi-address=/csi/csi.sock
+ - --probe-timeout=3s
+ - --health-port=29612
+ - --v=2
+ volumeMounts:
+ - name: socket-dir
+ mountPath: /csi
+ resources:
+ limits:
+ cpu: 1
+ memory: 100Mi
+ requests:
+ cpu: 10m
+ memory: 20Mi
+ - name: azurefile
+ image: mcr.microsoft.com/k8s/csi/azurefile-csi:v1.8.0
+ imagePullPolicy: IfNotPresent
+ args:
+ - "--v=5"
+ - "--endpoint=$(CSI_ENDPOINT)"
+ - "--metrics-address=0.0.0.0:29614"
+ - "--user-agent-suffix=OSS-kubectl"
+ ports:
+ - containerPort: 29612
+ name: healthz
+ protocol: TCP
+ - containerPort: 29614
+ name: metrics
+ protocol: TCP
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 30
+ timeoutSeconds: 10
+ periodSeconds: 30
+ env:
+ - name: AZURE_CREDENTIAL_FILE
+ valueFrom:
+ configMapKeyRef:
+ name: azure-cred-file
+ key: path
+ optional: true
+ - name: CSI_ENDPOINT
+ value: unix:///csi/csi.sock
+ volumeMounts:
+ - mountPath: /csi
+ name: socket-dir
+ - mountPath: /etc/kubernetes/
+ name: azure-cred
+ resources:
+ limits:
+ cpu: 1
+ memory: 200Mi
+ requests:
+ cpu: 10m
+ memory: 20Mi
+ volumes:
+ - name: socket-dir
+ emptyDir: {}
+ - name: azure-cred
+ hostPath:
+ path: /etc/kubernetes/
+ type: DirectoryOrCreate
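
The controller reads Azure credentials through the optional `azure-cred-file` ConfigMap wired into `AZURE_CREDENTIAL_FILE` above, falling back to the `/etc/kubernetes/` host mount otherwise. A minimal sketch of that ConfigMap, assuming the conventional `azure.json` locations (the values are illustrative, not mandated by this diff):

```yaml
# Hypothetical example of the optional azure-cred-file ConfigMap
# referenced via configMapKeyRef in the manifests above and below.
apiVersion: v1
kind: ConfigMap
metadata:
  name: azure-cred-file
  namespace: kube-system
data:
  path: /etc/kubernetes/azure.json   # read by Linux pods via the "path" key
  path-windows: C:\k\azure.json      # read by the Windows DaemonSet via "path-windows"
```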
diff --git a/deploy/v1.8.0/csi-azurefile-driver.yaml b/deploy/v1.8.0/csi-azurefile-driver.yaml
new file mode 100644
index 0000000000..0d34d01568
--- /dev/null
+++ b/deploy/v1.8.0/csi-azurefile-driver.yaml
@@ -0,0 +1,14 @@
+---
+apiVersion: storage.k8s.io/v1
+kind: CSIDriver
+metadata:
+ name: file.csi.azure.com
+ annotations:
+ csiDriver: v1.8.0
+ snapshot: v3.0.3
+spec:
+ attachRequired: false
+ podInfoOnMount: true
+ volumeLifecycleModes:
+ - Persistent
+ - Ephemeral
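
Because the CSIDriver object advertises both `Persistent` and `Ephemeral` lifecycle modes with `attachRequired: false`, a pod can mount an existing file share inline without a PVC. A minimal sketch, assuming a pre-created share and a secret holding the storage account name and key (all names are placeholders):

```yaml
# Hypothetical inline (ephemeral) volume using the CSIDriver above.
apiVersion: v1
kind: Pod
metadata:
  name: nginx-azurefile-inline       # placeholder name
spec:
  containers:
    - name: nginx
      image: nginx
      volumeMounts:
        - name: azurefile
          mountPath: /mnt/azurefile
  volumes:
    - name: azurefile
      csi:
        driver: file.csi.azure.com
        volumeAttributes:
          secretName: azure-secret   # placeholder secret with account name/key
          shareName: test-share      # placeholder pre-existing file share
```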
diff --git a/deploy/v1.8.0/csi-azurefile-node-windows.yaml b/deploy/v1.8.0/csi-azurefile-node-windows.yaml
new file mode 100644
index 0000000000..ccda117245
--- /dev/null
+++ b/deploy/v1.8.0/csi-azurefile-node-windows.yaml
@@ -0,0 +1,184 @@
+---
+kind: DaemonSet
+apiVersion: apps/v1
+metadata:
+ name: csi-azurefile-node-win
+ namespace: kube-system
+spec:
+ updateStrategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ type: RollingUpdate
+ selector:
+ matchLabels:
+ app: csi-azurefile-node-win
+ template:
+ metadata:
+ labels:
+ app: csi-azurefile-node-win
+ spec:
+ serviceAccountName: csi-azurefile-node-sa
+ tolerations:
+ - key: "node.kubernetes.io/os"
+ operator: "Exists"
+ effect: "NoSchedule"
+ nodeSelector:
+ kubernetes.io/os: windows
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: type
+ operator: NotIn
+ values:
+ - virtual-kubelet
+ priorityClassName: system-node-critical
+ containers:
+ - name: liveness-probe
+ volumeMounts:
+ - mountPath: C:\csi
+ name: plugin-dir
+ image: mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.5.0
+ args:
+ - --csi-address=$(CSI_ENDPOINT)
+ - --probe-timeout=3s
+ - --health-port=29613
+ - --v=2
+ env:
+ - name: CSI_ENDPOINT
+ value: unix://C:\\csi\\csi.sock
+ resources:
+ limits:
+ cpu: 1
+ memory: 200Mi
+ requests:
+ cpu: 10m
+ memory: 20Mi
+ - name: node-driver-registrar
+ image: mcr.microsoft.com/oss/kubernetes-csi/csi-node-driver-registrar:v2.4.0
+ args:
+ - --v=2
+ - --csi-address=$(CSI_ENDPOINT)
+ - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
+ livenessProbe:
+ exec:
+ command:
+ - /csi-node-driver-registrar.exe
+ - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
+ - --mode=kubelet-registration-probe
+ initialDelaySeconds: 60
+ timeoutSeconds: 30
+ env:
+ - name: CSI_ENDPOINT
+ value: unix://C:\\csi\\csi.sock
+ - name: DRIVER_REG_SOCK_PATH
+ value: C:\\var\\lib\\kubelet\\plugins\\file.csi.azure.com\\csi.sock
+ - name: KUBE_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ volumeMounts:
+ - name: kubelet-dir
+ mountPath: "C:\\var\\lib\\kubelet"
+ - name: plugin-dir
+ mountPath: C:\csi
+ - name: registration-dir
+ mountPath: C:\registration
+ resources:
+ limits:
+ cpu: 1
+ memory: 200Mi
+ requests:
+ cpu: 10m
+ memory: 20Mi
+ - name: azurefile
+ image: mcr.microsoft.com/k8s/csi/azurefile-csi:v1.8.0
+ imagePullPolicy: IfNotPresent
+ args:
+ - --v=5
+ - --endpoint=$(CSI_ENDPOINT)
+ - --nodeid=$(KUBE_NODE_NAME)
+ - --kubeconfig=C:\\k\\config
+ - --metrics-address=0.0.0.0:29615
+ ports:
+ - containerPort: 29613
+ name: healthz
+ protocol: TCP
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 30
+ timeoutSeconds: 10
+ periodSeconds: 30
+ env:
+ - name: AZURE_CREDENTIAL_FILE
+ valueFrom:
+ configMapKeyRef:
+ name: azure-cred-file
+ key: path-windows
+ optional: true
+ - name: CSI_ENDPOINT
+ value: unix://C:\\csi\\csi.sock
+ - name: KUBE_NODE_NAME
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: spec.nodeName
+ volumeMounts:
+ - name: kubelet-dir
+ mountPath: "C:\\var\\lib\\kubelet"
+ - name: plugin-dir
+ mountPath: C:\csi
+ - name: azure-config
+ mountPath: C:\k
+ - name: csi-proxy-fs-pipe-v1
+ mountPath: \\.\pipe\csi-proxy-filesystem-v1
+ - name: csi-proxy-smb-pipe-v1
+ mountPath: \\.\pipe\csi-proxy-smb-v1
+ # these paths are still included for compatibility, they're used
+          # only if the node still has the beta version of the CSI proxy
+ - name: csi-proxy-fs-pipe-v1beta1
+ mountPath: \\.\pipe\csi-proxy-filesystem-v1beta1
+ - name: csi-proxy-smb-pipe-v1beta1
+ mountPath: \\.\pipe\csi-proxy-smb-v1beta1
+ resources:
+ limits:
+ cpu: 1
+ memory: 400Mi
+ requests:
+ cpu: 10m
+ memory: 20Mi
+ volumes:
+ - name: csi-proxy-fs-pipe-v1
+ hostPath:
+ path: \\.\pipe\csi-proxy-filesystem-v1
+ - name: csi-proxy-smb-pipe-v1
+ hostPath:
+ path: \\.\pipe\csi-proxy-smb-v1
+ # these paths are still included for compatibility, they're used
+      # only if the node still has the beta version of the CSI proxy
+ - name: csi-proxy-fs-pipe-v1beta1
+ hostPath:
+ path: \\.\pipe\csi-proxy-filesystem-v1beta1
+ - name: csi-proxy-smb-pipe-v1beta1
+ hostPath:
+ path: \\.\pipe\csi-proxy-smb-v1beta1
+ - name: registration-dir
+ hostPath:
+ path: C:\var\lib\kubelet\plugins_registry\
+ type: Directory
+ - name: kubelet-dir
+ hostPath:
+ path: C:\var\lib\kubelet\
+ type: Directory
+ - name: plugin-dir
+ hostPath:
+ path: C:\var\lib\kubelet\plugins\file.csi.azure.com\
+ type: DirectoryOrCreate
+ - name: azure-config
+ hostPath:
+ path: C:\k
+ type: DirectoryOrCreate
diff --git a/deploy/v1.8.0/csi-azurefile-node.yaml b/deploy/v1.8.0/csi-azurefile-node.yaml
new file mode 100644
index 0000000000..8007d4e227
--- /dev/null
+++ b/deploy/v1.8.0/csi-azurefile-node.yaml
@@ -0,0 +1,160 @@
+---
+kind: DaemonSet
+apiVersion: apps/v1
+metadata:
+ name: csi-azurefile-node
+ namespace: kube-system
+spec:
+ updateStrategy:
+ rollingUpdate:
+ maxUnavailable: 1
+ type: RollingUpdate
+ selector:
+ matchLabels:
+ app: csi-azurefile-node
+ template:
+ metadata:
+ labels:
+ app: csi-azurefile-node
+ spec:
+ hostNetwork: true
+ dnsPolicy: Default
+ serviceAccountName: csi-azurefile-node-sa
+ nodeSelector:
+ kubernetes.io/os: linux
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: type
+ operator: NotIn
+ values:
+ - virtual-kubelet
+ priorityClassName: system-node-critical
+ tolerations:
+ - operator: "Exists"
+ containers:
+ - name: liveness-probe
+ volumeMounts:
+ - mountPath: /csi
+ name: socket-dir
+ image: mcr.microsoft.com/oss/kubernetes-csi/livenessprobe:v2.5.0
+ args:
+ - --csi-address=/csi/csi.sock
+ - --probe-timeout=3s
+ - --health-port=29613
+ - --v=2
+ resources:
+ limits:
+ cpu: 1
+ memory: 100Mi
+ requests:
+ cpu: 10m
+ memory: 20Mi
+ - name: node-driver-registrar
+ image: mcr.microsoft.com/oss/kubernetes-csi/csi-node-driver-registrar:v2.4.0
+ args:
+ - --csi-address=$(ADDRESS)
+ - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
+ - --v=2
+ livenessProbe:
+ exec:
+ command:
+ - /csi-node-driver-registrar
+ - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)
+ - --mode=kubelet-registration-probe
+ initialDelaySeconds: 30
+ timeoutSeconds: 15
+ env:
+ - name: ADDRESS
+ value: /csi/csi.sock
+ - name: DRIVER_REG_SOCK_PATH
+ value: /var/lib/kubelet/plugins/file.csi.azure.com/csi.sock
+ volumeMounts:
+ - name: socket-dir
+ mountPath: /csi
+ - name: registration-dir
+ mountPath: /registration
+ resources:
+ limits:
+ cpu: 1
+ memory: 100Mi
+ requests:
+ cpu: 10m
+ memory: 20Mi
+ - name: azurefile
+ image: mcr.microsoft.com/k8s/csi/azurefile-csi:v1.8.0
+ imagePullPolicy: IfNotPresent
+ args:
+ - "--v=5"
+ - "--endpoint=$(CSI_ENDPOINT)"
+ - "--nodeid=$(KUBE_NODE_NAME)"
+ - "--metrics-address=0.0.0.0:29615"
+ ports:
+ - containerPort: 29613
+ name: healthz
+ protocol: TCP
+ livenessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /healthz
+ port: healthz
+ initialDelaySeconds: 30
+ timeoutSeconds: 10
+ periodSeconds: 30
+ env:
+ - name: AZURE_CREDENTIAL_FILE
+ valueFrom:
+ configMapKeyRef:
+ name: azure-cred-file
+ key: path
+ optional: true
+ - name: CSI_ENDPOINT
+ value: unix:///csi/csi.sock
+ - name: KUBE_NODE_NAME
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: spec.nodeName
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - mountPath: /csi
+ name: socket-dir
+ - mountPath: /var/lib/kubelet/
+ mountPropagation: Bidirectional
+ name: mountpoint-dir
+ - mountPath: /etc/kubernetes/
+ name: azure-cred
+ - mountPath: /dev
+ name: device-dir
+ resources:
+ limits:
+ cpu: 1
+ memory: 300Mi
+ requests:
+ cpu: 10m
+ memory: 20Mi
+ volumes:
+ - hostPath:
+ path: /var/lib/kubelet/plugins/file.csi.azure.com
+ type: DirectoryOrCreate
+ name: socket-dir
+ - hostPath:
+ path: /var/lib/kubelet/
+ type: DirectoryOrCreate
+ name: mountpoint-dir
+ - hostPath:
+ path: /var/lib/kubelet/plugins_registry/
+ type: DirectoryOrCreate
+ name: registration-dir
+ - hostPath:
+ path: /etc/kubernetes/
+ type: DirectoryOrCreate
+ name: azure-cred
+ - hostPath:
+ path: /dev
+ type: Directory
+ name: device-dir
+---
diff --git a/deploy/v1.8.0/csi-snapshot-controller.yaml b/deploy/v1.8.0/csi-snapshot-controller.yaml
new file mode 100644
index 0000000000..28b03abafd
--- /dev/null
+++ b/deploy/v1.8.0/csi-snapshot-controller.yaml
@@ -0,0 +1,42 @@
+---
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+ name: csi-snapshot-controller
+ namespace: kube-system
+spec:
+ replicas: 2
+ selector:
+ matchLabels:
+ app: csi-snapshot-controller
+ template:
+ metadata:
+ labels:
+ app: csi-snapshot-controller
+ spec:
+ serviceAccountName: csi-snapshot-controller-sa
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-cluster-critical
+ tolerations:
+ - key: "node-role.kubernetes.io/master"
+ operator: "Equal"
+ value: "true"
+ effect: "NoSchedule"
+ - key: "node-role.kubernetes.io/controlplane"
+ operator: "Equal"
+ value: "true"
+ effect: "NoSchedule"
+ containers:
+ - name: csi-snapshot-controller
+ image: mcr.microsoft.com/oss/kubernetes-csi/snapshot-controller:v3.0.3
+ args:
+ - "--v=2"
+ - "--leader-election=true"
+ resources:
+ limits:
+ cpu: 200m
+ memory: 100Mi
+ requests:
+ cpu: 10m
+ memory: 20Mi
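
With the snapshot-controller reconciling VolumeSnapshot/VolumeSnapshotContent binding, restoring is then a matter of pointing a new PVC's `dataSource` at a ready snapshot. A minimal sketch (names, class, and size are placeholders; per the CRD schema, the requested size must not be smaller than the snapshot's `restoreSize`):

```yaml
# Hypothetical restore of a VolumeSnapshot into a new PVC.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-azurefile-restored       # placeholder name
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: azurefile-csi    # placeholder StorageClass
  resources:
    requests:
      storage: 100Gi                 # must be >= the snapshot's restoreSize
  dataSource:
    name: azurefile-snapshot         # placeholder VolumeSnapshot name
    kind: VolumeSnapshot
    apiGroup: snapshot.storage.k8s.io
```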
diff --git a/deploy/v1.8.0/rbac-csi-azurefile-controller.yaml b/deploy/v1.8.0/rbac-csi-azurefile-controller.yaml
new file mode 100644
index 0000000000..f38ed5ab56
--- /dev/null
+++ b/deploy/v1.8.0/rbac-csi-azurefile-controller.yaml
@@ -0,0 +1,197 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: csi-azurefile-controller-sa
+ namespace: kube-system
+
+---
+
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: azurefile-external-provisioner-role
+rules:
+ - apiGroups: [""]
+ resources: ["persistentvolumes"]
+ verbs: ["get", "list", "watch", "create", "delete"]
+ - apiGroups: [""]
+ resources: ["persistentvolumeclaims"]
+ verbs: ["get", "list", "watch", "update"]
+ - apiGroups: ["storage.k8s.io"]
+ resources: ["storageclasses"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["get", "list", "watch", "create", "update", "patch"]
+ - apiGroups: ["storage.k8s.io"]
+ resources: ["csinodes"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["nodes"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["coordination.k8s.io"]
+ resources: ["leases"]
+ verbs: ["get", "list", "watch", "create", "update", "patch"]
+ - apiGroups: ["snapshot.storage.k8s.io"]
+ resources: ["volumesnapshots"]
+ verbs: ["get", "list"]
+ - apiGroups: ["snapshot.storage.k8s.io"]
+ resources: ["volumesnapshotcontents"]
+ verbs: ["get", "list"]
+---
+
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: azurefile-csi-provisioner-binding
+subjects:
+ - kind: ServiceAccount
+ name: csi-azurefile-controller-sa
+ namespace: kube-system
+roleRef:
+ kind: ClusterRole
+ name: azurefile-external-provisioner-role
+ apiGroup: rbac.authorization.k8s.io
+
+---
+
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: azurefile-external-attacher-role
+rules:
+ - apiGroups: [""]
+ resources: ["persistentvolumes"]
+ verbs: ["get", "list", "watch", "update"]
+ - apiGroups: [""]
+ resources: ["nodes"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["csi.storage.k8s.io"]
+ resources: ["csinodeinfos"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["storage.k8s.io"]
+ resources: ["volumeattachments"]
+ verbs: ["get", "list", "watch", "update", "patch"]
+ - apiGroups: ["storage.k8s.io"]
+ resources: ["volumeattachments/status"]
+ verbs: ["get", "list", "watch", "update", "patch"]
+ - apiGroups: ["coordination.k8s.io"]
+ resources: ["leases"]
+ verbs: ["get", "list", "watch", "create", "update", "patch"]
+---
+
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: azurefile-csi-attacher-binding
+subjects:
+ - kind: ServiceAccount
+ name: csi-azurefile-controller-sa
+ namespace: kube-system
+roleRef:
+ kind: ClusterRole
+ name: azurefile-external-attacher-role
+ apiGroup: rbac.authorization.k8s.io
+
+---
+
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: azurefile-external-snapshotter-role
+rules:
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["list", "watch", "create", "update", "patch"]
+ - apiGroups: [""]
+ resources: ["secrets"]
+ verbs: ["get", "list"]
+ - apiGroups: ["snapshot.storage.k8s.io"]
+ resources: ["volumesnapshotclasses"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["snapshot.storage.k8s.io"]
+ resources: ["volumesnapshotcontents"]
+ verbs: ["create", "get", "list", "watch", "update", "delete"]
+ - apiGroups: ["apiextensions.k8s.io"]
+ resources: ["customresourcedefinitions"]
+ verbs: ["create", "list", "watch", "delete"]
+ - apiGroups: ["snapshot.storage.k8s.io"]
+ resources: ["volumesnapshotcontents/status"]
+ verbs: ["update"]
+ - apiGroups: ["coordination.k8s.io"]
+ resources: ["leases"]
+ verbs: ["get", "watch", "list", "delete", "update", "create", "patch"]
+
+---
+
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: azurefile-csi-snapshotter-binding
+subjects:
+ - kind: ServiceAccount
+ name: csi-azurefile-controller-sa
+ namespace: kube-system
+roleRef:
+ kind: ClusterRole
+ name: azurefile-external-snapshotter-role
+ apiGroup: rbac.authorization.k8s.io
+
+---
+
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: azurefile-external-resizer-role
+rules:
+ - apiGroups: [""]
+ resources: ["persistentvolumes"]
+ verbs: ["get", "list", "watch", "update", "patch"]
+ - apiGroups: [""]
+ resources: ["persistentvolumeclaims"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["persistentvolumeclaims/status"]
+ verbs: ["update", "patch"]
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["list", "watch", "create", "update", "patch"]
+
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: azurefile-csi-resizer-role
+subjects:
+ - kind: ServiceAccount
+ name: csi-azurefile-controller-sa
+ namespace: kube-system
+roleRef:
+ kind: ClusterRole
+ name: azurefile-external-resizer-role
+ apiGroup: rbac.authorization.k8s.io
+
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: csi-azurefile-controller-secret-role
+rules:
+ - apiGroups: [""]
+ resources: ["secrets"]
+ verbs: ["get", "list", "create"]
+
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: csi-azurefile-controller-secret-binding
+subjects:
+ - kind: ServiceAccount
+ name: csi-azurefile-controller-sa
+ namespace: kube-system
+roleRef:
+ kind: ClusterRole
+ name: csi-azurefile-controller-secret-role
+ apiGroup: rbac.authorization.k8s.io
diff --git a/deploy/v1.8.0/rbac-csi-azurefile-node.yaml b/deploy/v1.8.0/rbac-csi-azurefile-node.yaml
new file mode 100644
index 0000000000..903f6c8e25
--- /dev/null
+++ b/deploy/v1.8.0/rbac-csi-azurefile-node.yaml
@@ -0,0 +1,30 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: csi-azurefile-node-sa
+ namespace: kube-system
+
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: csi-azurefile-node-secret-role
+rules:
+ - apiGroups: [""]
+ resources: ["secrets"]
+ verbs: ["get", "list"]
+
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: csi-azurefile-node-secret-binding
+subjects:
+ - kind: ServiceAccount
+ name: csi-azurefile-node-sa
+ namespace: kube-system
+roleRef:
+ kind: ClusterRole
+ name: csi-azurefile-node-secret-role
+ apiGroup: rbac.authorization.k8s.io
diff --git a/deploy/v1.8.0/rbac-csi-snapshot-controller.yaml b/deploy/v1.8.0/rbac-csi-snapshot-controller.yaml
new file mode 100644
index 0000000000..4583e21896
--- /dev/null
+++ b/deploy/v1.8.0/rbac-csi-snapshot-controller.yaml
@@ -0,0 +1,75 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: csi-snapshot-controller-sa
+ namespace: kube-system
+
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: csi-snapshot-controller-role
+rules:
+ - apiGroups: [""]
+ resources: ["persistentvolumes"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["persistentvolumeclaims"]
+ verbs: ["get", "list", "watch", "update"]
+ - apiGroups: ["storage.k8s.io"]
+ resources: ["storageclasses"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["list", "watch", "create", "update", "patch"]
+ - apiGroups: ["snapshot.storage.k8s.io"]
+ resources: ["volumesnapshotclasses"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: ["snapshot.storage.k8s.io"]
+ resources: ["volumesnapshotcontents"]
+ verbs: ["create", "get", "list", "watch", "update", "delete"]
+ - apiGroups: ["snapshot.storage.k8s.io"]
+ resources: ["volumesnapshots"]
+ verbs: ["get", "list", "watch", "update"]
+ - apiGroups: ["snapshot.storage.k8s.io"]
+ resources: ["volumesnapshots/status"]
+ verbs: ["update"]
+
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: csi-snapshot-controller-binding
+subjects:
+ - kind: ServiceAccount
+ name: csi-snapshot-controller-sa
+ namespace: kube-system
+roleRef:
+ kind: ClusterRole
+ name: csi-snapshot-controller-role
+ apiGroup: rbac.authorization.k8s.io
+
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: csi-snapshot-controller-leaderelection-role
+rules:
+ - apiGroups: ["coordination.k8s.io"]
+ resources: ["leases"]
+ verbs: ["get", "watch", "list", "delete", "update", "create"]
+
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: csi-snapshot-controller-leaderelection-binding
+subjects:
+ - kind: ServiceAccount
+ name: csi-snapshot-controller-sa
+ namespace: kube-system
+roleRef:
+ kind: ClusterRole
+ name: csi-snapshot-controller-leaderelection-role
+ apiGroup: rbac.authorization.k8s.io
diff --git a/docs/csi-debug.md b/docs/csi-debug.md
index 633c79478b..c01675fc3c 100644
--- a/docs/csi-debug.md
+++ b/docs/csi-debug.md
@@ -50,6 +50,23 @@ accountname.file.core.windows.net:/accountname/pvcn-46c357b2-333b-4c42-8a7f-2133
accountname.file.core.windows.net:/accountname/pvcn-46c357b2-333b-4c42-8a7f-2133023d6c48 on /var/lib/kubelet/pods/7994e352-a4ee-4750-8cb4-db4fcf48543e/volumes/kubernetes.io~csi/pvc-46c357b2-333b-4c42-8a7f-2133023d6c48/mount type nfs4 (rw,relatime,vers=4.1,rsize=1048576,wsize=1048576,namlen=255,hard,proto=tcp,timeo=600,retrans=2,sec=sys,clientaddr=10.244.0.6,local_lock=none,addr=20.150.29.168)
+ - get cloud config file (`azure.json`) on Linux node
+```console
+kubectl exec -it csi-azurefile-node-dx94w -n kube-system -c azurefile -- cat /etc/kubernetes/azure.json
+```
+
+ - get cloud config file (`azure.json`) on Windows node
+```console
+kubectl exec -it csi-azurefile-node-win-xxxxx -n kube-system -c azurefile -- cmd
+type c:\k\azure.json
+```
+
+ - get Windows csi-proxy logs from inside the driver container
+```console
+kubectl exec -it csi-azurefile-node-win-xxxxx -n kube-system -c azurefile -- cmd
+type c:\k\csi-proxy.err.log
+```
+
#### Update driver version quickly by editing driver deployment directly
- update controller deployment
```console
@@ -71,7 +88,7 @@ change below deployment config, e.g.
- On Linux node
```console
mkdir /tmp/test
-sudo mount -v -t cifs //accountname.blob.core.windows.net/filesharename /tmp/test -o vers=3.0,username=accountname,password=accountkey,dir_mode=0777,file_mode=0777,cache=strict,actimeo=30
+sudo mount -v -t cifs //accountname.blob.core.windows.net/filesharename /tmp/test -o username=accountname,password=accountkey,dir_mode=0777,file_mode=0777,cache=strict,actimeo=30
```
- On Windows node
diff --git a/docs/driver-parameters.md b/docs/driver-parameters.md
index 5c27dec08b..280e3b9869 100644
--- a/docs/driver-parameters.md
+++ b/docs/driver-parameters.md
@@ -6,7 +6,7 @@
Name | Meaning | Example | Mandatory | Default value
--- | --- | --- | --- | ---
-skuName | Azure file storage account type (alias: `storageAccountType`) | `Standard_LRS`, `Standard_ZRS`, `Standard_GRS`, `Standard_RAGRS`, `Premium_LRS` | No | `Standard_LRS`<br>Note:<br> 1. minimum file share size of Premium account type is `100GB`<br> 2.[`ZRS` account type](https://docs.microsoft.com/en-us/azure/storage/common/storage-redundancy#zone-redundant-storage) is supported in limited regions<br> 3. Premium files shares is currently only available for LRS
+skuName | Azure file storage account type (alias: `storageAccountType`) | `Standard_LRS`, `Standard_ZRS`, `Standard_GRS`, `Standard_RAGRS`, `Premium_LRS`, `Premium_ZRS` | No | `Standard_LRS`<br>Note:<br> 1. minimum file share size of Premium account type is `100GB`<br> 2.[`ZRS` account type](https://docs.microsoft.com/en-us/azure/storage/common/storage-redundancy#zone-redundant-storage) is supported in limited regions<br> 3. NFS file share only supports Premium account type
storageAccount | specify Azure storage account name| STORAGE_ACCOUNT_NAME | No | if empty, driver will find a suitable storage account that matches account settings in the same resource group; if a storage account name is provided, storage account must exist.
enableLargeFileShares | specify whether to use a storage account with large file shares enabled or not. If this flag is set to true and a storage account with large file shares enabled doesn't exist, a new storage account with large file shares enabled will be created. This flag should be used with the standard sku as the storage accounts created with premium sku have largeFileShares option enabled by default. | `true`,`false` | No | `false`
protocol | specify file share protocol | `smb`, `nfs` (`nfs` is in [Preview](https://github.com/kubernetes-sigs/azurefile-csi-driver/tree/master/deploy/example/nfs)) | No | `smb`
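
To make the new `Premium_ZRS` option concrete, here is a minimal StorageClass sketch using the parameters documented in this table (the class name is a placeholder, and only `skuName` is strictly needed — the rest shows common companions):

```yaml
# Hypothetical StorageClass exercising the newly documented Premium_ZRS sku.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: azurefile-premium-zrs        # placeholder name
provisioner: file.csi.azure.com
parameters:
  skuName: Premium_ZRS               # account type added in this change
  protocol: smb                      # default protocol; nfs requires a Premium account
reclaimPolicy: Delete
allowVolumeExpansion: true
mountOptions:
  - dir_mode=0777
  - file_mode=0777
```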
diff --git a/docs/install-azurefile-csi-driver.md b/docs/install-azurefile-csi-driver.md
index 53f3e8916a..eefa09dc3f 100644
--- a/docs/install-azurefile-csi-driver.md
+++ b/docs/install-azurefile-csi-driver.md
@@ -1,6 +1,6 @@
## Install azurefile CSI driver on a Kubernetes cluster
- [install CSI driver master version](./install-csi-driver-master.md)
+ - [install v1.8.0 CSI driver](./install-csi-driver-v1.8.0.md)
- [install v1.7.0 CSI driver](./install-csi-driver-v1.7.0.md)
- [install v1.6.0 CSI driver](./install-csi-driver-v1.6.0.md)
- - [install v1.5.0 CSI driver](./install-csi-driver-v1.5.0.md)
diff --git a/docs/install-csi-driver-v1.8.0.md b/docs/install-csi-driver-v1.8.0.md
new file mode 100644
index 0000000000..5c5589bdf0
--- /dev/null
+++ b/docs/install-csi-driver-v1.8.0.md
@@ -0,0 +1,26 @@
+## Install azurefile CSI driver v1.8.0 version on a Kubernetes cluster
+
+### Install by kubectl
+```console
+curl -skSL https://raw.githubusercontent.com/kubernetes-sigs/azurefile-csi-driver/v1.8.0/deploy/install-driver.sh | bash -s v1.8.0 --
+```
+
+ - check pods status:
+```console
+kubectl -n kube-system get pod -o wide --watch -l app=csi-azurefile-controller
+kubectl -n kube-system get pod -o wide --watch -l app=csi-azurefile-node
+```
+
+example output:
+
+```
+NAME READY STATUS RESTARTS AGE IP NODE
+csi-azurefile-controller-56bfddd689-dh5tk 6/6 Running 0 35s 10.240.0.19 k8s-agentpool-22533604-0
+csi-azurefile-node-cvgbs 3/3 Running 0 7m4s 10.240.0.35 k8s-agentpool-22533604-1
+csi-azurefile-node-dr4s4 3/3 Running 0 7m4s 10.240.0.4 k8s-agentpool-22533604-0
+```
+
+### clean up Azure File CSI driver
+```console
+curl -skSL https://raw.githubusercontent.com/kubernetes-sigs/azurefile-csi-driver/v1.8.0/deploy/uninstall-driver.sh | bash -s --
+```
diff --git a/docs/install-driver-on-aks.md b/docs/install-driver-on-aks.md
index 62339b19e1..cb445635e7 100644
--- a/docs/install-driver-on-aks.md
+++ b/docs/install-driver-on-aks.md
@@ -3,12 +3,15 @@
### Option#1: Enable CSI driver in AKS cluster creation with version < 1.21
> From AKS 1.21, Azure Disk and Azure File CSI drivers would be installed by default.
>
+> Make sure the cluster `Control plane` identity (with name `AKS Cluster Name`) has `Contributor` permission on the vnet resource group when using the NFS protocol
+>
Follow AKS doc: [Enable CSI drivers for Azure disks and Azure Files on AKS (preview)](https://docs.microsoft.com/en-us/azure/aks/csi-storage-drivers)
### Option#2: Enable CSI driver on existing cluster with version < 1.21
+> Please remove the manually installed open source CSI driver before upgrading to AKS 1.21.
- Prerequisites
-AKS cluster is created with user assigned identity(with naming rule [`AKS Cluster Name-agentpool`](https://docs.microsoft.com/en-us/azure/aks/use-managed-identity#summary-of-managed-identities)) on agent node pool by default, make sure that identity has `Contributor` role on node resource group, follow below instruction to set up `Contributor` role on node resource group
+AKS cluster is created with a user assigned identity (with naming rule [`AKS Cluster Name-agentpool`](https://docs.microsoft.com/en-us/azure/aks/use-managed-identity#summary-of-managed-identities)) on the agent node pool by default; make sure that identity has the `Contributor` role on the node resource group, then follow the instruction below to set up the `Contributor` permission on the node resource group
![image](https://user-images.githubusercontent.com/4178417/120978367-f68f0a00-c7a6-11eb-8e87-89247d1ddc0b.png):
diff --git a/go.mod b/go.mod
index 0c663a43c1..063c1dc711 100644
--- a/go.mod
+++ b/go.mod
@@ -5,8 +5,8 @@ go 1.16
require (
github.com/Azure/azure-sdk-for-go v55.8.0+incompatible
github.com/Azure/azure-storage-file-go v0.8.0
- github.com/Azure/go-autorest/autorest v0.11.20
- github.com/Azure/go-autorest/autorest/adal v0.9.15
+ github.com/Azure/go-autorest/autorest v0.11.21
+ github.com/Azure/go-autorest/autorest/adal v0.9.16
github.com/Azure/go-autorest/autorest/to v0.4.0
github.com/container-storage-interface/spec v1.5.0
github.com/golang/mock v1.6.0
@@ -14,8 +14,8 @@ require (
github.com/kubernetes-csi/csi-lib-utils v0.7.0
github.com/kubernetes-csi/csi-proxy/client v1.0.1
github.com/kubernetes-csi/external-snapshotter/v2 v2.0.0-20200617021606-4800ca72d403
- github.com/onsi/ginkgo v1.16.4
- github.com/onsi/gomega v1.15.0
+ github.com/onsi/ginkgo v1.16.5
+ github.com/onsi/gomega v1.16.0
github.com/pborman/uuid v1.2.0
github.com/pelletier/go-toml v1.9.3
github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021
@@ -23,17 +23,17 @@ require (
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781
google.golang.org/grpc v1.38.0
google.golang.org/protobuf v1.26.0
- k8s.io/api v0.22.1
- k8s.io/apimachinery v0.22.1
- k8s.io/client-go v0.22.1
- k8s.io/cloud-provider v0.22.1
- k8s.io/component-base v0.22.1
+ k8s.io/api v0.22.3
+ k8s.io/apimachinery v0.22.3
+ k8s.io/client-go v0.22.3
+ k8s.io/cloud-provider v0.22.2
+ k8s.io/component-base v0.22.3
k8s.io/klog/v2 v2.10.0
- k8s.io/kubernetes v1.21.0
+ k8s.io/kubernetes v1.21.1
k8s.io/mount-utils v0.0.0
- k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9
+ k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a
sigs.k8s.io/cloud-provider-azure v0.7.4
- sigs.k8s.io/yaml v1.2.0
+ sigs.k8s.io/yaml v1.3.0
)
replace (
@@ -70,5 +70,5 @@ replace (
k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.21.0
k8s.io/sample-controller => k8s.io/sample-controller v0.21.0
sigs.k8s.io/azurefile-csi-driver => ./
- sigs.k8s.io/cloud-provider-azure => sigs.k8s.io/cloud-provider-azure v0.7.1-0.20210908075721-13c8062485f8
+ sigs.k8s.io/cloud-provider-azure => sigs.k8s.io/cloud-provider-azure v0.7.1-0.20211103062220-cfbf336adc4e
)
diff --git a/go.sum b/go.sum
index bbe88cf488..546a8fefd2 100644
--- a/go.sum
+++ b/go.sum
@@ -52,13 +52,13 @@ github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw=
-github.com/Azure/go-autorest/autorest v0.11.20 h1:s8H1PbCZSqg/DH7JMlOz6YMig6htWLNPsjDdlLqCx3M=
-github.com/Azure/go-autorest/autorest v0.11.20/go.mod h1:o3tqFY+QR40VOlk+pV4d77mORO64jOXSgEnPQgLK6JY=
+github.com/Azure/go-autorest/autorest v0.11.21 h1:w77zY/9RnUAWcIQyDC0Fc89mCvwftR8F+zsR/OH6enk=
+github.com/Azure/go-autorest/autorest v0.11.21/go.mod h1:Do/yuMSW/13ayUkcVREpsMHGG+MvV81uzSCFgYPj4tM=
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
-github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
-github.com/Azure/go-autorest/autorest/adal v0.9.15 h1:X+p2GF0GWyOiSmqohIaEeuNFNDY4I4EOlVuUQvFdWMk=
-github.com/Azure/go-autorest/autorest/adal v0.9.15/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A=
+github.com/Azure/go-autorest/autorest/adal v0.9.14/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
+github.com/Azure/go-autorest/autorest/adal v0.9.16 h1:P8An8Z9rH1ldbOLdFpxYorgOt2sywL9V24dAwWHPuGc=
+github.com/Azure/go-autorest/autorest/adal v0.9.16/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A=
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
@@ -635,8 +635,9 @@ github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
github.com/onsi/ginkgo v1.10.2/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
-github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
+github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
+github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
@@ -644,8 +645,8 @@ github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
-github.com/onsi/gomega v1.15.0 h1:WjP/FQ/sk43MRmnEcT+MlDw2TFvkrXlprrPST/IudjU=
-github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0=
+github.com/onsi/gomega v1.16.0 h1:6gjqkI8iiRHMvdccRJM8rVKjCWk6ZIm6FTm3ddIe4/c=
+github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
@@ -1358,8 +1359,8 @@ k8s.io/kubectl v0.21.0/go.mod h1:EU37NukZRXn1TpAkMUoy8Z/B2u6wjHDS4aInsDzVvks=
k8s.io/kubelet v0.21.0 h1:1VUfM5vKqLPlWFI0zee6fm9kwIZ/UEOGCodVFN+OZrg=
k8s.io/kubelet v0.21.0/go.mod h1:G5ZxMTVev9t4bhmsSxDAWhH6wXDYEVHVVFyYsw4laR4=
k8s.io/kubernetes v1.18.0/go.mod h1:z8xjOOO1Ljz+TaHpOxVGC7cxtF32TesIamoQ+BZrVS0=
-k8s.io/kubernetes v1.21.0 h1:LUUQgdFsKB+wVgKPUapmXjkvvJHSLN53CuQwre4c+mM=
-k8s.io/kubernetes v1.21.0/go.mod h1:Yx6XZ8zalyqEk7but+j4+5SvLzdyH1eeqZ4cwO+5dD4=
+k8s.io/kubernetes v1.21.1 h1:U7cVOSdG+sMNOfL9XlenBV7avSBDHyWPE66gWnnYIIc=
+k8s.io/kubernetes v1.21.1/go.mod h1:ef++isEL1PW0taH6z7DXrSztPglrZ7jQhyvcMEtm0gQ=
k8s.io/legacy-cloud-providers v0.21.0/go.mod h1:bNxo7gDg+PGkBmT/MFZswLTWdSWK9kAlS1s8DJca5q4=
k8s.io/metrics v0.21.0/go.mod h1:L3Ji9EGPP1YBbfm9sPfEXSpnj8i24bfQbAFAsW0NueQ=
k8s.io/mount-utils v0.21.1 h1:uYf6zlKaaoUcPhWn6MElLkWf/f7UQgtkPZteumgwDbA=
@@ -1370,8 +1371,8 @@ k8s.io/system-validators v1.0.4/go.mod h1:HgSgTg4NAGNoYYjKsUyk52gdNi2PVDswQ9Iyn6
k8s.io/system-validators v1.4.0/go.mod h1:bPldcLgkIUK22ALflnsXk8pvkTEndYdNuaHH6gRrl0Q=
k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9 h1:imL9YgXQ9p7xmPzHFm/vVd/cF78jad+n4wK1ABwYtMM=
-k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a h1:8dYfu/Fc9Gz2rNJKB9IQRGgQOh2clmRzNIPPY1xLY5g=
+k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw=
modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=
modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=
@@ -1386,8 +1387,8 @@ rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15 h1:4uqm9Mv+w2MmBYD+F4qf/v6tDFUdPOk29C095RbU5mY=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
-sigs.k8s.io/cloud-provider-azure v0.7.1-0.20210908075721-13c8062485f8 h1:1ATDmua3D+ycsA8SdiY4r9poMwFreMFqwf1HWT1vEQw=
-sigs.k8s.io/cloud-provider-azure v0.7.1-0.20210908075721-13c8062485f8/go.mod h1:R+Rtr8xEZRuLrxGC8zLzpSUbpmolrFC+2+vgtMPb6k0=
+sigs.k8s.io/cloud-provider-azure v0.7.1-0.20211103062220-cfbf336adc4e h1:8hj9uX7WPvV/gz5vQmVz3mqErxiFA0yCZQIwpfKjoYA=
+sigs.k8s.io/cloud-provider-azure v0.7.1-0.20211103062220-cfbf336adc4e/go.mod h1:F7Lb6NRY/O8h4Ignbl82OilqC3CwdWpoD3RPl4OaMv0=
sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU=
sigs.k8s.io/kustomize/api v0.8.5/go.mod h1:M377apnKT5ZHJS++6H4rQoCHmWtt6qTpp3mbe7p6OLY=
sigs.k8s.io/kustomize/cmd/config v0.9.7/go.mod h1:MvXCpHs77cfyxRmCNUQjIqCmZyYsbn5PyQpWiq44nW0=
@@ -1397,6 +1398,7 @@ sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK
sigs.k8s.io/structured-merge-diff/v4 v4.1.0 h1:C4r9BgJ98vrKnnVCjwCSXcWjWe0NKcUQkmzDXZXGwH8=
sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
-sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
+sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
+sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
diff --git a/hack/verify-helm-chart.sh b/hack/verify-helm-chart.sh
index 5b59257dd5..55ebd297dd 100755
--- a/hack/verify-helm-chart.sh
+++ b/hack/verify-helm-chart.sh
@@ -37,8 +37,19 @@ function validate_image() {
echo "Comparing image version between helm chart and manifests in deploy folder"
+if [[ -z "$(command -v pip)" ]]; then
+ echo "Cannot find pip. Installing pip3..."
+ apt install python3-pip -y
+ update-alternatives --install /usr/bin/pip pip /usr/bin/pip3 1
+fi
+
+if [[ -z "$(command -v jq)" ]]; then
+ echo "Cannot find jq. Installing yq..."
+ apt install jq -y
+fi
+
# jq-equivalent for yaml
-pip install yq
+pip install yq --ignore-installed PyYAML
# Extract images from csi-azurefile-controller.yaml
expected_csi_provisioner_image="$(cat ${PKG_ROOT}/deploy/csi-azurefile-controller.yaml | yq -r .spec.template.spec.containers[0].image | head -n 1)"
diff --git a/pkg/azurefile/azure.go b/pkg/azurefile/azure.go
index 44c3ff1968..1be06bc7d0 100644
--- a/pkg/azurefile/azure.go
+++ b/pkg/azurefile/azure.go
@@ -44,7 +44,7 @@ var (
)
// getCloudProvider get Azure Cloud Provider
-func getCloudProvider(kubeconfig, nodeID, secretName, secretNamespace, userAgent string) (*azure.Cloud, error) {
+func getCloudProvider(kubeconfig, nodeID, secretName, secretNamespace, userAgent string, allowEmptyCloudConfig bool) (*azure.Cloud, error) {
az := &azure.Cloud{
InitSecretConfig: azure.InitSecretConfig{
SecretName: secretName,
@@ -105,7 +105,11 @@ func getCloudProvider(kubeconfig, nodeID, secretName, secretNamespace, userAgent
}
if config == nil {
- klog.V(2).Infof("no cloud config provided, error: %v, driver will run without cloud config", err)
+ if allowEmptyCloudConfig {
+ klog.V(2).Infof("no cloud config provided, error: %v, driver will run without cloud config", err)
+ } else {
+ return az, fmt.Errorf("no cloud config provided, error: %v", err)
+ }
} else {
config.UserAgent = userAgent
if err = az.InitializeCloudFromConfig(config, fromSecret, false); err != nil {
diff --git a/pkg/azurefile/azure_test.go b/pkg/azurefile/azure_test.go
index a6372ba0f8..063f7d51e2 100644
--- a/pkg/azurefile/azure_test.go
+++ b/pkg/azurefile/azure_test.go
@@ -23,6 +23,7 @@ import (
"os"
"reflect"
"runtime"
+ "strings"
"testing"
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-02-01/network"
@@ -76,8 +77,7 @@ users:
command: foo-command
`
- err := createTestFile(emptyKubeConfig)
- if err != nil {
+ if err := createTestFile(emptyKubeConfig); err != nil {
t.Error(err)
}
defer func() {
@@ -87,45 +87,62 @@ users:
}()
tests := []struct {
- desc string
- kubeconfig string
- userAgent string
- expectedErr testutil.TestError
+ desc string
+ createFakeCredFile bool
+ createFakeKubeConfig bool
+ kubeconfig string
+ userAgent string
+ allowEmptyCloudConfig bool
+ expectedErr testutil.TestError
}{
{
- desc: "[failure] out of cluster, no kubeconfig, no credential file",
- kubeconfig: "",
- expectedErr: testutil.TestError{},
+ desc: "out of cluster, no kubeconfig, no credential file",
+ kubeconfig: "",
+ allowEmptyCloudConfig: true,
+ expectedErr: testutil.TestError{},
+ },
+ {
+ desc: "[failure][disallowEmptyCloudConfig] out of cluster, no kubeconfig, no credential file",
+ kubeconfig: "",
+ allowEmptyCloudConfig: false,
+ expectedErr: testutil.TestError{
+ DefaultError: fmt.Errorf("no cloud config provided, error"),
+ },
},
{
- desc: "[failure] out of cluster & in cluster, specify a non-exist kubeconfig, no credential file",
- kubeconfig: notExistKubeConfig,
- expectedErr: testutil.TestError{},
+ desc: "[failure] out of cluster & in cluster, specify a non-exist kubeconfig, no credential file",
+ kubeconfig: notExistKubeConfig,
+ allowEmptyCloudConfig: true,
+ expectedErr: testutil.TestError{},
},
{
- desc: "[failure] out of cluster & in cluster, specify a empty kubeconfig, no credential file",
- kubeconfig: emptyKubeConfig,
+ desc: "[failure] out of cluster & in cluster, specify a empty kubeconfig, no credential file",
+ kubeconfig: emptyKubeConfig,
+ allowEmptyCloudConfig: true,
expectedErr: testutil.TestError{
DefaultError: fmt.Errorf("failed to get KubeClient: invalid configuration: no configuration has been provided, try setting KUBERNETES_MASTER environment variable"),
},
},
{
- desc: "[failure] out of cluster & in cluster, specify a fake kubeconfig, no credential file",
- kubeconfig: fakeKubeConfig,
- expectedErr: testutil.TestError{},
+ desc: "[failure] out of cluster & in cluster, specify a fake kubeconfig, no credential file",
+ createFakeKubeConfig: true,
+ kubeconfig: fakeKubeConfig,
+ allowEmptyCloudConfig: true,
+ expectedErr: testutil.TestError{},
},
{
- desc: "[success] out of cluster & in cluster, no kubeconfig, a fake credential file",
- kubeconfig: "",
- userAgent: "useragent",
- expectedErr: testutil.TestError{},
+ desc: "[success] out of cluster & in cluster, no kubeconfig, a fake credential file",
+ createFakeCredFile: true,
+ kubeconfig: "",
+ userAgent: "useragent",
+ allowEmptyCloudConfig: true,
+ expectedErr: testutil.TestError{},
},
}
for _, test := range tests {
- if test.desc == "[failure] out of cluster & in cluster, specify a fake kubeconfig, no credential file" {
- err := createTestFile(fakeKubeConfig)
- if err != nil {
+ if test.createFakeKubeConfig {
+ if err := createTestFile(fakeKubeConfig); err != nil {
t.Error(err)
}
defer func() {
@@ -138,9 +155,8 @@ users:
t.Error(err)
}
}
- if test.desc == "[success] out of cluster & in cluster, no kubeconfig, a fake credential file" {
- err := createTestFile(fakeCredFile)
- if err != nil {
+ if test.createFakeCredFile {
+ if err := createTestFile(fakeCredFile); err != nil {
t.Error(err)
}
defer func() {
@@ -157,8 +173,8 @@ users:
}
os.Setenv(DefaultAzureCredentialFileEnv, fakeCredFile)
}
- cloud, err := getCloudProvider(test.kubeconfig, "", "", "", test.userAgent)
- if !testutil.AssertError(err, &test.expectedErr) {
+ cloud, err := getCloudProvider(test.kubeconfig, "", "", "", test.userAgent, test.allowEmptyCloudConfig)
+ if !testutil.AssertError(err, &test.expectedErr) && !strings.Contains(err.Error(), test.expectedErr.DefaultError.Error()) {
t.Errorf("desc: %s,\n input: %q, getCloudProvider err: %v, expectedErr: %v", test.desc, test.kubeconfig, err, test.expectedErr)
}
if cloud == nil {
diff --git a/pkg/azurefile/azurefile.go b/pkg/azurefile/azurefile.go
index 741477d6be..701b80b4bd 100644
--- a/pkg/azurefile/azurefile.go
+++ b/pkg/azurefile/azurefile.go
@@ -117,6 +117,7 @@ const (
vhdSuffix = ".vhd"
metaDataNode = "node"
networkEndpointTypeField = "networkendpointtype"
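+ // lowercase prefix shared by premium sku names (e.g. Premium_LRS, Premium_ZRS)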
+ premium = "premium"
accountNotProvisioned = "StorageAccountIsNotProvisioned"
// this is a workaround fix for 429 throttling issue, will update cloud provider for better fix later
@@ -158,6 +159,8 @@ type DriverOptions struct {
CloudConfigSecretNamespace string
CustomUserAgent string
UserAgentSuffix string
+ AllowEmptyCloudConfig bool
+ EnableGetVolumeStats bool
}
// Driver implements all interfaces of CSI drivers
@@ -168,6 +171,8 @@ type Driver struct {
cloudConfigSecretNamespace string
customUserAgent string
userAgentSuffix string
+ allowEmptyCloudConfig bool
+ enableGetVolumeStats bool
fileClient *azureFileClient
mounter *mount.SafeFormatAndMount
// lock per volume attach (only for vhd disk feature)
@@ -202,14 +207,14 @@ func NewDriver(options *DriverOptions) *Driver {
driver.cloudConfigSecretNamespace = options.CloudConfigSecretNamespace
driver.customUserAgent = options.CustomUserAgent
driver.userAgentSuffix = options.UserAgentSuffix
+ driver.allowEmptyCloudConfig = options.AllowEmptyCloudConfig
driver.volLockMap = newLockMap()
driver.subnetLockMap = newLockMap()
driver.volumeLocks = newVolumeLocks()
- getter := func(key string) (interface{}, error) {
- return nil, nil
- }
var err error
+ getter := func(key string) (interface{}, error) { return nil, nil }
+
if driver.accountSearchCache, err = azcache.NewTimedcache(time.Minute, getter); err != nil {
klog.Fatalf("%v", err)
}
@@ -235,7 +240,7 @@ func (d *Driver) Run(endpoint, kubeconfig string, testBool bool) {
userAgent := GetUserAgent(d.Name, d.customUserAgent, d.userAgentSuffix)
klog.V(2).Infof("driver userAgent: %s", userAgent)
- d.cloud, err = getCloudProvider(kubeconfig, d.NodeID, d.cloudConfigSecretName, d.cloudConfigSecretNamespace, userAgent)
+ d.cloud, err = getCloudProvider(kubeconfig, d.NodeID, d.cloudConfigSecretName, d.cloudConfigSecretNamespace, userAgent, d.allowEmptyCloudConfig)
if err != nil {
klog.Fatalf("failed to get Azure Cloud Provider, error: %v", err)
}
@@ -267,12 +272,15 @@ func (d *Driver) Run(endpoint, kubeconfig string, testBool bool) {
csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
})
- d.AddNodeServiceCapabilities([]csi.NodeServiceCapability_RPC_Type{
+ nodeCap := []csi.NodeServiceCapability_RPC_Type{
csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME,
- csi.NodeServiceCapability_RPC_GET_VOLUME_STATS,
- csi.NodeServiceCapability_RPC_VOLUME_MOUNT_GROUP,
csi.NodeServiceCapability_RPC_SINGLE_NODE_MULTI_WRITER,
- })
+ csi.NodeServiceCapability_RPC_VOLUME_MOUNT_GROUP,
+ }
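+ // GET_VOLUME_STATS is now opt-in via the --enable-get-volume-stats flag (see pkg/azurefileplugin/main.go)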
+ if d.enableGetVolumeStats {
+ nodeCap = append(nodeCap, csi.NodeServiceCapability_RPC_GET_VOLUME_STATS)
+ }
+ d.AddNodeServiceCapabilities(nodeCap)
s := csicommon.NewNonBlockingGRPCServer()
// Driver d act as IdentityServer, ControllerServer and NodeServer
diff --git a/pkg/azurefile/controllerserver.go b/pkg/azurefile/controllerserver.go
index 6ab8e9bc3c..d5c46d5b5f 100644
--- a/pkg/azurefile/controllerserver.go
+++ b/pkg/azurefile/controllerserver.go
@@ -207,8 +207,10 @@ func (d *Driver) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest)
if fsType == nfs || protocol == nfs {
protocol = nfs
enableHTTPSTrafficOnly = false
- // default to Premium_LRS
- sku = string(storage.SkuNamePremiumLRS)
+ if !strings.HasPrefix(strings.ToLower(sku), premium) {
+ // NFS protocol only supports Premium storage
+ sku = string(storage.SkuNamePremiumLRS)
+ }
shareProtocol = storage.EnabledProtocolsNFS
// NFS protocol does not need account key
storeAccountKey = false
@@ -231,7 +233,7 @@ func (d *Driver) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest)
fileShareSize := int(requestGiB)
// account kind should be FileStorage for Premium File
accountKind := string(storage.KindStorageV2)
- if strings.HasPrefix(strings.ToLower(sku), "premium") {
+ if strings.HasPrefix(strings.ToLower(sku), premium) {
accountKind = string(storage.KindFileStorage)
if fileShareSize < minimumPremiumShareSize {
fileShareSize = minimumPremiumShareSize
diff --git a/pkg/azurefile/utils.go b/pkg/azurefile/utils.go
index a55abbb2bc..498355d61b 100644
--- a/pkg/azurefile/utils.go
+++ b/pkg/azurefile/utils.go
@@ -114,7 +114,7 @@ func isRetriableError(err error) bool {
}
func sleepIfThrottled(err error, sleepSec int) {
- if strings.Contains(strings.ToLower(err.Error()), strings.ToLower(tooManyRequests)) {
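+ // back off on ARM client-side throttling errors as well as HTTP 429 (tooManyRequests) responses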
+ if strings.Contains(strings.ToLower(err.Error()), strings.ToLower(tooManyRequests)) || strings.Contains(strings.ToLower(err.Error()), clientThrottled) {
klog.Warningf("sleep %d more seconds, waiting for throttling complete", sleepSec)
time.Sleep(time.Duration(sleepSec) * time.Second)
}
diff --git a/pkg/azurefileplugin/Dockerfile b/pkg/azurefileplugin/Dockerfile
index c9bdc29d55..9b4ae829b8 100644
--- a/pkg/azurefileplugin/Dockerfile
+++ b/pkg/azurefileplugin/Dockerfile
@@ -18,7 +18,9 @@ ARG ARCH=amd64
COPY ./_output/${ARCH}/azurefileplugin /azurefileplugin
RUN apt update && apt-mark unhold libcap2
-RUN clean-install ca-certificates cifs-utils util-linux e2fsprogs mount udev xfsprogs nfs-common libssl1.1
+RUN clean-install ca-certificates cifs-utils util-linux e2fsprogs mount udev xfsprogs nfs-common
+# install updated packages to fix CVE issues
+RUN clean-install libssl1.1 libgssapi-krb5-2 libk5crypto3 libkrb5-3 libkrb5support0
LABEL maintainers="andyzhangx"
LABEL description="AzureFile CSI Driver"
diff --git a/pkg/azurefileplugin/main.go b/pkg/azurefileplugin/main.go
index cbbee7ae1e..45501401f0 100644
--- a/pkg/azurefileplugin/main.go
+++ b/pkg/azurefileplugin/main.go
@@ -46,6 +46,8 @@ var (
cloudConfigSecretNamespace = flag.String("cloud-config-secret-namespace", "kube-system", "secret namespace of cloud config")
customUserAgent = flag.String("custom-user-agent", "", "custom userAgent")
userAgentSuffix = flag.String("user-agent-suffix", "", "userAgent suffix")
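+ // allow-empty-cloud-config keeps the lenient startup behavior by default; enable-get-volume-stats (default false) gates the GET_VOLUME_STATS node capability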
+ allowEmptyCloudConfig = flag.Bool("allow-empty-cloud-config", true, "allow running driver without cloud config")
+ enableGetVolumeStats = flag.Bool("enable-get-volume-stats", false, "allow GET_VOLUME_STATS on agent node")
)
func main() {
@@ -77,6 +79,8 @@ func handle() {
CloudConfigSecretNamespace: *cloudConfigSecretNamespace,
CustomUserAgent: *customUserAgent,
UserAgentSuffix: *userAgentSuffix,
+ AllowEmptyCloudConfig: *allowEmptyCloudConfig,
+ EnableGetVolumeStats: *enableGetVolumeStats,
}
driver := azurefile.NewDriver(&driverOptions)
if driver == nil {
diff --git a/test/e2e/dynamic_provisioning_test.go b/test/e2e/dynamic_provisioning_test.go
index 9f3f284258..9540df0525 100644
--- a/test/e2e/dynamic_provisioning_test.go
+++ b/test/e2e/dynamic_provisioning_test.go
@@ -1107,10 +1107,12 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() {
CSIDriver: testDriver,
Pods: pods,
StorageClassParameters: map[string]string{
- "skuName": "Premium_LRS",
"protocol": "nfs",
},
}
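+ // Premium_ZRS is only requested when the test region supports ZRS with the NFS protocol (detected in suite_test.go)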
+ if supportZRSwithNFS {
+ test.StorageClassParameters["skuName"] = "Premium_ZRS"
+ }
test.Run(cs, ns)
})
diff --git a/test/e2e/manifest/containerd-windows.json b/test/e2e/manifest/containerd-windows.json
index 350a3e5ff8..a1624d4034 100644
--- a/test/e2e/manifest/containerd-windows.json
+++ b/test/e2e/manifest/containerd-windows.json
@@ -31,7 +31,7 @@
"windowsProfile": {
"adminUsername": "azureuser",
"adminPassword": "replacepassword1234$",
- "csiProxyURL": "https://acs-mirror.azureedge.net/csi-proxy/v0.2.2/binaries/csi-proxy-v0.2.2.tar.gz",
+ "csiProxyURL": "https://acs-mirror.azureedge.net/csi-proxy/v1.0.2/binaries/csi-proxy-v1.0.2.tar.gz",
"enableCSIProxy": true,
"sshEnabled": true,
"windowsPublisher": "microsoft-aks",
diff --git a/test/e2e/manifest/windows.json b/test/e2e/manifest/windows.json
index 0013d3acdd..e99b8c1494 100644
--- a/test/e2e/manifest/windows.json
+++ b/test/e2e/manifest/windows.json
@@ -22,14 +22,13 @@
"count": 2,
"vmSize": "Standard_D4s_v3",
"osDiskSizeGB": 128,
- "availabilityProfile": "AvailabilitySet",
"osType": "Windows"
}
],
"windowsProfile": {
"adminUsername": "azureuser",
"adminPassword": "replacepassword1234$",
- "csiProxyURL": "https://acs-mirror.azureedge.net/csi-proxy/v1.0.0/binaries/csi-proxy-v1.0.0.tar.gz",
+ "csiProxyURL": "https://acs-mirror.azureedge.net/csi-proxy/v1.0.2/binaries/csi-proxy-v1.0.2.tar.gz",
"enableCSIProxy": true,
"sshEnabled": true,
"windowsPublisher": "microsoft-aks",
diff --git a/test/e2e/suite_test.go b/test/e2e/suite_test.go
index 2bcbb61468..752dfac497 100644
--- a/test/e2e/suite_test.go
+++ b/test/e2e/suite_test.go
@@ -59,6 +59,7 @@ var (
"csi.storage.k8s.io/provisioner-secret-namespace": "default",
"csi.storage.k8s.io/node-stage-secret-namespace": "default",
}
+ supportZRSwithNFS bool
)
type testCmd struct {
@@ -88,6 +89,14 @@ var _ = ginkgo.BeforeSuite(func() {
_, err = azureClient.EnsureResourceGroup(context.Background(), creds.ResourceGroup, creds.Location, nil)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
+ // check whether current region supports Premium_ZRS with NFS protocol
+ supportedRegions := []string{"southeastasia", "australiaeast", "europenorth", "europewest", "francecentral", "japaneast", "uksouth", "useast", "useast2", "uswest2"}
+ for _, region := range supportedRegions {
+ if creds.Location == region {
+ supportZRSwithNFS = true
+ }
+ }
+
// Install Azure File CSI Driver on cluster from project root
e2eBootstrap := testCmd{
command: "make",
diff --git a/test/e2e/testsuites/dynamically_provisioned_read_only_volume_tester.go b/test/e2e/testsuites/dynamically_provisioned_read_only_volume_tester.go
index f7735cf459..ca8178b8ee 100644
--- a/test/e2e/testsuites/dynamically_provisioned_read_only_volume_tester.go
+++ b/test/e2e/testsuites/dynamically_provisioned_read_only_volume_tester.go
@@ -18,11 +18,11 @@ package testsuites
import (
"fmt"
+ "strings"
"sigs.k8s.io/azurefile-csi-driver/test/e2e/driver"
"github.com/onsi/ginkgo"
- "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
@@ -43,6 +43,7 @@ func (t *DynamicallyProvisionedReadOnlyVolumeTest) Run(client clientset.Interfac
if pod.IsWindows {
expectedReadOnlyLog = "FileOpenFailure"
}
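+ // some volume types (e.g. NFS) surface read-only writes as "Permission denied" rather than the message above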
+ permissionDeniedLog := "Permission denied"
tpod, cleanup := pod.SetupWithDynamicVolumes(client, namespace, t.CSIDriver, t.StorageClassParameters)
// defer must be called here for resources not get removed before using them
@@ -58,6 +59,7 @@ func (t *DynamicallyProvisionedReadOnlyVolumeTest) Run(client clientset.Interfac
ginkgo.By("checking that pod logs contain expected message")
body, err := tpod.Logs()
framework.ExpectNoError(err, fmt.Sprintf("Error getting logs for pod %s: %v", tpod.pod.Name, err))
- gomega.Expect(string(body)).To(gomega.ContainSubstring(expectedReadOnlyLog))
+ hasReadOnlyLog := strings.Contains(string(body), expectedReadOnlyLog) || strings.Contains(string(body), permissionDeniedLog)
+ framework.ExpectEqual(hasReadOnlyLog, true, fmt.Sprintf("expected substring: %s or %s, current returned logs: %s", expectedReadOnlyLog, permissionDeniedLog, string(body)))
}
}
diff --git a/test/sanity/run-test.sh b/test/sanity/run-test.sh
index 1ea03b9e77..ddeaa1a819 100755
--- a/test/sanity/run-test.sh
+++ b/test/sanity/run-test.sh
@@ -40,7 +40,7 @@ _output/${ARCH}/azurefileplugin --endpoint "$endpoint" --nodeid "$nodeid" -v=5 &
echo 'Begin to run sanity test...'
readonly CSI_SANITY_BIN='csi-sanity'
-"$CSI_SANITY_BIN" --ginkgo.v --ginkgo.noColor --csi.endpoint="$endpoint" --ginkgo.skip='should fail when the volume source snapshot is not found|should work|should fail when the volume does not exist|should fail when the node does not exist|Node Service NodeGetCapabilities|should remove target path|should return appropriate capabilities'
+"$CSI_SANITY_BIN" --ginkgo.v --ginkgo.noColor --csi.endpoint="$endpoint" --ginkgo.skip='should fail when the volume source snapshot is not found|should work|should fail when the volume does not exist|should fail when the node does not exist|Node Service NodeGetCapabilities|should remove target path'
testvolumeparameters='/tmp/vhd.yaml'
cat > $testvolumeparameters << EOF
@@ -48,4 +48,4 @@ fstype: ext4
EOF
echo 'Begin to run sanity test for vhd disk feature...'
-"$CSI_SANITY_BIN" --ginkgo.v --ginkgo.noColor --csi.endpoint="$endpoint" --csi.testvolumeparameters="$testvolumeparameters" --ginkgo.skip='should fail when the volume source snapshot is not found|should work|should fail when volume does not exist on the specified path|should fail when the volume does not exist|should fail when the node does not exist|should be idempotent|Node Service NodeGetCapabilities|should remove target path|should return appropriate capabilities'
+"$CSI_SANITY_BIN" --ginkgo.v --ginkgo.noColor --csi.endpoint="$endpoint" --csi.testvolumeparameters="$testvolumeparameters" --ginkgo.skip='should fail when the volume source snapshot is not found|should work|should fail when volume does not exist on the specified path|should fail when the volume does not exist|should fail when the node does not exist|should be idempotent|Node Service NodeGetCapabilities|should remove target path'
diff --git a/test/sanity/run-tests-all-clouds.sh b/test/sanity/run-tests-all-clouds.sh
index 8ed898ce57..c48e778d8c 100755
--- a/test/sanity/run-tests-all-clouds.sh
+++ b/test/sanity/run-tests-all-clouds.sh
@@ -21,7 +21,7 @@ function install_csi_sanity_bin {
mkdir -p $GOPATH/src/github.com/kubernetes-csi
pushd $GOPATH/src/github.com/kubernetes-csi
export GO111MODULE=off
- git clone https://github.com/kubernetes-csi/csi-test.git -b v4.2.0
+ git clone https://github.com/kubernetes-csi/csi-test.git -b v4.3.0
pushd csi-test/cmd/csi-sanity
make install
popd
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go
index 1826a68dc8..eb649bce9f 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go
@@ -16,9 +16,11 @@ package adal
import (
"crypto/tls"
+ "net"
"net/http"
"net/http/cookiejar"
"sync"
+ "time"
"github.com/Azure/go-autorest/tracing"
)
@@ -72,15 +74,18 @@ func sender() Sender {
// note that we can't init defaultSender in init() since it will
// execute before calling code has had a chance to enable tracing
defaultSenderInit.Do(func() {
- // Use behaviour compatible with DefaultTransport, but require TLS minimum version.
- defaultTransport := http.DefaultTransport.(*http.Transport)
+ // copied from http.DefaultTransport with a TLS minimum version.
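+ // (building the settings inline also avoids the type assertion on http.DefaultTransport, which panics if a custom RoundTripper was installed)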
transport := &http.Transport{
- Proxy: defaultTransport.Proxy,
- DialContext: defaultTransport.DialContext,
- MaxIdleConns: defaultTransport.MaxIdleConns,
- IdleConnTimeout: defaultTransport.IdleConnTimeout,
- TLSHandshakeTimeout: defaultTransport.TLSHandshakeTimeout,
- ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout,
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ }).DialContext,
+ ForceAttemptHTTP2: true,
+ MaxIdleConns: 100,
+ IdleConnTimeout: 90 * time.Second,
+ TLSHandshakeTimeout: 10 * time.Second,
+ ExpectContinueTimeout: 1 * time.Second,
TLSClientConfig: &tls.Config{
MinVersion: tls.VersionTLS12,
},
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go
index 3b61a2b6e9..8192e11054 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go
@@ -49,6 +49,7 @@ type ResourceIdentifier struct {
Storage string `json:"storage"`
Synapse string `json:"synapse"`
ServiceBus string `json:"serviceBus"`
+ SQLDatabase string `json:"sqlDatabase"`
}
// Environment represents a set of endpoints for each of Azure's Clouds.
@@ -121,6 +122,7 @@ var (
Storage: "https://storage.azure.com/",
Synapse: "https://dev.azuresynapse.net",
ServiceBus: "https://servicebus.azure.net/",
+ SQLDatabase: "https://database.windows.net/",
},
}
@@ -162,6 +164,7 @@ var (
Storage: "https://storage.azure.com/",
Synapse: NotAvailable,
ServiceBus: "https://servicebus.azure.net/",
+ SQLDatabase: "https://database.usgovcloudapi.net/",
},
}
@@ -203,6 +206,7 @@ var (
Storage: "https://storage.azure.com/",
Synapse: "https://dev.azuresynapse.net",
ServiceBus: "https://servicebus.azure.net/",
+ SQLDatabase: "https://database.chinacloudapi.cn/",
},
}
@@ -244,6 +248,7 @@ var (
Storage: "https://storage.azure.com/",
Synapse: NotAvailable,
ServiceBus: "https://servicebus.azure.net/",
+ SQLDatabase: "https://database.cloudapi.de/",
},
}
)
diff --git a/vendor/github.com/Azure/go-autorest/autorest/go.mod b/vendor/github.com/Azure/go-autorest/autorest/go.mod
index c27bef1b70..383d10290f 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/go.mod
+++ b/vendor/github.com/Azure/go-autorest/autorest/go.mod
@@ -4,7 +4,7 @@ go 1.15
require (
github.com/Azure/go-autorest v14.2.0+incompatible
- github.com/Azure/go-autorest/autorest/adal v0.9.13
+ github.com/Azure/go-autorest/autorest/adal v0.9.14
github.com/Azure/go-autorest/autorest/mocks v0.4.1
github.com/Azure/go-autorest/logger v0.2.1
github.com/Azure/go-autorest/tracing v0.6.0
diff --git a/vendor/github.com/Azure/go-autorest/autorest/go.sum b/vendor/github.com/Azure/go-autorest/autorest/go.sum
index 373d9c4e25..0431571b33 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/go.sum
+++ b/vendor/github.com/Azure/go-autorest/autorest/go.sum
@@ -1,7 +1,7 @@
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
-github.com/Azure/go-autorest/autorest/adal v0.9.13 h1:Mp5hbtOePIzM8pJVRa3YLrWWmZtoxRXqUEzCfJt3+/Q=
-github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
+github.com/Azure/go-autorest/autorest/adal v0.9.14 h1:G8hexQdV5D4khOXrWG2YuLCFKhWYmWD8bHYaXN5ophk=
+github.com/Azure/go-autorest/autorest/adal v0.9.14/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk=
@@ -12,12 +12,10 @@ github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUM
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
diff --git a/vendor/github.com/Azure/go-autorest/autorest/sender.go b/vendor/github.com/Azure/go-autorest/autorest/sender.go
index 7a495f732a..118de81411 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/sender.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/sender.go
@@ -20,6 +20,7 @@ import (
"fmt"
"log"
"math"
+ "net"
"net/http"
"net/http/cookiejar"
"strconv"
@@ -129,15 +130,18 @@ func sender(renengotiation tls.RenegotiationSupport) Sender {
// note that we can't init defaultSenders in init() since it will
// execute before calling code has had a chance to enable tracing
defaultSenders[renengotiation].init.Do(func() {
- // Use behaviour compatible with DefaultTransport, but require TLS minimum version.
- defaultTransport := http.DefaultTransport.(*http.Transport)
+ // copied from http.DefaultTransport with a TLS minimum version.
transport := &http.Transport{
- Proxy: defaultTransport.Proxy,
- DialContext: defaultTransport.DialContext,
- MaxIdleConns: defaultTransport.MaxIdleConns,
- IdleConnTimeout: defaultTransport.IdleConnTimeout,
- TLSHandshakeTimeout: defaultTransport.TLSHandshakeTimeout,
- ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout,
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ }).DialContext,
+ ForceAttemptHTTP2: true,
+ MaxIdleConns: 100,
+ IdleConnTimeout: 90 * time.Second,
+ TLSHandshakeTimeout: 10 * time.Second,
+ ExpectContinueTimeout: 1 * time.Second,
TLSClientConfig: &tls.Config{
MinVersion: tls.VersionTLS12,
Renegotiation: renengotiation,
diff --git a/vendor/github.com/onsi/ginkgo/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/CHANGELOG.md
index 494abdbfbd..a26bc530f1 100644
--- a/vendor/github.com/onsi/ginkgo/CHANGELOG.md
+++ b/vendor/github.com/onsi/ginkgo/CHANGELOG.md
@@ -1,3 +1,10 @@
+## 1.16.5
+
+Ginkgo 2.0 now has a Release Candidate. 1.16.5 advertises the existence of the RC.
+1.16.5 deprecates GinkgoParallelNode in favor of GinkgoParallelProcess
+
+You can silence the RC advertisement by setting an `ACK_GINKGO_RC=true` environment variable or creating a file in your home directory called `.ack-ginkgo-rc`
+
## 1.16.4
### Fixes
diff --git a/vendor/github.com/onsi/ginkgo/README.md b/vendor/github.com/onsi/ginkgo/README.md
index 05321e6eaf..a25ca5e03a 100644
--- a/vendor/github.com/onsi/ginkgo/README.md
+++ b/vendor/github.com/onsi/ginkgo/README.md
@@ -1,23 +1,18 @@
![Ginkgo: A Go BDD Testing Framework](https://onsi.github.io/ginkgo/images/ginkgo.png)
-[![Build Status](https://travis-ci.org/onsi/ginkgo.svg?branch=master)](https://travis-ci.org/onsi/ginkgo)
[![test](https://github.com/onsi/ginkgo/workflows/test/badge.svg?branch=master)](https://github.com/onsi/ginkgo/actions?query=workflow%3Atest+branch%3Amaster)
Jump to the [docs](https://onsi.github.io/ginkgo/) | [中文文档](https://ke-chain.github.io/ginkgodoc) to learn more. To start rolling your Ginkgo tests *now* [keep reading](#set-me-up)!
If you have a question, comment, bug report, feature request, etc. please open a GitHub issue, or visit the [Ginkgo Slack channel](https://app.slack.com/client/T029RQSE6/CQQ50BBNW).
-# Ginkgo 2.0 is coming soon!
+# Ginkgo 2.0 Release Candidate is available!
-An effort is underway to develop and deliver Ginkgo 2.0. The work is happening in the [v2](https://github.com/onsi/ginkgo/tree/v2) branch and a changelog and migration guide is being maintained on that branch [here](https://github.com/onsi/ginkgo/blob/v2/docs/MIGRATING_TO_V2.md). Issue [#711](https://github.com/onsi/ginkgo/issues/711) is the central place for discussion and links to the original [proposal doc](https://docs.google.com/document/d/1h28ZknXRsTLPNNiOjdHIO-F2toCzq4xoZDXbfYaBdoQ/edit#).
+An effort is underway to develop and deliver Ginkgo 2.0. The work is happening in the [ver2](https://github.com/onsi/ginkgo/tree/ver2) branch and a changelog and migration guide are being maintained on that branch [here](https://github.com/onsi/ginkgo/blob/ver2/docs/MIGRATING_TO_V2.md). Issue [#711](https://github.com/onsi/ginkgo/issues/711) is the central place for discussion.
-As described in the [changelog](https://github.com/onsi/ginkgo/blob/v2/docs/MIGRATING_TO_V2.md) and [proposal](https://docs.google.com/document/d/1h28ZknXRsTLPNNiOjdHIO-F2toCzq4xoZDXbfYaBdoQ/edit#), Ginkgo 2.0 will clean up the Ginkgo codebase, deprecate and remove some v1 functionality, and add several new much-requested features. To help users get ready for the migration, Ginkgo v1 has started emitting deprecation warnings for features that will no longer be supported with links to documentation for how to migrate away from these features. If you have concerns or comments please chime in on [#711](https://github.com/onsi/ginkgo/issues/711).
+As described in the [changelog](https://github.com/onsi/ginkgo/blob/ver2/docs/MIGRATING_TO_V2.md) and [proposal](https://docs.google.com/document/d/1h28ZknXRsTLPNNiOjdHIO-F2toCzq4xoZDXbfYaBdoQ/edit#), Ginkgo 2.0 will clean up the Ginkgo codebase, deprecate and remove some v1 functionality, and add several new much-requested features. To help users get ready for the migration, Ginkgo v1 has started emitting deprecation warnings for features that will no longer be supported with links to documentation for how to migrate away from these features. If you have concerns or comments please chime in on [#711](https://github.com/onsi/ginkgo/issues/711).
-The current timeline for completion of 2.0 looks like:
-
-- Early April 2021: first public release of 2.0, deprecation warnings land in v1.
-- May 2021: first beta/rc of 2.0 with most new functionality in place.
-- June/July 2021: 2.0 ships and fully replaces the 1.x codebase on master.
+Please start exploring and using the V2 release! To get started follow the [Using the Release Candidate](https://github.com/onsi/ginkgo/blob/ver2/docs/MIGRATING_TO_V2.md#using-the-beta) directions in the migration guide.
## TLDR
Ginkgo builds on Go's `testing` package, allowing expressive [Behavior-Driven Development](https://en.wikipedia.org/wiki/Behavior-driven_development) ("BDD") style tests.
diff --git a/vendor/github.com/onsi/ginkgo/config/config.go b/vendor/github.com/onsi/ginkgo/config/config.go
index 5f3f43969b..3130c77897 100644
--- a/vendor/github.com/onsi/ginkgo/config/config.go
+++ b/vendor/github.com/onsi/ginkgo/config/config.go
@@ -20,7 +20,7 @@ import (
"fmt"
)
-const VERSION = "1.16.4"
+const VERSION = "1.16.5"
type GinkgoConfigType struct {
RandomSeed int64
diff --git a/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go b/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go
index 4a6e1e1ee7..ccd7685e38 100644
--- a/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go
+++ b/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go
@@ -73,9 +73,15 @@ func GinkgoRandomSeed() int64 {
return config.GinkgoConfig.RandomSeed
}
-//GinkgoParallelNode returns the parallel node number for the current ginkgo process
-//The node number is 1-indexed
+//GinkgoParallelNode is deprecated, use GinkgoParallelProcess instead
func GinkgoParallelNode() int {
+ deprecationTracker.TrackDeprecation(types.Deprecations.ParallelNode(), codelocation.New(1))
+ return GinkgoParallelProcess()
+}
+
+//GinkgoParallelProcess returns the parallel process number for the current ginkgo process
+//The process number is 1-indexed
+func GinkgoParallelProcess() int {
return config.GinkgoConfig.ParallelNode
}
@@ -109,6 +115,7 @@ func GinkgoT(optionalOffset ...int) GinkgoTInterface {
//in the testing package's T.
type GinkgoTInterface interface {
Cleanup(func())
+ Setenv(key, value string)
Error(args ...interface{})
Errorf(format string, args ...interface{})
Fail()
diff --git a/vendor/github.com/onsi/ginkgo/go.mod b/vendor/github.com/onsi/ginkgo/go.mod
index 86a5a97be1..1711443266 100644
--- a/vendor/github.com/onsi/ginkgo/go.mod
+++ b/vendor/github.com/onsi/ginkgo/go.mod
@@ -1,6 +1,6 @@
module github.com/onsi/ginkgo
-go 1.15
+go 1.16
require (
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0
diff --git a/vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go b/vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go
index d7bbb7a96b..4dcfaf4cd8 100644
--- a/vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go
+++ b/vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go
@@ -34,6 +34,11 @@ func (t *ginkgoTestingTProxy) Cleanup(func()) {
// No-op
}
+func (t *ginkgoTestingTProxy) Setenv(key, value string) {
+ fmt.Println("Setenv is a noop for Ginkgo at the moment but will be implemented in V2")
+ // No-op until Cleanup is implemented
+}
+
func (t *ginkgoTestingTProxy) Error(args ...interface{}) {
t.fail(fmt.Sprintln(args...), t.offset)
}
diff --git a/vendor/github.com/onsi/ginkgo/types/deprecation_support.go b/vendor/github.com/onsi/ginkgo/types/deprecation_support.go
index 305c134b78..d5a6658f35 100644
--- a/vendor/github.com/onsi/ginkgo/types/deprecation_support.go
+++ b/vendor/github.com/onsi/ginkgo/types/deprecation_support.go
@@ -52,6 +52,14 @@ func (d deprecations) Measure() Deprecation {
}
}
+func (d deprecations) ParallelNode() Deprecation {
+ return Deprecation{
+ Message: "GinkgoParallelNode is deprecated and will be removed in Ginkgo V2. Please use GinkgoParallelProcess instead.",
+ DocLink: "renamed-ginkgoparallelnode",
+ Version: "1.16.5",
+ }
+}
+
func (d deprecations) Convert() Deprecation {
return Deprecation{
Message: "The convert command is deprecated in Ginkgo V2",
@@ -99,16 +107,18 @@ func (d *DeprecationTracker) DidTrackDeprecations() bool {
}
func (d *DeprecationTracker) DeprecationsReport() string {
- out := formatter.F("{{light-yellow}}You're using deprecated Ginkgo functionality:{{/}}\n")
+ out := formatter.F("\n{{light-yellow}}You're using deprecated Ginkgo functionality:{{/}}\n")
out += formatter.F("{{light-yellow}}============================================={{/}}\n")
- out += formatter.F("Ginkgo 2.0 is under active development and will introduce (a small number of) breaking changes.\n")
- out += formatter.F("To learn more, view the migration guide at {{cyan}}{{underline}}https://github.com/onsi/ginkgo/blob/v2/docs/MIGRATING_TO_V2.md{{/}}\n")
- out += formatter.F("To comment, chime in at {{cyan}}{{underline}}https://github.com/onsi/ginkgo/issues/711{{/}}\n\n")
+ out += formatter.F("{{bold}}{{green}}Ginkgo 2.0{{/}} is under active development and will introduce several new features, improvements, and a small handful of breaking changes.\n")
+ out += formatter.F("A release candidate for 2.0 is now available and 2.0 should GA in Fall 2021. {{bold}}Please give the RC a try and send us feedback!{{/}}\n")
+ out += formatter.F(" - To learn more, view the migration guide at {{cyan}}{{underline}}https://github.com/onsi/ginkgo/blob/ver2/docs/MIGRATING_TO_V2.md{{/}}\n")
+ out += formatter.F(" - For instructions on using the Release Candidate visit {{cyan}}{{underline}}https://github.com/onsi/ginkgo/blob/ver2/docs/MIGRATING_TO_V2.md#using-the-beta{{/}}\n")
+ out += formatter.F(" - To comment, chime in at {{cyan}}{{underline}}https://github.com/onsi/ginkgo/issues/711{{/}}\n\n")
for deprecation, locations := range d.deprecations {
out += formatter.Fi(1, "{{yellow}}"+deprecation.Message+"{{/}}\n")
if deprecation.DocLink != "" {
- out += formatter.Fi(1, "{{bold}}Learn more at:{{/}} {{cyan}}{{underline}}https://github.com/onsi/ginkgo/blob/v2/docs/MIGRATING_TO_V2.md#%s{{/}}\n", deprecation.DocLink)
+ out += formatter.Fi(1, "{{bold}}Learn more at:{{/}} {{cyan}}{{underline}}https://github.com/onsi/ginkgo/blob/ver2/docs/MIGRATING_TO_V2.md#%s{{/}}\n", deprecation.DocLink)
}
for _, location := range locations {
out += formatter.Fi(2, "{{gray}}%s{{/}}\n", location)
diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md
index 3486f35820..18190e8b91 100644
--- a/vendor/github.com/onsi/gomega/CHANGELOG.md
+++ b/vendor/github.com/onsi/gomega/CHANGELOG.md
@@ -1,3 +1,11 @@
+## 1.16.0
+
+### Features
+- feat: HaveHTTPStatus multiple expected values (#465) [aa69f1b]
+- feat: HaveHTTPHeaderWithValue() matcher (#463) [dd83a96]
+- feat: HaveHTTPBody matcher (#462) [504e1f2]
+- feat: formatter for HTTP responses (#461) [e5b3157]
+
## 1.15.0
### Fixes
diff --git a/vendor/github.com/onsi/gomega/go.mod b/vendor/github.com/onsi/gomega/go.mod
index 62b8f396c5..7fea4ac07a 100644
--- a/vendor/github.com/onsi/gomega/go.mod
+++ b/vendor/github.com/onsi/gomega/go.mod
@@ -1,6 +1,6 @@
module github.com/onsi/gomega
-go 1.14
+go 1.16
require (
github.com/golang/protobuf v1.5.2
diff --git a/vendor/github.com/onsi/gomega/go.sum b/vendor/github.com/onsi/gomega/go.sum
index 177d5e876c..56f1b44e22 100644
--- a/vendor/github.com/onsi/gomega/go.sum
+++ b/vendor/github.com/onsi/gomega/go.sum
@@ -1,4 +1,5 @@
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
@@ -20,6 +21,7 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
@@ -30,13 +32,19 @@ github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/yuin/goldmark v1.2.1 h1:ruQGxdhGHe7FWOJPT0mKs5+pD2Xs1Bm/kdGlHO04FmM=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@@ -47,6 +55,7 @@ golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 h1:DzZ89McO9/gWPsQXS/FVKAlG0
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -60,6 +69,7 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da h1:b3NXsE2LusjYGGjL5bxEVZZORm/YEFFrWFjR8eFrw/c=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@@ -85,6 +95,7 @@ google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/l
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go
index 6c7f1d9b7a..84775142c8 100644
--- a/vendor/github.com/onsi/gomega/gomega_dsl.go
+++ b/vendor/github.com/onsi/gomega/gomega_dsl.go
@@ -22,7 +22,7 @@ import (
"github.com/onsi/gomega/types"
)
-const GOMEGA_VERSION = "1.15.0"
+const GOMEGA_VERSION = "1.16.0"
const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler.
If you're using Ginkgo then you probably forgot to put your assertion in an It().
diff --git a/vendor/github.com/onsi/gomega/matchers.go b/vendor/github.com/onsi/gomega/matchers.go
index 667160ade8..223f6ef530 100644
--- a/vendor/github.com/onsi/gomega/matchers.go
+++ b/vendor/github.com/onsi/gomega/matchers.go
@@ -423,10 +423,29 @@ func BeADirectory() types.GomegaMatcher {
//Expected must be either an int or a string.
// Expect(resp).Should(HaveHTTPStatus(http.StatusOK)) // asserts that resp.StatusCode == 200
// Expect(resp).Should(HaveHTTPStatus("404 Not Found")) // asserts that resp.Status == "404 Not Found"
-func HaveHTTPStatus(expected interface{}) types.GomegaMatcher {
+// Expect(resp).Should(HaveHTTPStatus(http.StatusOK, http.StatusNoContent)) // asserts that resp.StatusCode == 200 || resp.StatusCode == 204
+func HaveHTTPStatus(expected ...interface{}) types.GomegaMatcher {
return &matchers.HaveHTTPStatusMatcher{Expected: expected}
}
+// HaveHTTPHeaderWithValue succeeds if the header is found and the value matches.
+// Actual must be either a *http.Response or *httptest.ResponseRecorder.
+// Expected must be a string header name, followed by a header value which
+// can be a string, or another matcher.
+func HaveHTTPHeaderWithValue(header string, value interface{}) types.GomegaMatcher {
+ return &matchers.HaveHTTPHeaderWithValueMatcher{
+ Header: header,
+ Value: value,
+ }
+}
+
+// HaveHTTPBody matches if the body matches.
+// Actual must be either a *http.Response or *httptest.ResponseRecorder.
+// Expected must be either a string, []byte, or other matcher
+func HaveHTTPBody(expected interface{}) types.GomegaMatcher {
+ return &matchers.HaveHTTPBodyMatcher{Expected: expected}
+}
+
//And succeeds only if all of the given matchers succeed.
//The matchers are tried in order, and will fail-fast if one doesn't succeed.
// Expect("hi").To(And(HaveLen(2), Equal("hi"))
diff --git a/vendor/github.com/onsi/gomega/matchers/have_http_body_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_http_body_matcher.go
new file mode 100644
index 0000000000..66cbb254a3
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/have_http_body_matcher.go
@@ -0,0 +1,101 @@
+package matchers
+
+import (
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+
+ "github.com/onsi/gomega/format"
+ "github.com/onsi/gomega/types"
+)
+
+type HaveHTTPBodyMatcher struct {
+ Expected interface{}
+ cachedBody []byte
+}
+
+func (matcher *HaveHTTPBodyMatcher) Match(actual interface{}) (bool, error) {
+ body, err := matcher.body(actual)
+ if err != nil {
+ return false, err
+ }
+
+ switch e := matcher.Expected.(type) {
+ case string:
+ return (&EqualMatcher{Expected: e}).Match(string(body))
+ case []byte:
+ return (&EqualMatcher{Expected: e}).Match(body)
+ case types.GomegaMatcher:
+ return e.Match(body)
+ default:
+ return false, fmt.Errorf("HaveHTTPBody matcher expects string, []byte, or GomegaMatcher. Got:\n%s", format.Object(matcher.Expected, 1))
+ }
+}
+
+func (matcher *HaveHTTPBodyMatcher) FailureMessage(actual interface{}) (message string) {
+ body, err := matcher.body(actual)
+ if err != nil {
+ return fmt.Sprintf("failed to read body: %s", err)
+ }
+
+ switch e := matcher.Expected.(type) {
+ case string:
+ return (&EqualMatcher{Expected: e}).FailureMessage(string(body))
+ case []byte:
+ return (&EqualMatcher{Expected: e}).FailureMessage(body)
+ case types.GomegaMatcher:
+ return e.FailureMessage(body)
+ default:
+ return fmt.Sprintf("HaveHTTPBody matcher expects string, []byte, or GomegaMatcher. Got:\n%s", format.Object(matcher.Expected, 1))
+ }
+}
+
+func (matcher *HaveHTTPBodyMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ body, err := matcher.body(actual)
+ if err != nil {
+ return fmt.Sprintf("failed to read body: %s", err)
+ }
+
+ switch e := matcher.Expected.(type) {
+ case string:
+ return (&EqualMatcher{Expected: e}).NegatedFailureMessage(string(body))
+ case []byte:
+ return (&EqualMatcher{Expected: e}).NegatedFailureMessage(body)
+ case types.GomegaMatcher:
+ return e.NegatedFailureMessage(body)
+ default:
+ return fmt.Sprintf("HaveHTTPBody matcher expects string, []byte, or GomegaMatcher. Got:\n%s", format.Object(matcher.Expected, 1))
+ }
+}
+
+// body returns the body. It is cached because once we read it in Match()
+// the Reader is closed and it is not readable again in FailureMessage()
+// or NegatedFailureMessage()
+func (matcher *HaveHTTPBodyMatcher) body(actual interface{}) ([]byte, error) {
+ if matcher.cachedBody != nil {
+ return matcher.cachedBody, nil
+ }
+
+ body := func(a *http.Response) ([]byte, error) {
+ if a.Body != nil {
+ defer a.Body.Close()
+ var err error
+ matcher.cachedBody, err = ioutil.ReadAll(a.Body)
+ if err != nil {
+ return nil, fmt.Errorf("error reading response body: %w", err)
+ }
+ }
+ return matcher.cachedBody, nil
+ }
+
+ switch a := actual.(type) {
+ case *http.Response:
+ return body(a)
+ case *httptest.ResponseRecorder:
+ return body(a.Result())
+ default:
+ return nil, fmt.Errorf("HaveHTTPBody matcher expects *http.Response or *httptest.ResponseRecorder. Got:\n%s", format.Object(actual, 1))
+ }
+
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_http_header_with_value_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_http_header_with_value_matcher.go
new file mode 100644
index 0000000000..c256f452e8
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/matchers/have_http_header_with_value_matcher.go
@@ -0,0 +1,81 @@
+package matchers
+
+import (
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+
+ "github.com/onsi/gomega/format"
+ "github.com/onsi/gomega/types"
+)
+
+type HaveHTTPHeaderWithValueMatcher struct {
+ Header string
+ Value interface{}
+}
+
+func (matcher *HaveHTTPHeaderWithValueMatcher) Match(actual interface{}) (success bool, err error) {
+ headerValue, err := matcher.extractHeader(actual)
+ if err != nil {
+ return false, err
+ }
+
+ headerMatcher, err := matcher.getSubMatcher()
+ if err != nil {
+ return false, err
+ }
+
+ return headerMatcher.Match(headerValue)
+}
+
+func (matcher *HaveHTTPHeaderWithValueMatcher) FailureMessage(actual interface{}) string {
+ headerValue, err := matcher.extractHeader(actual)
+ if err != nil {
+ panic(err) // protected by Match()
+ }
+
+ headerMatcher, err := matcher.getSubMatcher()
+ if err != nil {
+ panic(err) // protected by Match()
+ }
+
+ diff := format.IndentString(headerMatcher.FailureMessage(headerValue), 1)
+ return fmt.Sprintf("HTTP header %q:\n%s", matcher.Header, diff)
+}
+
+func (matcher *HaveHTTPHeaderWithValueMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ headerValue, err := matcher.extractHeader(actual)
+ if err != nil {
+ panic(err) // protected by Match()
+ }
+
+ headerMatcher, err := matcher.getSubMatcher()
+ if err != nil {
+ panic(err) // protected by Match()
+ }
+
+ diff := format.IndentString(headerMatcher.NegatedFailureMessage(headerValue), 1)
+ return fmt.Sprintf("HTTP header %q:\n%s", matcher.Header, diff)
+}
+
+func (matcher *HaveHTTPHeaderWithValueMatcher) getSubMatcher() (types.GomegaMatcher, error) {
+ switch m := matcher.Value.(type) {
+ case string:
+ return &EqualMatcher{Expected: matcher.Value}, nil
+ case types.GomegaMatcher:
+ return m, nil
+ default:
+ return nil, fmt.Errorf("HaveHTTPHeaderWithValue matcher must be passed a string or a GomegaMatcher. Got:\n%s", format.Object(matcher.Value, 1))
+ }
+}
+
+func (matcher *HaveHTTPHeaderWithValueMatcher) extractHeader(actual interface{}) (string, error) {
+ switch r := actual.(type) {
+ case *http.Response:
+ return r.Header.Get(matcher.Header), nil
+ case *httptest.ResponseRecorder:
+ return r.Result().Header.Get(matcher.Header), nil
+ default:
+ return "", fmt.Errorf("HaveHTTPHeaderWithValue matcher expects *http.Response or *httptest.ResponseRecorder. Got:\n%s", format.Object(actual, 1))
+ }
+}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go
index 3ce4800b73..70f54899ad 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go
@@ -2,14 +2,17 @@ package matchers
import (
"fmt"
+ "io/ioutil"
"net/http"
"net/http/httptest"
+ "reflect"
+ "strings"
"github.com/onsi/gomega/format"
)
type HaveHTTPStatusMatcher struct {
- Expected interface{}
+ Expected []interface{}
}
func (matcher *HaveHTTPStatusMatcher) Match(actual interface{}) (success bool, err error) {
@@ -23,20 +26,71 @@ func (matcher *HaveHTTPStatusMatcher) Match(actual interface{}) (success bool, e
return false, fmt.Errorf("HaveHTTPStatus matcher expects *http.Response or *httptest.ResponseRecorder. Got:\n%s", format.Object(actual, 1))
}
- switch e := matcher.Expected.(type) {
- case int:
- return resp.StatusCode == e, nil
- case string:
- return resp.Status == e, nil
+ if len(matcher.Expected) == 0 {
+ return false, fmt.Errorf("HaveHTTPStatus matcher must be passed an int or a string. Got nothing")
}
- return false, fmt.Errorf("HaveHTTPStatus matcher must be passed an int or a string. Got:\n%s", format.Object(matcher.Expected, 1))
+ for _, expected := range matcher.Expected {
+ switch e := expected.(type) {
+ case int:
+ if resp.StatusCode == e {
+ return true, nil
+ }
+ case string:
+ if resp.Status == e {
+ return true, nil
+ }
+ default:
+ return false, fmt.Errorf("HaveHTTPStatus matcher must be passed int or string types. Got:\n%s", format.Object(expected, 1))
+ }
+ }
+
+ return false, nil
}
func (matcher *HaveHTTPStatusMatcher) FailureMessage(actual interface{}) (message string) {
- return format.Message(actual, "to have HTTP status", matcher.Expected)
+ return fmt.Sprintf("Expected\n%s\n%s\n%s", formatHttpResponse(actual), "to have HTTP status", matcher.expectedString())
}
func (matcher *HaveHTTPStatusMatcher) NegatedFailureMessage(actual interface{}) (message string) {
- return format.Message(actual, "not to have HTTP status", matcher.Expected)
+ return fmt.Sprintf("Expected\n%s\n%s\n%s", formatHttpResponse(actual), "not to have HTTP status", matcher.expectedString())
+}
+
+func (matcher *HaveHTTPStatusMatcher) expectedString() string {
+ var lines []string
+ for _, expected := range matcher.Expected {
+ lines = append(lines, format.Object(expected, 1))
+ }
+ return strings.Join(lines, "\n")
+}
+
+func formatHttpResponse(input interface{}) string {
+ var resp *http.Response
+ switch r := input.(type) {
+ case *http.Response:
+ resp = r
+ case *httptest.ResponseRecorder:
+ resp = r.Result()
+ default:
+ return "cannot format invalid HTTP response"
+ }
+
+ body := ""
+ if resp.Body != nil {
+ defer resp.Body.Close()
+ data, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ data = []byte("")
+ }
+ body = format.Object(string(data), 0)
+ }
+
+ var s strings.Builder
+ s.WriteString(fmt.Sprintf("%s<%s>: {\n", format.Indent, reflect.TypeOf(input)))
+ s.WriteString(fmt.Sprintf("%s%sStatus: %s\n", format.Indent, format.Indent, format.Object(resp.Status, 0)))
+ s.WriteString(fmt.Sprintf("%s%sStatusCode: %s\n", format.Indent, format.Indent, format.Object(resp.StatusCode, 0)))
+ s.WriteString(fmt.Sprintf("%s%sBody: %s\n", format.Indent, format.Indent, body))
+ s.WriteString(fmt.Sprintf("%s}", format.Indent))
+
+ return s.String()
}
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go b/vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go
index baacf3f10c..db68d290dc 100644
--- a/vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go
+++ b/vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go
@@ -126,7 +126,7 @@ const (
PodDeletionCost = "controller.kubernetes.io/pod-deletion-cost"
// AnnotationTopologyAwareHints can be used to enable or disable Topology
- // Aware Hints for a Service. This may be set to "auto" or "disabled". Any
- // other value is treated as "disabled".
+ // Aware Hints for a Service. This may be set to "Auto" or "Disabled". Any
+ // other value is treated as "Disabled".
AnnotationTopologyAwareHints = "service.kubernetes.io/topology-aware-hints"
)
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go b/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go
index 57292c5f36..96a4ea6fdd 100644
--- a/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go
+++ b/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go
@@ -4267,7 +4267,7 @@ func ValidateService(service *core.Service) field.ErrorList {
allErrs = append(allErrs, field.Invalid(idxPath, ip, msgs[i]))
}
} else {
- allErrs = append(allErrs, validateNonSpecialIP(ip, idxPath)...)
+ allErrs = append(allErrs, ValidateNonSpecialIP(ip, idxPath)...)
}
}
@@ -5755,15 +5755,19 @@ func validateEndpointAddress(address *core.EndpointAddress, fldPath *field.Path)
allErrs = append(allErrs, field.Invalid(fldPath.Child("nodeName"), *address.NodeName, msg))
}
}
- allErrs = append(allErrs, validateNonSpecialIP(address.IP, fldPath.Child("ip"))...)
+ allErrs = append(allErrs, ValidateNonSpecialIP(address.IP, fldPath.Child("ip"))...)
return allErrs
}
-func validateNonSpecialIP(ipAddress string, fldPath *field.Path) field.ErrorList {
- // We disallow some IPs as endpoints or external-ips. Specifically,
- // unspecified and loopback addresses are nonsensical and link-local
- // addresses tend to be used for node-centric purposes (e.g. metadata
- // service).
+// ValidateNonSpecialIP is used to validate Endpoints, EndpointSlices, and
+// external IPs. Specifically, this disallows unspecified and loopback
+// addresses, which are nonsensical, and link-local addresses, which tend to be
+// used for node-centric purposes (e.g. the metadata service).
+//
+// IPv6 references
+// - https://www.iana.org/assignments/iana-ipv6-special-registry/iana-ipv6-special-registry.xhtml
+// - https://www.iana.org/assignments/ipv6-multicast-addresses/ipv6-multicast-addresses.xhtml
+func ValidateNonSpecialIP(ipAddress string, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
ip := net.ParseIP(ipAddress)
if ip == nil {
diff --git a/vendor/k8s.io/kubernetes/test/utils/image/manifest.go b/vendor/k8s.io/kubernetes/test/utils/image/manifest.go
index ae9aa951b1..98eeab4061 100644
--- a/vendor/k8s.io/kubernetes/test/utils/image/manifest.go
+++ b/vendor/k8s.io/kubernetes/test/utils/image/manifest.go
@@ -213,7 +213,7 @@ const (
func initImageConfigs() (map[int]Config, map[int]Config) {
configs := map[int]Config{}
- configs[Agnhost] = Config{promoterE2eRegistry, "agnhost", "2.31"}
+ configs[Agnhost] = Config{promoterE2eRegistry, "agnhost", "2.32"}
configs[AgnhostPrivate] = Config{PrivateRegistry, "agnhost", "2.6"}
configs[AuthenticatedAlpine] = Config{gcAuthenticatedRegistry, "alpine", "3.7"}
configs[AuthenticatedWindowsNanoServer] = Config{gcAuthenticatedRegistry, "windows-nanoserver", "v1"}
@@ -223,7 +223,7 @@ func initImageConfigs() (map[int]Config, map[int]Config) {
configs[CheckMetadataConcealment] = Config{promoterE2eRegistry, "metadata-concealment", "1.6"}
configs[CudaVectorAdd] = Config{e2eRegistry, "cuda-vector-add", "1.0"}
configs[CudaVectorAdd2] = Config{promoterE2eRegistry, "cuda-vector-add", "2.2"}
- configs[DebianIptables] = Config{buildImageRegistry, "debian-iptables", "buster-v1.5.0"}
+ configs[DebianIptables] = Config{buildImageRegistry, "debian-iptables", "buster-v1.6.0"}
configs[EchoServer] = Config{promoterE2eRegistry, "echoserver", "2.3"}
configs[Etcd] = Config{gcEtcdRegistry, "etcd", "3.4.13-0"}
configs[GlusterDynamicProvisioner] = Config{promoterE2eRegistry, "glusterdynamic-provisioner", "v1.0"}
diff --git a/vendor/k8s.io/utils/pointer/pointer.go b/vendor/k8s.io/utils/pointer/pointer.go
index 1da6f6664a..2cab2c5800 100644
--- a/vendor/k8s.io/utils/pointer/pointer.go
+++ b/vendor/k8s.io/utils/pointer/pointer.go
@@ -46,6 +46,24 @@ func AllPtrFieldsNil(obj interface{}) bool {
return true
}
+// Int returns a pointer to an int
+func Int(i int) *int {
+ return &i
+}
+
+var IntPtr = Int // for back-compat
+
+// IntDeref dereferences the int ptr and returns it if not nil, or else
+// returns def.
+func IntDeref(ptr *int, def int) int {
+ if ptr != nil {
+ return *ptr
+ }
+ return def
+}
+
+var IntPtrDerefOr = IntDeref // for back-compat
+
// Int32 returns a pointer to an int32.
func Int32(i int32) *int32 {
return &i
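The new Int/IntDeref helpers mirror the existing Int32/Int32Deref pattern, with IntPtr/IntPtrDerefOr kept as aliases for back-compat. A minimal usage sketch (the struct here is hypothetical, not from this repo):

```go
package main

import (
	"fmt"

	"k8s.io/utils/pointer"
)

// rolloutConfig is a hypothetical struct with an optional field.
type rolloutConfig struct {
	Replicas *int // nil means "use the default"
}

func main() {
	explicit := rolloutConfig{Replicas: pointer.Int(3)} // pointer to a literal
	fmt.Println(pointer.IntDeref(explicit.Replicas, 1)) // 3
	fmt.Println(pointer.IntDeref(nil, 1))               // nil falls back to the default: 1
}
```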
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 5172fbadd0..4c344a0c98 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -18,11 +18,11 @@ github.com/Azure/azure-sdk-for-go/version
github.com/Azure/azure-storage-file-go/azfile
# github.com/Azure/go-autorest v14.2.0+incompatible
github.com/Azure/go-autorest
-# github.com/Azure/go-autorest/autorest v0.11.20
+# github.com/Azure/go-autorest/autorest v0.11.21
## explicit
github.com/Azure/go-autorest/autorest
github.com/Azure/go-autorest/autorest/azure
-# github.com/Azure/go-autorest/autorest/adal v0.9.15
+# github.com/Azure/go-autorest/autorest/adal v0.9.16
## explicit
github.com/Azure/go-autorest/autorest/adal
# github.com/Azure/go-autorest/autorest/date v0.3.0
@@ -168,7 +168,7 @@ github.com/nxadm/tail/ratelimiter
github.com/nxadm/tail/util
github.com/nxadm/tail/watch
github.com/nxadm/tail/winfile
-# github.com/onsi/ginkgo v1.16.4
+# github.com/onsi/ginkgo v1.16.5
## explicit
github.com/onsi/ginkgo
github.com/onsi/ginkgo/config
@@ -190,7 +190,7 @@ github.com/onsi/ginkgo/reporters/stenographer
github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable
github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty
github.com/onsi/ginkgo/types
-# github.com/onsi/gomega v1.15.0
+# github.com/onsi/gomega v1.16.0
## explicit
github.com/onsi/gomega
github.com/onsi/gomega/format
@@ -387,7 +387,7 @@ gopkg.in/tomb.v1
gopkg.in/yaml.v2
# gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b
gopkg.in/yaml.v3
-# k8s.io/api v0.22.1 => k8s.io/api v0.21.0
+# k8s.io/api v0.22.3 => k8s.io/api v0.21.0
## explicit
k8s.io/api/admission/v1
k8s.io/api/admission/v1beta1
@@ -434,7 +434,7 @@ k8s.io/api/scheduling/v1beta1
k8s.io/api/storage/v1
k8s.io/api/storage/v1alpha1
k8s.io/api/storage/v1beta1
-# k8s.io/apimachinery v0.22.1 => k8s.io/apimachinery v0.21.0
+# k8s.io/apimachinery v0.22.3 => k8s.io/apimachinery v0.21.0
## explicit
k8s.io/apimachinery/pkg/api/equality
k8s.io/apimachinery/pkg/api/errors
@@ -491,7 +491,7 @@ k8s.io/apimachinery/pkg/watch
k8s.io/apimachinery/third_party/forked/golang/json
k8s.io/apimachinery/third_party/forked/golang/netutil
k8s.io/apimachinery/third_party/forked/golang/reflect
-# k8s.io/apiserver v0.22.1 => k8s.io/apiserver v0.21.0
+# k8s.io/apiserver v0.22.3 => k8s.io/apiserver v0.21.0
k8s.io/apiserver/pkg/admission
k8s.io/apiserver/pkg/admission/configuration
k8s.io/apiserver/pkg/admission/initializer
@@ -530,7 +530,7 @@ k8s.io/apiserver/pkg/storage/names
k8s.io/apiserver/pkg/util/feature
k8s.io/apiserver/pkg/util/webhook
k8s.io/apiserver/pkg/warning
-# k8s.io/client-go v0.22.1 => k8s.io/client-go v0.21.0
+# k8s.io/client-go v0.22.3 => k8s.io/client-go v0.21.0
## explicit
k8s.io/client-go/applyconfigurations/admissionregistration/v1
k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1
@@ -806,14 +806,14 @@ k8s.io/client-go/util/keyutil
k8s.io/client-go/util/retry
k8s.io/client-go/util/testing
k8s.io/client-go/util/workqueue
-# k8s.io/cloud-provider v0.22.1 => k8s.io/cloud-provider v0.21.0
+# k8s.io/cloud-provider v0.22.2 => k8s.io/cloud-provider v0.21.0
## explicit
k8s.io/cloud-provider
k8s.io/cloud-provider/service/helpers
k8s.io/cloud-provider/volume
k8s.io/cloud-provider/volume/errors
k8s.io/cloud-provider/volume/helpers
-# k8s.io/component-base v0.22.1 => k8s.io/component-base v0.21.0
+# k8s.io/component-base v0.22.3 => k8s.io/component-base v0.21.0
## explicit
k8s.io/component-base/cli/flag
k8s.io/component-base/config
@@ -834,9 +834,9 @@ k8s.io/kube-openapi/pkg/util/proto
# k8s.io/kubectl v0.0.0 => k8s.io/kubectl v0.21.0
k8s.io/kubectl/pkg/scale
k8s.io/kubectl/pkg/util/podutils
-# k8s.io/kubelet v0.0.0 => k8s.io/kubelet v0.21.0
+# k8s.io/kubelet v0.22.2 => k8s.io/kubelet v0.21.0
k8s.io/kubelet/pkg/apis/stats/v1alpha1
-# k8s.io/kubernetes v1.21.0
+# k8s.io/kubernetes v1.21.1
## explicit
k8s.io/kubernetes/pkg/api/legacyscheme
k8s.io/kubernetes/pkg/api/service
@@ -906,7 +906,7 @@ k8s.io/kubernetes/test/utils/image
# k8s.io/mount-utils v0.0.0 => k8s.io/mount-utils v0.21.1
## explicit
k8s.io/mount-utils
-# k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9
+# k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a
## explicit
k8s.io/utils/buffer
k8s.io/utils/exec
@@ -924,7 +924,7 @@ k8s.io/utils/trace
# sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15
sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client
sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client
-# sigs.k8s.io/cloud-provider-azure v0.7.4 => sigs.k8s.io/cloud-provider-azure v0.7.1-0.20210908075721-13c8062485f8
+# sigs.k8s.io/cloud-provider-azure v0.7.4 => sigs.k8s.io/cloud-provider-azure v0.7.1-0.20211103062220-cfbf336adc4e
## explicit
sigs.k8s.io/cloud-provider-azure/pkg/auth
sigs.k8s.io/cloud-provider-azure/pkg/azureclients
@@ -977,7 +977,7 @@ sigs.k8s.io/structured-merge-diff/v4/fieldpath
sigs.k8s.io/structured-merge-diff/v4/schema
sigs.k8s.io/structured-merge-diff/v4/typed
sigs.k8s.io/structured-merge-diff/v4/value
-# sigs.k8s.io/yaml v1.2.0
+# sigs.k8s.io/yaml v1.3.0
## explicit
sigs.k8s.io/yaml
# github.com/container-storage-interface/spec => github.com/container-storage-interface/spec v1.5.0
@@ -1013,4 +1013,4 @@ sigs.k8s.io/yaml
# k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.21.0
# k8s.io/sample-controller => k8s.io/sample-controller v0.21.0
# sigs.k8s.io/azurefile-csi-driver => ./
-# sigs.k8s.io/cloud-provider-azure => sigs.k8s.io/cloud-provider-azure v0.7.1-0.20210908075721-13c8062485f8
+# sigs.k8s.io/cloud-provider-azure => sigs.k8s.io/cloud-provider-azure v0.7.1-0.20211103062220-cfbf336adc4e
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient/azure_armclient.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient/azure_armclient.go
index 895f01bfaf..531b822073 100644
--- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient/azure_armclient.go
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient/azure_armclient.go
@@ -599,7 +599,7 @@ func (c *Client) PutResourceAsync(ctx context.Context, resourceID string, parame
}
// PostResource posts a resource by resource ID
-func (c *Client) PostResource(ctx context.Context, resourceID, action string, parameters interface{}) (*http.Response, *retry.Error) {
+func (c *Client) PostResource(ctx context.Context, resourceID, action string, parameters interface{}, queryParameters map[string]interface{}) (*http.Response, *retry.Error) {
pathParameters := map[string]interface{}{
"resourceID": resourceID,
"action": action,
@@ -609,6 +609,10 @@ func (c *Client) PostResource(ctx context.Context, resourceID, action string, pa
autorest.WithPathParameters("{resourceID}/{action}", pathParameters),
autorest.WithJSON(parameters),
}
+ if len(queryParameters) > 0 {
+ decorators = append(decorators, autorest.WithQueryParameters(queryParameters))
+ }
+
request, err := c.PreparePostRequest(ctx, decorators...)
if err != nil {
klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "post.prepare", resourceID, err)
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient/interface.go
index 48d6de8a34..20a36bb9e3 100644
--- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient/interface.go
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/armclient/interface.go
@@ -92,7 +92,7 @@ type Interface interface {
GetResourceWithDecorators(ctx context.Context, resourceID string, decorators []autorest.PrepareDecorator) (*http.Response, *retry.Error)
// PostResource posts a resource by resource ID
- PostResource(ctx context.Context, resourceID, action string, parameters interface{}) (*http.Response, *retry.Error)
+ PostResource(ctx context.Context, resourceID, action string, parameters interface{}, queryParameters map[string]interface{}) (*http.Response, *retry.Error)
// DeleteResource deletes a resource by resource ID
DeleteResource(ctx context.Context, resourceID, ifMatch string) *retry.Error
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/deploymentclient/azure_deploymentclient.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/deploymentclient/azure_deploymentclient.go
index 7aecca20ff..1e3f647aac 100644
--- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/deploymentclient/azure_deploymentclient.go
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/deploymentclient/azure_deploymentclient.go
@@ -438,7 +438,7 @@ func (c *Client) ExportTemplate(ctx context.Context, resourceGroupName string, d
autorest.Encode("path", c.subscriptionID),
autorest.Encode("path", resourceGroupName),
autorest.Encode("path", deploymentName))
- response, rerr := c.armClient.PostResource(ctx, resourceID, "exportTemplate", struct{}{})
+ response, rerr := c.armClient.PostResource(ctx, resourceID, "exportTemplate", struct{}{}, map[string]interface{}{})
defer c.armClient.CloseResponse(ctx, response)
if rerr != nil {
klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "deployment.exportTemplate.request", resourceID, rerr.Error())
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/interface.go
index 80fba4b1e9..405464a31c 100644
--- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/interface.go
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/diskclient/interface.go
@@ -26,7 +26,7 @@ import (
const (
// APIVersion is the API version for compute.
- APIVersion = "2020-12-01"
+ APIVersion = "2021-04-01"
// AzureStackCloudAPIVersion is the API version for Azure Stack
AzureStackCloudAPIVersion = "2019-03-01"
// AzureStackCloudName is the cloud name of Azure Stack
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privatednsclient/azure_privatednsclient.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privatednsclient/azure_privatednsclient.go
index d160e7c1be..0c2369b9b4 100644
--- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privatednsclient/azure_privatednsclient.go
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privatednsclient/azure_privatednsclient.go
@@ -33,7 +33,7 @@ type Client struct {
// New creates a new privatedns client.
func New(config *azclients.ClientConfig) *Client {
- privateDNSClient := privatedns.NewPrivateZonesClient(config.SubscriptionID)
+ privateDNSClient := privatedns.NewPrivateZonesClientWithBaseURI(config.ResourceManagerEndpoint, config.SubscriptionID)
privateDNSClient.Authorizer = config.Authorizer
client := &Client{
privateDNSClient: privateDNSClient,
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privatednszonegroupclient/azure_privatednszonegroupclient.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privatednszonegroupclient/azure_privatednszonegroupclient.go
index 71e218643e..401d7d0d38 100644
--- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privatednszonegroupclient/azure_privatednszonegroupclient.go
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privatednszonegroupclient/azure_privatednszonegroupclient.go
@@ -33,7 +33,7 @@ type Client struct {
// New creates a new private dns zone group client.
func New(config *azclients.ClientConfig) *Client {
- privateDNSZoneGroupClient := network.NewPrivateDNSZoneGroupsClient(config.SubscriptionID)
+ privateDNSZoneGroupClient := network.NewPrivateDNSZoneGroupsClientWithBaseURI(config.ResourceManagerEndpoint, config.SubscriptionID)
privateDNSZoneGroupClient.Authorizer = config.Authorizer
client := &Client{
privateDNSZoneGroupClient: privateDNSZoneGroupClient,
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privateendpointclient/azure_privateendpointclient.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privateendpointclient/azure_privateendpointclient.go
index 15096d4123..f7883321fd 100644
--- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privateendpointclient/azure_privateendpointclient.go
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/privateendpointclient/azure_privateendpointclient.go
@@ -33,7 +33,7 @@ type Client struct {
// New creates a new private endpoint client.
func New(config *azclients.ClientConfig) *Client {
- privateEndpointClient := network.NewPrivateEndpointsClient(config.SubscriptionID)
+ privateEndpointClient := network.NewPrivateEndpointsClientWithBaseURI(config.ResourceManagerEndpoint, config.SubscriptionID)
privateEndpointClient.Authorizer = config.Authorizer
client := &Client{
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/storageaccountclient/azure_storageaccountclient.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/storageaccountclient/azure_storageaccountclient.go
index 02591e5251..3b681127ff 100644
--- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/storageaccountclient/azure_storageaccountclient.go
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/storageaccountclient/azure_storageaccountclient.go
@@ -187,7 +187,7 @@ func (c *Client) listStorageAccountKeys(ctx context.Context, resourceGroupName s
)
result := storage.AccountListKeysResult{}
- response, rerr := c.armClient.PostResource(ctx, resourceID, "listKeys", struct{}{})
+ response, rerr := c.armClient.PostResource(ctx, resourceID, "listKeys", struct{}{}, map[string]interface{}{})
defer c.armClient.CloseResponse(ctx, response)
if rerr != nil {
klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "storageaccount.listkeys.request", resourceID, rerr.Error())
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssclient/azure_vmssclient.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssclient/azure_vmssclient.go
index 8ce131feb4..40d52e93fd 100644
--- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssclient/azure_vmssclient.go
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssclient/azure_vmssclient.go
@@ -472,7 +472,7 @@ func (c *Client) DeleteInstances(ctx context.Context, resourceGroupName string,
}
// DeleteInstancesAsync sends the delete request to ARM client and DOES NOT wait on the future
-func (c *Client) DeleteInstancesAsync(ctx context.Context, resourceGroupName string, vmScaleSetName string, vmInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs) (*azure.Future, *retry.Error) {
+func (c *Client) DeleteInstancesAsync(ctx context.Context, resourceGroupName string, vmScaleSetName string, vmInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs, forceDelete bool) (*azure.Future, *retry.Error) {
mc := metrics.NewMetricContext("vmss", "delete_instances_async", resourceGroupName, c.subscriptionID, "")
// Report errors if the client is rate limited.
@@ -495,7 +495,13 @@ func (c *Client) DeleteInstancesAsync(ctx context.Context, resourceGroupName str
vmScaleSetName,
)
- response, rerr := c.armClient.PostResource(ctx, resourceID, "delete", vmInstanceIDs)
+ var queryParameters map[string]interface{}
+ if forceDelete {
+ queryParameters = map[string]interface{}{
+ "forceDeletion": true,
+ }
+ }
+ response, rerr := c.armClient.PostResource(ctx, resourceID, "delete", vmInstanceIDs, queryParameters)
defer c.armClient.CloseResponse(ctx, response)
if rerr != nil {
@@ -544,7 +550,7 @@ func (c *Client) DeallocateInstancesAsync(ctx context.Context, resourceGroupName
vmScaleSetName,
)
- response, rerr := c.armClient.PostResource(ctx, resourceID, "deallocate", vmInstanceIDs)
+ response, rerr := c.armClient.PostResource(ctx, resourceID, "deallocate", vmInstanceIDs, map[string]interface{}{})
defer c.armClient.CloseResponse(ctx, response)
if rerr != nil {
@@ -593,7 +599,7 @@ func (c *Client) StartInstancesAsync(ctx context.Context, resourceGroupName stri
vmScaleSetName,
)
- response, rerr := c.armClient.PostResource(ctx, resourceID, "start", vmInstanceIDs)
+ response, rerr := c.armClient.PostResource(ctx, resourceID, "start", vmInstanceIDs, map[string]interface{}{})
defer c.armClient.CloseResponse(ctx, response)
if rerr != nil {
@@ -626,7 +632,7 @@ func (c *Client) deleteVMSSInstances(ctx context.Context, resourceGroupName stri
"Microsoft.Compute/virtualMachineScaleSets",
vmScaleSetName,
)
- response, rerr := c.armClient.PostResource(ctx, resourceID, "delete", vmInstanceIDs)
+ response, rerr := c.armClient.PostResource(ctx, resourceID, "delete", vmInstanceIDs, map[string]interface{}{})
defer c.armClient.CloseResponse(ctx, response)
if rerr != nil {
klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "vmss.deletevms.request", resourceID, rerr.Error())
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssclient/interface.go
index a1b1bd86b4..5f572e35f5 100644
--- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssclient/interface.go
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssclient/interface.go
@@ -57,7 +57,7 @@ type Interface interface {
DeleteInstances(ctx context.Context, resourceGroupName string, vmScaleSetName string, vmInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs) *retry.Error
// DeleteInstancesAsync sends the delete request to the ARM client and DOES NOT wait on the future
- DeleteInstancesAsync(ctx context.Context, resourceGroupName string, vmScaleSetName string, vmInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs) (*azure.Future, *retry.Error)
+ DeleteInstancesAsync(ctx context.Context, resourceGroupName string, vmScaleSetName string, vmInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs, forceDelete bool) (*azure.Future, *retry.Error)
// WaitForCreateOrUpdateResult waits for the response of the create or update request
WaitForCreateOrUpdateResult(ctx context.Context, future *azure.Future, resourceGroupName string) (*http.Response, error)
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssclient/mockvmssclient/interface.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssclient/mockvmssclient/interface.go
index d3871345b9..2cbd7d4c77 100644
--- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssclient/mockvmssclient/interface.go
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/azureclients/vmssclient/mockvmssclient/interface.go
@@ -144,18 +144,18 @@ func (mr *MockInterfaceMockRecorder) DeleteInstances(ctx, resourceGroupName, vmS
}
// DeleteInstancesAsync mocks base method.
-func (m *MockInterface) DeleteInstancesAsync(ctx context.Context, resourceGroupName, vmScaleSetName string, vmInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs) (*azure.Future, *retry.Error) {
+func (m *MockInterface) DeleteInstancesAsync(ctx context.Context, resourceGroupName, VMScaleSetName string, vmInstanceIDs compute.VirtualMachineScaleSetVMInstanceRequiredIDs, forceDelete bool) (*azure.Future, *retry.Error) {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "DeleteInstancesAsync", ctx, resourceGroupName, vmScaleSetName, vmInstanceIDs)
+ ret := m.ctrl.Call(m, "DeleteInstancesAsync", ctx, resourceGroupName, VMScaleSetName, vmInstanceIDs, forceDelete)
ret0, _ := ret[0].(*azure.Future)
ret1, _ := ret[1].(*retry.Error)
return ret0, ret1
}
// DeleteInstancesAsync indicates an expected call of DeleteInstancesAsync.
-func (mr *MockInterfaceMockRecorder) DeleteInstancesAsync(ctx, resourceGroupName, vmScaleSetName, vmInstanceIDs interface{}) *gomock.Call {
+func (mr *MockInterfaceMockRecorder) DeleteInstancesAsync(ctx, resourceGroupName, VMScaleSetName, vmInstanceIDs, forceDelete interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteInstancesAsync", reflect.TypeOf((*MockInterface)(nil).DeleteInstancesAsync), ctx, resourceGroupName, vmScaleSetName, vmInstanceIDs)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteInstancesAsync", reflect.TypeOf((*MockInterface)(nil).DeleteInstancesAsync), ctx, resourceGroupName, VMScaleSetName, vmInstanceIDs, forceDelete)
}
// WaitForCreateOrUpdateResult mocks base method.
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/consts/consts.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/consts/consts.go
index 12b04a1fbe..7f926ef6a7 100644
--- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/consts/consts.go
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/consts/consts.go
@@ -271,11 +271,14 @@ const (
ServiceAnnotationAdditionalPublicIPs = "service.beta.kubernetes.io/azure-additional-public-ips"
// ServiceTagKey is the service key applied for public IP tags.
- ServiceTagKey = "service"
+ ServiceTagKey = "k8s-azure-service"
+ LegacyServiceTagKey = "service"
// ClusterNameKey is the cluster name key applied for public IP tags.
- ClusterNameKey = "kubernetes-cluster-name"
+ ClusterNameKey = "k8s-azure-cluster-name"
+ LegacyClusterNameKey = "kubernetes-cluster-name"
// ServiceUsingDNSKey is the service name consuming the DNS label on the public IP
- ServiceUsingDNSKey = "kubernetes-dns-label-service"
+ ServiceUsingDNSKey = "k8s-azure-dns-label-service"
+ LegacyServiceUsingDNSKey = "kubernetes-dns-label-service"
// DefaultLoadBalancerSourceRanges is the default value of the load balancer source ranges
DefaultLoadBalancerSourceRanges = "0.0.0.0/0"
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure.go
index 81c5b796a5..8377103d09 100644
--- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure.go
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure.go
@@ -279,7 +279,7 @@ type Cloud struct {
virtualNetworkLinksClient virtualnetworklinksclient.Interface
ResourceRequestBackoff wait.Backoff
- metadata *InstanceMetadataService
+ Metadata *InstanceMetadataService
VMSet VMSet
// ipv6DualStack allows overriding for unit testing. It's normally initialized from featuregates
@@ -297,6 +297,8 @@ type Cloud struct {
nodeResourceGroups map[string]string
// unmanagedNodes holds a list of nodes not managed by Azure cloud provider.
unmanagedNodes sets.String
+ // excludeLoadBalancerNodes holds a list of nodes that should be excluded from LoadBalancer.
+ excludeLoadBalancerNodes sets.String
// nodeInformerSynced is for determining if the informer has synced.
nodeInformerSynced cache.InformerSynced
@@ -403,11 +405,12 @@ func (az *Cloud) configSecretMetadata(secretName, secretNamespace, cloudConfigKe
func NewCloudFromSecret(clientBuilder cloudprovider.ControllerClientBuilder, secretName, secretNamespace, cloudConfigKey string) (cloudprovider.Interface, error) {
az := &Cloud{
- nodeNames: sets.NewString(),
- nodeZones: map[string]sets.String{},
- nodeResourceGroups: map[string]string{},
- unmanagedNodes: sets.NewString(),
- routeCIDRs: map[string]string{},
+ nodeNames: sets.NewString(),
+ nodeZones: map[string]sets.String{},
+ nodeResourceGroups: map[string]string{},
+ unmanagedNodes: sets.NewString(),
+ routeCIDRs: map[string]string{},
+ excludeLoadBalancerNodes: sets.NewString(),
}
az.configSecretMetadata(secretName, secretNamespace, cloudConfigKey)
@@ -433,11 +436,12 @@ func NewCloudWithoutFeatureGates(configReader io.Reader, callFromCCM bool) (*Clo
}
az := &Cloud{
- nodeNames: sets.NewString(),
- nodeZones: map[string]sets.String{},
- nodeResourceGroups: map[string]string{},
- unmanagedNodes: sets.NewString(),
- routeCIDRs: map[string]string{},
+ nodeNames: sets.NewString(),
+ nodeZones: map[string]sets.String{},
+ nodeResourceGroups: map[string]string{},
+ unmanagedNodes: sets.NewString(),
+ routeCIDRs: map[string]string{},
+ excludeLoadBalancerNodes: sets.NewString(),
}
err = az.InitializeCloudFromConfig(config, false, callFromCCM)
@@ -528,7 +532,7 @@ func (az *Cloud) InitializeCloudFromConfig(config *Config, fromSecret, callFromC
az.Config = *config
az.Environment = *env
az.ResourceRequestBackoff = resourceRequestBackoff
- az.metadata, err = NewInstanceMetadataService(consts.ImdsServer)
+ az.Metadata, err = NewInstanceMetadataService(consts.ImdsServer)
if err != nil {
return err
}
@@ -937,10 +941,6 @@ func (az *Cloud) SetInformers(informerFactory informers.SharedInformerFactory) {
UpdateFunc: func(prev, obj interface{}) {
prevNode := prev.(*v1.Node)
newNode := obj.(*v1.Node)
- if newNode.Labels[consts.LabelFailureDomainBetaZone] ==
- prevNode.Labels[consts.LabelFailureDomainBetaZone] {
- return
- }
az.updateNodeCaches(prevNode, newNode)
},
DeleteFunc: func(obj interface{}) {
@@ -993,6 +993,12 @@ func (az *Cloud) updateNodeCaches(prevNode, newNode *v1.Node) {
managed, ok := prevNode.ObjectMeta.Labels[consts.ManagedByAzureLabel]
if ok && strings.EqualFold(managed, consts.NotManagedByAzureLabelValue) {
az.unmanagedNodes.Delete(prevNode.ObjectMeta.Name)
+ az.excludeLoadBalancerNodes.Delete(prevNode.ObjectMeta.Name)
+ }
+
+ // Remove from excludeLoadBalancerNodes cache.
+ if _, hasExcludeBalancerLabel := prevNode.ObjectMeta.Labels[v1.LabelNodeExcludeBalancers]; hasExcludeBalancerLabel {
+ az.excludeLoadBalancerNodes.Delete(prevNode.ObjectMeta.Name)
}
}
@@ -1019,6 +1025,12 @@ func (az *Cloud) updateNodeCaches(prevNode, newNode *v1.Node) {
managed, ok := newNode.ObjectMeta.Labels[consts.ManagedByAzureLabel]
if ok && strings.EqualFold(managed, consts.NotManagedByAzureLabelValue) {
az.unmanagedNodes.Insert(newNode.ObjectMeta.Name)
+ az.excludeLoadBalancerNodes.Insert(newNode.ObjectMeta.Name)
+ }
+
+ // Add to excludeLoadBalancerNodes cache.
+ if _, hasExcludeBalancerLabel := newNode.ObjectMeta.Labels[v1.LabelNodeExcludeBalancers]; hasExcludeBalancerLabel {
+ az.excludeLoadBalancerNodes.Insert(newNode.ObjectMeta.Name)
}
}
}
@@ -1124,16 +1136,23 @@ func (az *Cloud) GetUnmanagedNodes() (sets.String, error) {
return sets.NewString(az.unmanagedNodes.List()...), nil
}
-// ShouldNodeExcludedFromLoadBalancer returns true if node is unmanaged or in external resource group.
-func (az *Cloud) ShouldNodeExcludedFromLoadBalancer(node *v1.Node) bool {
- labels := node.ObjectMeta.Labels
- if rg, ok := labels[consts.ExternalResourceGroupLabel]; ok && !strings.EqualFold(rg, az.ResourceGroup) {
- return true
+// ShouldNodeExcludedFromLoadBalancer returns true if the node is unmanaged, in an external resource group, or labeled with "node.kubernetes.io/exclude-from-external-load-balancers".
+func (az *Cloud) ShouldNodeExcludedFromLoadBalancer(nodeName string) (bool, error) {
+ // Kubelet won't set az.nodeInformerSynced; in that case treat the node as not excluded and return no error.
+ if az.nodeInformerSynced == nil {
+ return false, nil
+ }
+
+ az.nodeCachesLock.RLock()
+ defer az.nodeCachesLock.RUnlock()
+ if !az.nodeInformerSynced() {
+ return false, fmt.Errorf("node informer is not synced when trying to fetch node caches")
}
- if managed, ok := labels[consts.ManagedByAzureLabel]; ok && strings.EqualFold(managed, consts.NotManagedByAzureLabelValue) {
- return true
+ // Return true if the node is in an external resource group.
+ if cachedRG, ok := az.nodeResourceGroups[nodeName]; ok && !strings.EqualFold(cachedRG, az.ResourceGroup) {
+ return true, nil
}
- return false
+ return az.excludeLoadBalancerNodes.Has(nodeName), nil
}
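The method now takes a node name and answers from the informer-backed caches (node resource groups plus the new excludeLoadBalancerNodes set), returning an error when the informer has not synced. A hedged sketch of the new calling convention inside a node loop:

```go
// Sketch only; az is a *Cloud and nodes is a []*v1.Node from the caller.
for _, node := range nodes {
	shouldExclude, err := az.ShouldNodeExcludedFromLoadBalancer(node.Name)
	if err != nil {
		return err // e.g. the node informer is not synced yet
	}
	if shouldExclude {
		continue // keep unmanaged/excluded nodes out of the backend pool
	}
	// ... add the node to the backend pool ...
}
```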
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_backoff.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_backoff.go
index 9e8dea4eb9..8aeba3a658 100644
--- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_backoff.go
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_backoff.go
@@ -258,23 +258,23 @@ func (az *Cloud) CreateOrUpdateLB(service *v1.Service, lb network.LoadBalancer)
return rerr.Error()
}
-// ListAgentPoolLBs invokes az.LoadBalancerClient.List and filter out
+// ListManagedLBs invokes az.LoadBalancerClient.List and filter out
// those that are not managed by cloud provider azure or not associated to a managed VMSet.
-func (az *Cloud) ListAgentPoolLBs(service *v1.Service, nodes []*v1.Node, clusterName string) ([]network.LoadBalancer, error) {
+func (az *Cloud) ListManagedLBs(service *v1.Service, nodes []*v1.Node, clusterName string) ([]network.LoadBalancer, error) {
allLBs, err := az.ListLB(service)
if err != nil {
return nil, err
}
if allLBs == nil {
- klog.Warningf("ListAgentPoolLBs: no LBs found")
+ klog.Warningf("ListManagedLBs: no LBs found")
return nil, nil
}
agentPoolLBs := make([]network.LoadBalancer, 0)
agentPoolVMSetNames, err := az.VMSet.GetAgentPoolVMSetNames(nodes)
if err != nil {
- return nil, fmt.Errorf("ListAgentPoolLBs: failed to get agent pool vmSet names: %w", err)
+ return nil, fmt.Errorf("ListManagedLBs: failed to get agent pool vmSet names: %w", err)
}
agentPoolVMSetNamesSet := sets.NewString()
if agentPoolVMSetNames != nil && len(*agentPoolVMSetNames) > 0 {
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_common.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_common.go
index 6ad4e06d0d..ccb4c76505 100644
--- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_common.go
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_controller_common.go
@@ -145,9 +145,10 @@ func (c *controllerCommon) getNodeVMSet(nodeName types.NodeName, crt azcache.Azu
return ss, nil
}
-// AttachDisk attaches a vhd to vm. The vhd must exist, can be identified by diskName, diskURI.
+// AttachDisk attaches a disk to the vm. The async parameter indicates whether
+// to allow multiple batch disk attaches on one node in parallel.
// return (lun, error)
-func (c *controllerCommon) AttachDisk(ctx context.Context, isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName,
+func (c *controllerCommon) AttachDisk(ctx context.Context, async bool, diskName, diskURI string, nodeName types.NodeName,
cachingMode compute.CachingTypes, disk *compute.Disk) (int32, error) {
diskEncryptionSetID := ""
writeAcceleratorEnabled := false
@@ -209,7 +210,7 @@ func (c *controllerCommon) AttachDisk(ctx context.Context, isManagedDisk bool, d
options := AttachDiskOptions{
lun: -1,
- isManagedDisk: isManagedDisk,
+ isManagedDisk: true,
diskName: diskName,
cachingMode: cachingMode,
diskEncryptionSetID: diskEncryptionSetID,
@@ -255,12 +256,12 @@ func (c *controllerCommon) AttachDisk(ctx context.Context, isManagedDisk bool, d
return -1, err
}
- if c.diskOpRateLimiter.TryAccept() {
+ if async && c.diskOpRateLimiter.TryAccept() {
// unlock and wait for attach disk complete
unlock = true
c.lockMap.UnlockEntry(node)
} else {
- klog.Warningf("azureDisk - switch to batch operation since disk operation is rate limited, current QPS: %f", c.diskOpRateLimiter.QPS())
+ klog.Warningf("azureDisk - switch to batch operation due to rate limiting (async: %t), QPS: %f", async, c.diskOpRateLimiter.QPS())
}
resourceGroup, err := getResourceGroupFromDiskURI(diskURI)
if err != nil {
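A hedged sketch of the reworked semantics: with async=true and rate-limiter budget available, the node lock is released while the attach completes in the background; with async=false (or when rate limited) the call stays on the serialized batch path. Variables are assumed from the surrounding controller:

```go
// Sketch only; c is the controllerCommon from this file.
lun, err := c.AttachDisk(ctx, true /* async */, diskName, diskURI, nodeName,
	compute.CachingTypesReadOnly, disk)
if err != nil {
	return err
}
klog.V(2).Infof("azureDisk - disk %s attached to node %s at LUN %d", diskName, nodeName, lun)
```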
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_fakes.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_fakes.go
index f9cc308669..2e46c63f8b 100644
--- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_fakes.go
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_fakes.go
@@ -81,12 +81,13 @@ func GetTestCloud(ctrl *gomock.Controller) (az *Cloud) {
MaximumLoadBalancerRuleCount: 250,
VMType: consts.VMTypeStandard,
},
- nodeZones: map[string]sets.String{},
- nodeInformerSynced: func() bool { return true },
- nodeResourceGroups: map[string]string{},
- unmanagedNodes: sets.NewString(),
- routeCIDRs: map[string]string{},
- eventRecorder: &record.FakeRecorder{},
+ nodeZones: map[string]sets.String{},
+ nodeInformerSynced: func() bool { return true },
+ nodeResourceGroups: map[string]string{},
+ unmanagedNodes: sets.NewString(),
+ excludeLoadBalancerNodes: sets.NewString(),
+ routeCIDRs: map[string]string{},
+ eventRecorder: &record.FakeRecorder{},
}
az.DisksClient = mockdiskclient.NewMockInterface(ctrl)
az.SnapshotsClient = mocksnapshotclient.NewMockInterface(ctrl)
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_instances.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_instances.go
index 6f97744b9a..c6a6080856 100644
--- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_instances.go
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_instances.go
@@ -81,7 +81,7 @@ func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.N
}
if az.UseInstanceMetadata {
- metadata, err := az.metadata.GetMetadata(azcache.CacheReadTypeDefault)
+ metadata, err := az.Metadata.GetMetadata(azcache.CacheReadTypeDefault)
if err != nil {
return nil, err
}
@@ -150,7 +150,7 @@ func (az *Cloud) getLocalInstanceNodeAddresses(netInterfaces []NetworkInterface,
if len(addresses) == 1 {
// No IP addresses were returned by the instance metadata service; clean up the cache and report errors.
- _ = az.metadata.imsCache.Delete(consts.MetadataCacheKey)
+ _ = az.Metadata.imsCache.Delete(consts.MetadataCacheKey)
return nil, fmt.Errorf("get empty IP addresses from instance metadata service")
}
return addresses, nil
@@ -333,7 +333,7 @@ func (az *Cloud) InstanceID(ctx context.Context, name types.NodeName) (string, e
}
if az.UseInstanceMetadata {
- metadata, err := az.metadata.GetMetadata(azcache.CacheReadTypeDefault)
+ metadata, err := az.Metadata.GetMetadata(azcache.CacheReadTypeDefault)
if err != nil {
return "", err
}
@@ -423,7 +423,7 @@ func (az *Cloud) InstanceType(ctx context.Context, name types.NodeName) (string,
}
if az.UseInstanceMetadata {
- metadata, err := az.metadata.GetMetadata(azcache.CacheReadTypeDefault)
+ metadata, err := az.Metadata.GetMetadata(azcache.CacheReadTypeDefault)
if err != nil {
return "", err
}
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer.go
index 026b720943..1d49c38955 100644
--- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer.go
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_loadbalancer.go
@@ -258,7 +258,7 @@ func (az *Cloud) cleanupVMSetFromBackendPoolByCondition(slb *network.LoadBalance
},
}
// decouple the backendPool from the node
- err := az.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, backendpoolToBeDeleted)
+ err := az.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, backendpoolToBeDeleted, true)
if err != nil {
return nil, err
}
@@ -313,7 +313,7 @@ func (az *Cloud) shouldChangeLoadBalancer(service *v1.Service, currLBName, clust
return true
}
-func (az *Cloud) removeFrontendIPConfigurationFromLoadBalancer(lb *network.LoadBalancer, fip *network.FrontendIPConfiguration, clusterName string, service *v1.Service) error {
+func (az *Cloud) removeFrontendIPConfigurationFromLoadBalancer(lb *network.LoadBalancer, existingLBs []network.LoadBalancer, fip *network.FrontendIPConfiguration, clusterName string, service *v1.Service) error {
if lb == nil || lb.LoadBalancerPropertiesFormat == nil || lb.FrontendIPConfigurations == nil {
return nil
}
@@ -348,7 +348,7 @@ func (az *Cloud) removeFrontendIPConfigurationFromLoadBalancer(lb *network.LoadB
if len(fipConfigs) == 0 {
klog.V(2).Infof("removeFrontendIPConfigurationFromLoadBalancer(%s, %s, %s, %s): deleting load balancer because there is no remaining frontend IP configurations", to.String(lb.Name), to.String(fip.Name), clusterName, service.Name)
- err := az.cleanOrphanedLoadBalancer(lb, service, clusterName)
+ err := az.cleanOrphanedLoadBalancer(lb, existingLBs, service, clusterName)
if err != nil {
klog.Errorf("removeFrontendIPConfigurationFromLoadBalancer(%s, %s, %s, %s): failed to cleanupOrphanedLoadBalancer: %v", to.String(lb.Name), to.String(fip.Name), clusterName, service.Name, err)
return err
@@ -365,7 +365,7 @@ func (az *Cloud) removeFrontendIPConfigurationFromLoadBalancer(lb *network.LoadB
return nil
}
-func (az *Cloud) cleanOrphanedLoadBalancer(lb *network.LoadBalancer, service *v1.Service, clusterName string) error {
+func (az *Cloud) cleanOrphanedLoadBalancer(lb *network.LoadBalancer, existingLBs []network.LoadBalancer, service *v1.Service, clusterName string) error {
lbName := to.String(lb.Name)
serviceName := getServiceName(service)
isBackendPoolPreConfigured := az.isBackendPoolPreConfigured(service)
@@ -375,6 +375,18 @@ func (az *Cloud) cleanOrphanedLoadBalancer(lb *network.LoadBalancer, service *v1
if isBackendPoolPreConfigured {
klog.V(2).Infof("cleanOrphanedLoadBalancer(%s, %s, %s): ignore cleanup of dirty lb because the lb is pre-configured", lbName, serviceName, clusterName)
} else {
+ foundLB := false
+ for _, existingLB := range existingLBs {
+ if strings.EqualFold(to.String(lb.Name), to.String(existingLB.Name)) {
+ foundLB = true
+ break
+ }
+ }
+ if !foundLB {
+ klog.V(2).Infof("cleanOrphanedLoadBalancer: the LB %s doesn't exist, will not delete it", to.String(lb.Name))
+ return nil
+ }
+
// When FrontendIPConfigurations is empty, we need to delete the Azure load balancer resource itself,
// because an Azure load balancer cannot have an empty FrontendIPConfigurations collection
klog.V(2).Infof("cleanOrphanedLoadBalancer(%s, %s, %s): deleting the LB since there are no remaining frontendIPConfigurations", lbName, serviceName, clusterName)
@@ -385,7 +397,7 @@ func (az *Cloud) cleanOrphanedLoadBalancer(lb *network.LoadBalancer, service *v1
// do nothing for availability set
lb.BackendAddressPools = nil
}
- err := az.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, lb.BackendAddressPools)
+ err := az.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, lb.BackendAddressPools, true)
if err != nil {
klog.Errorf("cleanOrphanedLoadBalancer(%s, %s, %s): failed to EnsureBackendPoolDeleted: %v", lbName, serviceName, clusterName, err)
return err
@@ -439,7 +451,7 @@ func (az *Cloud) cleanOrphanedLoadBalancer(lb *network.LoadBalancer, service *v1
// safeDeleteLoadBalancer deletes the load balancer after decoupling it from the vmSet
func (az *Cloud) safeDeleteLoadBalancer(lb network.LoadBalancer, clusterName, vmSetName string, service *v1.Service) error {
lbBackendPoolID := az.getBackendPoolID(to.String(lb.Name), az.getLoadBalancerResourceGroup(), getBackendPoolName(clusterName, service))
- err := az.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, lb.BackendAddressPools)
+ err := az.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, lb.BackendAddressPools, true)
if err != nil {
return fmt.Errorf("deleteDedicatedLoadBalancer: failed to EnsureBackendPoolDeleted: %w", err)
}
@@ -489,14 +501,19 @@ func (az *Cloud) reconcileSharedLoadBalancer(service *v1.Service, clusterName st
err error
)
+ existingLBs, err = az.ListManagedLBs(service, nodes, clusterName)
+ if err != nil {
+ return nil, fmt.Errorf("reconcileSharedLoadBalancer: failed to list LB: %w", err)
+ }
+
// skip this operation when wantLb=false
if nodes == nil {
- return nil, nil
+ return existingLBs, nil
}
// only run once since the controller manager rebooted
if az.isSharedLoadBalancerSynced {
- return nil, nil
+ return existingLBs, nil
}
defer func() {
if err == nil {
@@ -506,12 +523,7 @@ func (az *Cloud) reconcileSharedLoadBalancer(service *v1.Service, clusterName st
// skip if the cluster is using basic LB
if !az.useStandardLoadBalancer() {
- return nil, nil
- }
-
- existingLBs, err = az.ListAgentPoolLBs(service, nodes, clusterName)
- if err != nil {
- return nil, fmt.Errorf("reconcileSharedLoadBalancer: failed to list LB: %w", err)
+ return existingLBs, nil
}
lbBackendPoolName := getBackendPoolName(clusterName, service)
@@ -683,7 +695,7 @@ func (az *Cloud) getServiceLoadBalancer(service *v1.Service, clusterName string,
// select another load balancer instead of returning
// the current one if the change is needed
if wantLb && az.shouldChangeLoadBalancer(service, to.String(existingLB.Name), clusterName) {
- if err := az.removeFrontendIPConfigurationFromLoadBalancer(&existingLB, fipConfig, clusterName, service); err != nil {
+ if err := az.removeFrontendIPConfigurationFromLoadBalancer(&existingLB, existingLBs, fipConfig, clusterName, service); err != nil {
klog.Errorf("getServiceLoadBalancer(%s, %s, %v): failed to remove frontend IP configuration from load balancer: %v", service.Name, clusterName, wantLb, err)
return nil, nil, false, err
}
@@ -990,8 +1002,7 @@ func (az *Cloud) ensurePublicIPExists(service *v1.Service, pipName string, domai
// return if pip exist and dns label is the same
if strings.EqualFold(getDomainNameLabel(&pip), domainNameLabel) {
- if existingServiceName, ok := pip.Tags[consts.ServiceUsingDNSKey]; ok &&
- strings.EqualFold(*existingServiceName, serviceName) {
+ if existingServiceName := getServiceFromPIPDNSTags(pip.Tags); existingServiceName != "" && strings.EqualFold(existingServiceName, serviceName) {
klog.V(6).Infof("ensurePublicIPExists for service(%s): pip(%s) - "+
"the service is using the DNS label on the public IP", serviceName, pipName)
@@ -1146,10 +1157,8 @@ func (az *Cloud) reconcileIPSettings(pip *network.PublicIPAddress, service *v1.S
func reconcileDNSSettings(pip *network.PublicIPAddress, domainNameLabel, serviceName, pipName string) (bool, error) {
var changed bool
- if existingServiceName, ok := pip.Tags[consts.ServiceUsingDNSKey]; ok {
- if !strings.EqualFold(to.String(existingServiceName), serviceName) {
- return false, fmt.Errorf("ensurePublicIPExists for service(%s): pip(%s) - there is an existing service %s consuming the DNS label on the public IP, so the service cannot set the DNS label annotation with this value", serviceName, pipName, *existingServiceName)
- }
+ if existingServiceName := getServiceFromPIPDNSTags(pip.Tags); existingServiceName != "" && !strings.EqualFold(existingServiceName, serviceName) {
+ return false, fmt.Errorf("ensurePublicIPExists for service(%s): pip(%s) - there is an existing service %s consuming the DNS label on the public IP, so the service cannot set the DNS label annotation with this value", serviceName, pipName, existingServiceName)
}
if len(domainNameLabel) == 0 {
@@ -1172,8 +1181,7 @@ func reconcileDNSSettings(pip *network.PublicIPAddress, domainNameLabel, service
}
}
- if svc, ok := pip.Tags[consts.ServiceUsingDNSKey]; !ok ||
- !strings.EqualFold(to.String(svc), serviceName) {
+ if svc := getServiceFromPIPDNSTags(pip.Tags); svc == "" || !strings.EqualFold(svc, serviceName) {
pip.Tags[consts.ServiceUsingDNSKey] = &serviceName
changed = true
}
@@ -1182,6 +1190,48 @@ func reconcileDNSSettings(pip *network.PublicIPAddress, domainNameLabel, service
return changed, nil
}
+func getServiceFromPIPDNSTags(tags map[string]*string) string {
+ v, ok := tags[consts.ServiceUsingDNSKey]
+ if ok && v != nil {
+ return *v
+ }
+
+ v, ok = tags[consts.LegacyServiceUsingDNSKey]
+ if ok && v != nil {
+ return *v
+ }
+
+ return ""
+}
+
+func getServiceFromPIPServiceTags(tags map[string]*string) string {
+ v, ok := tags[consts.ServiceTagKey]
+ if ok && v != nil {
+ return *v
+ }
+
+ v, ok = tags[consts.LegacyServiceTagKey]
+ if ok && v != nil {
+ return *v
+ }
+
+ return ""
+}
+
+func getClusterFromPIPClusterTags(tags map[string]*string) string {
+ v, ok := tags[consts.ClusterNameKey]
+ if ok && v != nil {
+ return *v
+ }
+
+ v, ok = tags[consts.LegacyClusterNameKey]
+ if ok && v != nil {
+ return *v
+ }
+
+ return ""
+}
+
type serviceIPTagRequest struct {
IPTagsRequestedByAnnotation bool
IPTags *[]network.IPTag
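Each of the three helpers checks the new k8s-azure-* key first and falls back to the legacy key, so public IPs tagged by older releases keep resolving. A minimal sketch of the fallback, using a hypothetical tag map:

```go
// A PIP tagged by an old release carries only the legacy "service" key.
tags := map[string]*string{
	"service": to.StringPtr("default/my-svc"), // consts.LegacyServiceTagKey
}
owner := getServiceFromPIPServiceTags(tags) // "default/my-svc", found via the legacy key
```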
@@ -1477,15 +1527,6 @@ func (az *Cloud) findFrontendIPConfigOfService(
return nil, false, nil
}
-func nodeNameInNodes(nodeName string, nodes []*v1.Node) bool {
- for _, node := range nodes {
- if strings.EqualFold(nodeName, node.Name) {
- return true
- }
- }
- return false
-}
-
// reconcileLoadBalancer ensures load balancer exists and the frontend ip config is setup.
// This also reconciles the Service's Ports with the LoadBalancer config.
// This entails adding rules/probes for expected Ports and removing stale rules/ports.
@@ -1582,7 +1623,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
// If it is not exist, and no change to that, we don't CreateOrUpdate LB
if dirtyLb {
if lb.FrontendIPConfigurations == nil || len(*lb.FrontendIPConfigurations) == 0 {
- err := az.cleanOrphanedLoadBalancer(lb, service, clusterName)
+ err := az.cleanOrphanedLoadBalancer(lb, existingLBs, service, clusterName)
if err != nil {
klog.V(2).Infof("reconcileLoadBalancer for service(%s): lb(%s) - failed to cleanOrphanedLoadBalancer: %v", serviceName, lbName, err)
return nil, err
@@ -1896,7 +1937,12 @@ func (az *Cloud) reconcileBackendPools(clusterName string, service *v1.Service,
// would not be in the `nodes` slice. We need to check the nodes that
// have been added to the LB's backendpool, find the unwanted ones and
// delete them from the pool.
- if !nodeNameInNodes(nodeName, nodes) {
+ shouldExcludeLoadBalancer, err := az.ShouldNodeExcludedFromLoadBalancer(nodeName)
+ if err != nil {
+ klog.Errorf("ShouldNodeExcludedFromLoadBalancer(%s) failed with error: %v", nodeName, err)
+ return false, false, err
+ }
+ if shouldExcludeLoadBalancer {
klog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb backendpool - found unwanted node %s, decouple it from the LB", serviceName, wantLb, nodeName)
// construct a backendPool that only contains the IP config of the node to be deleted
backendIPConfigurationsToBeDeleted = append(backendIPConfigurationsToBeDeleted, network.InterfaceIPConfiguration{ID: to.StringPtr(ipConfID)})
@@ -1912,7 +1958,7 @@ func (az *Cloud) reconcileBackendPools(clusterName string, service *v1.Service,
},
}
// decouple the backendPool from the node
- err = az.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, backendpoolToBeDeleted)
+ err = az.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, backendpoolToBeDeleted, false)
if err != nil {
return false, false, err
}
@@ -2641,14 +2687,14 @@ func shouldReleaseExistingOwnedPublicIP(existingPip *network.PublicIPAddress, lb
// Check whether the public IP is being referenced by other service.
// The owned public IP can be released only when there is not other service using it.
- if existingPip.Tags[consts.ServiceTagKey] != nil {
+ if serviceTag := getServiceFromPIPServiceTags(existingPip.Tags); serviceTag != "" {
// case 1: there is at least one reference when deleting the PIP
- if !lbShouldExist && len(parsePIPServiceTag(existingPip.Tags[consts.ServiceTagKey])) > 0 {
+ if !lbShouldExist && len(parsePIPServiceTag(&serviceTag)) > 0 {
return false
}
// case 2: there is at least one reference from other service
- if lbShouldExist && len(parsePIPServiceTag(existingPip.Tags[consts.ServiceTagKey])) > 1 {
+ if lbShouldExist && len(parsePIPServiceTag(&serviceTag)) > 1 {
return false
}
}
@@ -2683,11 +2729,11 @@ func (az *Cloud) ensurePIPTagged(service *v1.Service, pip *network.PublicIPAddre
// include the cluster name and service names tags when comparing
var clusterName, serviceNames *string
- if v, ok := pip.Tags[consts.ClusterNameKey]; ok {
- clusterName = v
+ if v := getClusterFromPIPClusterTags(pip.Tags); v != "" {
+ clusterName = &v
}
- if v, ok := pip.Tags[consts.ServiceTagKey]; ok {
- serviceNames = v
+ if v := getServiceFromPIPServiceTags(pip.Tags); v != "" {
+ serviceNames = &v
}
if clusterName != nil {
configTags[consts.ClusterNameKey] = clusterName
@@ -3113,32 +3159,30 @@ func serviceOwnsPublicIP(service *v1.Service, pip *network.PublicIPAddress, clus
serviceName := getServiceName(service)
if pip.Tags != nil {
- serviceTag := pip.Tags[consts.ServiceTagKey]
- clusterTag := pip.Tags[consts.ClusterNameKey]
+ serviceTag := getServiceFromPIPServiceTags(pip.Tags)
+ clusterTag := getClusterFromPIPClusterTags(pip.Tags)
// if there is no service tag on the pip, it is user-created pip
- if to.String(serviceTag) == "" {
+ if serviceTag == "" {
return strings.EqualFold(to.String(pip.IPAddress), service.Spec.LoadBalancerIP), true
}
- if serviceTag != nil {
- // if there is service tag on the pip, it is system-created pip
- if isSVCNameInPIPTag(*serviceTag, serviceName) {
- // Backward compatible for clusters upgraded from old releases.
- // In such case, only "service" tag is set.
- if clusterTag == nil {
- return true, false
- }
+ // if there is service tag on the pip, it is system-created pip
+ if isSVCNameInPIPTag(serviceTag, serviceName) {
+ // Backward compatible for clusters upgraded from old releases.
+ // In such case, only "service" tag is set.
+ if clusterTag == "" {
+ return true, false
+ }
- // If cluster name tag is set, then return true if it matches.
- if *clusterTag == clusterName {
- return true, false
- }
- } else {
- // if the service is not included in te tags of the system-created pip, check the ip address
- // this could happen for secondary services
- return strings.EqualFold(to.String(pip.IPAddress), service.Spec.LoadBalancerIP), false
+ // If cluster name tag is set, then return true if it matches.
+ if clusterTag == clusterName {
+ return true, false
}
+ } else {
+ // if the service is not included in the tags of the system-created pip, check the ip address
+ // this could happen for secondary services
+ return strings.EqualFold(to.String(pip.IPAddress), service.Spec.LoadBalancerIP), false
}
}
@@ -3158,7 +3202,7 @@ func isSVCNameInPIPTag(tag, svcName string) bool {
}
func parsePIPServiceTag(serviceTag *string) []string {
- if serviceTag == nil {
+ if serviceTag == nil || len(*serviceTag) == 0 {
return []string{}
}
@@ -3188,7 +3232,7 @@ func bindServicesToPIP(pip *network.PublicIPAddress, incomingServiceNames []stri
pip.Tags = map[string]*string{consts.ServiceTagKey: to.StringPtr("")}
}
- serviceTagValue := pip.Tags[consts.ServiceTagKey]
+ serviceTagValue := to.StringPtr(getServiceFromPIPServiceTags(pip.Tags))
serviceTagValueSet := make(map[string]struct{})
existingServiceNames := parsePIPServiceTag(serviceTagValue)
addedNew := false
@@ -3232,7 +3276,7 @@ func unbindServiceFromPIP(pip *network.PublicIPAddress, service *v1.Service, ser
}
// skip removing tags for user assigned pips
- serviceTagValue := pip.Tags[consts.ServiceTagKey]
+ serviceTagValue := to.StringPtr(getServiceFromPIPServiceTags(pip.Tags))
existingServiceNames := parsePIPServiceTag(serviceTagValue)
var found bool
for i := len(existingServiceNames) - 1; i >= 0; i-- {
@@ -3250,10 +3294,8 @@ func unbindServiceFromPIP(pip *network.PublicIPAddress, service *v1.Service, ser
return err
}
- if existingServiceName, ok := pip.Tags[consts.ServiceUsingDNSKey]; ok {
- if strings.EqualFold(*existingServiceName, serviceName) {
- pip.Tags[consts.ServiceUsingDNSKey] = to.StringPtr("")
- }
+ if existingServiceName := getServiceFromPIPDNSTags(pip.Tags); existingServiceName != "" && strings.EqualFold(existingServiceName, serviceName) {
+ pip.Tags[consts.ServiceUsingDNSKey] = to.StringPtr("")
}
return nil
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_managedDiskController.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_managedDiskController.go
index 7cdd7d25db..09e439a646 100644
--- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_managedDiskController.go
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_managedDiskController.go
@@ -306,7 +306,7 @@ func (c *ManagedDiskController) GetDisk(resourceGroup, diskName string) (string,
}
// ResizeDisk Expand the disk to new size
-func (c *ManagedDiskController) ResizeDisk(diskURI string, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error) {
+func (c *ManagedDiskController) ResizeDisk(diskURI string, oldSize resource.Quantity, newSize resource.Quantity, supportOnlineResize bool) (resource.Quantity, error) {
ctx, cancel := getContextWithCancel()
defer cancel()
@@ -339,7 +339,7 @@ func (c *ManagedDiskController) ResizeDisk(diskURI string, oldSize resource.Quan
return newSizeQuant, nil
}
- if result.DiskProperties.DiskState != compute.Unattached {
+ if !supportOnlineResize && result.DiskProperties.DiskState != compute.Unattached {
return oldSize, fmt.Errorf("azureDisk - disk resize is only supported on Unattached disk, current disk state: %s, already attached to %s", result.DiskProperties.DiskState, to.String(result.ManagedBy))
}
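With supportOnlineResize=true the Unattached-state guard is skipped, so an attached disk can be expanded in place; with false the old behavior is kept. A hedged sketch, assuming c is a *ManagedDiskController:

```go
newQuant, err := c.ResizeDisk(diskURI, oldSize, newSize, true /* supportOnlineResize */)
if err != nil {
	// with supportOnlineResize=false this fails unless the disk state is Unattached
	return err
}
klog.V(2).Infof("azureDisk - resized %s to %s", diskURI, newQuant.String())
```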
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_routes.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_routes.go
index 6b2b32747b..4bc9823e51 100644
--- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_routes.go
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_routes.go
@@ -107,6 +107,7 @@ func (d *delayedRouteUpdater) updateRoutes() {
// No need to do any updating.
if len(d.routesToUpdate) == 0 {
+ klog.V(4).Info("updateRoutes: nothing to update, returning")
return
}
@@ -216,6 +217,8 @@ func (d *delayedRouteUpdater) cleanupOutdatedRoutes(existingRoutes []network.Rou
existingRouteName := to.String(existingRoutes[i].Name)
split := strings.Split(existingRouteName, consts.RouteNameSeparator)
+ klog.V(4).Infof("cleanupOutdatedRoutes: checking route %s", existingRouteName)
+
// filter out unmanaged routes
deleteRoute := false
if d.az.nodeNames.Has(split[0]) {
@@ -466,9 +469,8 @@ func (az *Cloud) DeleteRoute(ctx context.Context, clusterName string, kubeRoute
return nil
}
- klog.V(2).Infof("DeleteRoute: deleting route. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
-
routeName := mapNodeNameToRouteName(az.ipv6DualStackEnabled, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
+ klog.V(2).Infof("DeleteRoute: deleting route. clusterName=%q instance=%q cidr=%q routeName=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR, routeName)
route := network.Route{
Name: to.StringPtr(routeName),
RoutePropertiesFormat: &network.RoutePropertiesFormat{},
@@ -486,6 +488,28 @@ func (az *Cloud) DeleteRoute(ctx context.Context, clusterName string, kubeRoute
return err
}
+ // Remove outdated ipv4 routes as well
+ if az.ipv6DualStackEnabled {
+ routeNameWithoutIPV6Suffix := strings.Split(routeName, consts.RouteNameSeparator)[0]
+ klog.V(2).Infof("DeleteRoute: deleting route. clusterName=%q instance=%q cidr=%q routeName=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR, routeNameWithoutIPV6Suffix)
+ route := network.Route{
+ Name: to.StringPtr(routeNameWithoutIPV6Suffix),
+ RoutePropertiesFormat: &network.RoutePropertiesFormat{},
+ }
+ op, err := az.routeUpdater.addRouteOperation(routeOperationDelete, route)
+ if err != nil {
+ klog.Errorf("DeleteRoute failed for node %q with error: %v", kubeRoute.TargetNode, err)
+ return err
+ }
+
+ // Wait for operation complete.
+ err = op.wait()
+ if err != nil {
+ klog.Errorf("DeleteRoute failed for node %q with error: %v", kubeRoute.TargetNode, err)
+ return err
+ }
+ }
+
klog.V(2).Infof("DeleteRoute: route deleted. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
isOperationSucceeded = true
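In dual-stack clusters the route name carries an IPv6 suffix after consts.RouteNameSeparator, so the new block also deletes any leftover route under the bare node name left over from before dual-stack was enabled. A hedged sketch of the name handling (the route name and separator value shown are assumptions for illustration):

```go
// Sketch only; assumes consts.RouteNameSeparator is "____".
routeName := "aks-nodepool1-123____fe80" // hypothetical suffixed dual-stack route name
ipv4Name := strings.Split(routeName, consts.RouteNameSeparator)[0]
// ipv4Name == "aks-nodepool1-123": the pre-dual-stack route to clean up
```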
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_standard.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_standard.go
index 2ec9aa6035..6dc589e699 100644
--- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_standard.go
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_standard.go
@@ -586,6 +586,7 @@ func (as *availabilitySet) GetInstanceTypeByNodeName(name string) (string, error
// GetZoneByNodeName gets availability zone for the specified node. If the node is not running
// with availability zone, then it returns fault domain.
+// for details, refer to https://kubernetes-sigs.github.io/cloud-provider-azure/topics/availability-zones/#node-labels
func (as *availabilitySet) GetZoneByNodeName(name string) (cloudprovider.Zone, error) {
vm, err := as.getVirtualMachine(types.NodeName(name), azcache.CacheReadTypeUnsafe)
if err != nil {
@@ -949,7 +950,12 @@ func (as *availabilitySet) EnsureHostsInPool(service *v1.Service, nodes []*v1.No
continue
}
- if as.ShouldNodeExcludedFromLoadBalancer(node) {
+ shouldExcludeLoadBalancer, err := as.ShouldNodeExcludedFromLoadBalancer(localNodeName)
+ if err != nil {
+ klog.Errorf("ShouldNodeExcludedFromLoadBalancer(%s) failed with error: %v", localNodeName, err)
+ return err
+ }
+ if shouldExcludeLoadBalancer {
klog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", localNodeName)
continue
}
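
`ShouldNodeExcludedFromLoadBalancer` now takes a node name and can fail (for example on a cache miss), so every call site in this patch gains explicit error handling. A minimal sketch of the new call pattern; `shouldExclude` and `errCacheMiss` are illustrative stand-ins, not the upstream API:

```go
package main

import (
	"errors"
	"fmt"
)

// errCacheMiss stands in for the cache errors the real
// ShouldNodeExcludedFromLoadBalancer can now surface.
var errCacheMiss = errors.New("node cache miss")

// shouldExclude mirrors the new name-based, error-returning signature
// (the old variant took a *v1.Node and returned only a bool).
func shouldExclude(nodeName string) (bool, error) {
	if nodeName == "" {
		return false, errCacheMiss
	}
	return nodeName == "external-node", nil
}

func main() {
	for _, n := range []string{"node-1", "external-node", ""} {
		exclude, err := shouldExclude(n)
		if err != nil {
			fmt.Printf("shouldExclude(%s) failed: %v\n", n, err)
			continue
		}
		if exclude {
			fmt.Printf("excluding node %s\n", n)
			continue
		}
		fmt.Printf("keeping node %s\n", n)
	}
}
```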
@@ -974,7 +980,7 @@ func (as *availabilitySet) EnsureHostsInPool(service *v1.Service, nodes []*v1.No
}
// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified nodes.
-func (as *availabilitySet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error {
+func (as *availabilitySet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool, deleteFromVMSet bool) error {
// Returns nil if backend address pools already deleted.
if backendAddressPools == nil {
return nil
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storageaccount.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storageaccount.go
index a3cdaa7ef0..480e89e6d3 100644
--- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storageaccount.go
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_storageaccount.go
@@ -159,6 +159,18 @@ func (az *Cloud) EnsureStorageAccount(ctx context.Context, accountOptions *Accou
}
}
+ vnetResourceGroup := az.ResourceGroup
+ if len(az.VnetResourceGroup) > 0 {
+ vnetResourceGroup = az.VnetResourceGroup
+ }
+
+ if accountOptions.CreatePrivateEndpoint {
+ // Create the DNS zone first; this also verifies that the driver has write permission on vnetResourceGroup
+ if err := az.createPrivateDNSZone(ctx, vnetResourceGroup); err != nil {
+ return "", "", fmt.Errorf("Failed to create private DNS zone(%s) in resourceGroup(%s), error: %v", PrivateDNSZoneName, vnetResourceGroup, err)
+ }
+ }
+
if createNewAccount {
// set network rules for storage account
var networkRuleSet *storage.NetworkRuleSet
@@ -250,10 +262,6 @@ func (az *Cloud) EnsureStorageAccount(ctx context.Context, accountOptions *Accou
}
if accountOptions.CreatePrivateEndpoint {
- vnetResourceGroup := az.ResourceGroup
- if len(az.VnetResourceGroup) > 0 {
- vnetResourceGroup = az.VnetResourceGroup
- }
// Get properties of the storageAccount
storageAccount, err := az.StorageAccountClient.GetProperties(ctx, resourceGroup, accountName)
if err != nil {
@@ -266,11 +274,6 @@ func (az *Cloud) EnsureStorageAccount(ctx context.Context, accountOptions *Accou
return "", "", fmt.Errorf("Failed to create private endpoint for storage account(%s), resourceGroup(%s), error: %v", accountName, vnetResourceGroup, err)
}
- // Create DNS zone
- if err := az.createPrivateDNSZone(ctx, vnetResourceGroup); err != nil {
- return "", "", fmt.Errorf("Failed to create private DNS zone(%s) in resourceGroup(%s), error: %v", PrivateDNSZoneName, vnetResourceGroup, err)
- }
-
// Create virtual link to the zone private DNS zone
vNetLinkName := accountName + "-vnetlink"
if err := az.createVNetLink(ctx, vNetLinkName, vnetResourceGroup); err != nil {
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_utils.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_utils.go
index 1ea88e23ff..ebc025bb1d 100644
--- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_utils.go
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_utils.go
@@ -100,8 +100,12 @@ func convertMapToMapPointer(origin map[string]string) map[string]*string {
}
func parseTags(tags string) map[string]*string {
- kvs := strings.Split(tags, consts.TagsDelimiter)
formatted := make(map[string]*string)
+ if len(tags) == 0 {
+ return formatted
+ }
+
+ kvs := strings.Split(tags, consts.TagsDelimiter)
for _, kv := range kvs {
res := strings.Split(kv, consts.TagKeyValueDelimiter)
if len(res) != 2 {
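
The early return added above matters because `strings.Split` on an empty string yields a one-element slice, so the old code would iterate once over a bogus empty key/value pair. A quick demonstration:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Splitting the empty string does not produce an empty slice:
	parts := strings.Split("", ",")
	fmt.Println(len(parts)) // 1, not 0
	fmt.Printf("%q\n", parts) // [""]
}
```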
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmsets.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmsets.go
index e654c60673..1de08b649f 100644
--- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmsets.go
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmsets.go
@@ -66,7 +66,7 @@ type VMSet interface {
// participating in the specified LoadBalancer Backend Pool.
EnsureHostInPool(service *v1.Service, nodeName types.NodeName, backendPoolID string, vmSetName string, isInternal bool) (string, string, string, *compute.VirtualMachineScaleSetVM, error)
// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified nodes.
- EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error
+ EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool, deleteFromVMSet bool) error
//EnsureBackendPoolDeletedFromVMSets ensures the loadBalancer backendAddressPools deleted from the specified VMSS/VMAS
EnsureBackendPoolDeletedFromVMSets(vmSetNamesMap map[string]bool, backendPoolID string) error
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss.go
index c7b52abbbf..1ccaccd254 100644
--- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss.go
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_vmss.go
@@ -76,6 +76,8 @@ type ScaleSet struct {
vmssCache *azcache.TimedCache
vmssVMCache *sync.Map // [resourcegroup/vmssname]*azcache.TimedCache
availabilitySetNodesCache *azcache.TimedCache
+ // lockMap serializes concurrent cache refreshes per cache key
+ lockMap *lockMap
}
// newScaleSet creates a new ScaleSet.
@@ -93,6 +95,7 @@ func newScaleSet(az *Cloud) (VMSet, error) {
Cloud: az,
availabilitySet: as,
vmssVMCache: &sync.Map{},
+ lockMap: newLockMap(),
}
if !ss.DisableAvailabilitySetNodes {
@@ -183,7 +186,16 @@ func (ss *ScaleSet) getVmssVMByNodeIdentity(node *nodeIdentity, crt azcache.Azur
}
if !found {
- klog.V(2).Infof("Couldn't find VMSS VM with nodeName %s, refreshing the cache", node.nodeName)
+ // lock and try to find nodeName in the cache again; refresh the cache if it is still not found
+ ss.lockMap.LockEntry(cacheKey)
+ defer ss.lockMap.UnlockEntry(cacheKey)
+ vmssName, instanceID, vm, found, err = getter(node.nodeName, crt)
+ if err == nil && found && vm != nil {
+ klog.V(2).Infof("found VMSS VM with nodeName %s after retry", node.nodeName)
+ return vmssName, instanceID, vm, nil
+ }
+
+ klog.V(2).Infof("Couldn't find VMSS VM with nodeName %s, refreshing the cache(vmss: %s, rg: %s)", node.nodeName, node.vmssName, node.resourceGroup)
vmssName, instanceID, vm, found, err = getter(node.nodeName, azcache.CacheReadTypeForceRefresh)
if err != nil {
return "", "", nil, err
@@ -744,11 +756,16 @@ func (ss *ScaleSet) getAgentPoolScaleSets(nodes []*v1.Node) (*[]string, error) {
continue
}
- if ss.ShouldNodeExcludedFromLoadBalancer(nodes[nx]) {
+ nodeName := nodes[nx].Name
+ shouldExcludeLoadBalancer, err := ss.ShouldNodeExcludedFromLoadBalancer(nodeName)
+ if err != nil {
+ klog.Errorf("ShouldNodeExcludedFromLoadBalancer(%s) failed with error: %v", nodeName, err)
+ return nil, err
+ }
+ if shouldExcludeLoadBalancer {
continue
}
- nodeName := nodes[nx].Name
ssName, _, _, err := ss.getVmssVM(nodeName, azcache.CacheReadTypeDefault)
if err != nil {
return nil, err
@@ -953,6 +970,11 @@ func (ss *ScaleSet) EnsureHostInPool(service *v1.Service, nodeName types.NodeNam
vmName := mapNodeNameToVMName(nodeName)
ssName, instanceID, vm, err := ss.getVmssVM(vmName, azcache.CacheReadTypeDefault)
if err != nil {
+ if errors.Is(err, cloudprovider.InstanceNotFound) {
+ klog.Infof("EnsureHostInPool: skipping node %s because it is not found", vmName)
+ return "", "", "", nil, nil
+ }
+
klog.Errorf("EnsureHostInPool: failed to get VMSS VM %s: %v", vmName, err)
return "", "", "", nil, err
}
@@ -1092,7 +1114,12 @@ func (ss *ScaleSet) ensureVMSSInPool(service *v1.Service, nodes []*v1.Node, back
continue
}
- if ss.ShouldNodeExcludedFromLoadBalancer(node) {
+ shouldExcludeLoadBalancer, err := ss.ShouldNodeExcludedFromLoadBalancer(node.Name)
+ if err != nil {
+ klog.Errorf("ShouldNodeExcludedFromLoadBalancer(%s) failed with error: %v", node.Name, err)
+ return err
+ }
+ if shouldExcludeLoadBalancer {
klog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", node.Name)
continue
}
@@ -1235,7 +1262,12 @@ func (ss *ScaleSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, bac
continue
}
- if ss.ShouldNodeExcludedFromLoadBalancer(node) {
+ shouldExcludeLoadBalancer, err := ss.ShouldNodeExcludedFromLoadBalancer(localNodeName)
+ if err != nil {
+ klog.Errorf("ShouldNodeExcludedFromLoadBalancer(%s) failed with error: %v", localNodeName, err)
+ return err
+ }
+ if shouldExcludeLoadBalancer {
klog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", localNodeName)
continue
}
@@ -1333,6 +1365,11 @@ func (ss *ScaleSet) EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, bac
func (ss *ScaleSet) ensureBackendPoolDeletedFromNode(nodeName, backendPoolID string) (string, string, string, *compute.VirtualMachineScaleSetVM, error) {
ssName, instanceID, vm, err := ss.getVmssVM(nodeName, azcache.CacheReadTypeDefault)
if err != nil {
+ if errors.Is(err, cloudprovider.InstanceNotFound) {
+ klog.Infof("ensureBackendPoolDeletedFromNode: skipping node %s because it is not found", nodeName)
+ return "", "", "", nil, nil
+ }
+
return "", "", "", nil, err
}
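
Several helpers above now treat `cloudprovider.InstanceNotFound` as "nothing to do" rather than a hard failure, so load-balancer reconciliation no longer wedges on nodes deleted mid-flight. A small sketch of the skip-on-not-found pattern; `instanceNotFound` and `getVM` are illustrative stand-ins:

```go
package main

import (
	"errors"
	"fmt"
)

// instanceNotFound stands in for cloudprovider.InstanceNotFound.
var instanceNotFound = errors.New("instance not found")

func getVM(name string) (string, error) {
	if name == "deleted-node" {
		return "", fmt.Errorf("lookup failed: %w", instanceNotFound)
	}
	return "vm-" + name, nil
}

// ensureDeleted skips a missing VM instead of failing the whole operation.
func ensureDeleted(name string) error {
	vm, err := getVM(name)
	if err != nil {
		if errors.Is(err, instanceNotFound) {
			fmt.Printf("skipping node %s: not found\n", name)
			return nil
		}
		return err
	}
	fmt.Println("cleaning up", vm)
	return nil
}

func main() {
	_ = ensureDeleted("deleted-node")
	_ = ensureDeleted("node-1")
}
```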
@@ -1461,7 +1498,7 @@ func (ss *ScaleSet) ensureBackendPoolDeletedFromVMSS(service *v1.Service, backen
}
// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified nodes.
-func (ss *ScaleSet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool) error {
+func (ss *ScaleSet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool, deleteFromVMSet bool) error {
// Returns nil if backend address pools already deleted.
if backendAddressPools == nil {
return nil
@@ -1507,6 +1544,11 @@ func (ss *ScaleSet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID,
continue
}
+ if errors.Is(err, cloudprovider.InstanceNotFound) {
+ klog.Infof("EnsureBackendPoolDeleted(%s): skipping ip config %s because the corresponding vmss vm is not found", getServiceName(service), ipConfigurationID)
+ continue
+ }
+
klog.Errorf("Failed to GetNodeNameByIPConfigurationID(%s): %v", ipConfigurationID, err)
allErrs = append(allErrs, err)
continue
@@ -1570,9 +1612,11 @@ func (ss *ScaleSet) EnsureBackendPoolDeleted(service *v1.Service, backendPoolID,
}
// Ensure the backendPoolID is also deleted on VMSS itself.
- err := ss.ensureBackendPoolDeletedFromVMSS(service, backendPoolID, vmSetName, ipConfigurationIDs)
- if err != nil {
- return err
+ if deleteFromVMSet {
+ err := ss.ensureBackendPoolDeletedFromVMSS(service, backendPoolID, vmSetName, ipConfigurationIDs)
+ if err != nil {
+ return err
+ }
}
isOperationSucceeded = true
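
The new `deleteFromVMSet` parameter makes the VMSS-model cleanup optional while the per-VM cleanup still always runs. A sketch of the resulting control flow, with illustrative names rather than the upstream signatures:

```go
package main

import "fmt"

// ensureBackendPoolDeleted sketches the patched control flow: per-VM
// cleanup always runs, while scrubbing the pool from the VMSS model
// itself is now opt-in via the flag.
func ensureBackendPoolDeleted(poolID string, deleteFromVMSet bool) error {
	fmt.Println("removing", poolID, "from individual VMSS VMs")
	if deleteFromVMSet {
		fmt.Println("removing", poolID, "from the VMSS model itself")
	}
	return nil
}

func main() {
	_ = ensureBackendPoolDeleted("backendPool-1", true)
	_ = ensureBackendPoolDeleted("backendPool-2", false)
}
```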
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_wrap.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_wrap.go
index f9d279906a..32b6fed723 100644
--- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_wrap.go
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_wrap.go
@@ -80,6 +80,10 @@ func (az *Cloud) getVirtualMachine(nodeName types.NodeName, crt azcache.AzureCac
}
func (az *Cloud) getRouteTable(crt azcache.AzureCacheReadType) (routeTable network.RouteTable, exists bool, err error) {
+ if len(az.RouteTableName) == 0 {
+ return routeTable, false, fmt.Errorf("Route table name is not configured")
+ }
+
cachedRt, err := az.rtCache.Get(az.RouteTableName, crt)
if err != nil {
return routeTable, false, err
diff --git a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_zones.go b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_zones.go
index d4abbf12e4..52e178c013 100644
--- a/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_zones.go
+++ b/vendor/sigs.k8s.io/cloud-provider-azure/pkg/provider/azure_zones.go
@@ -18,6 +18,7 @@ package provider
import (
"context"
+ "errors"
"fmt"
"os"
"strconv"
@@ -31,6 +32,7 @@ import (
azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache"
"sigs.k8s.io/cloud-provider-azure/pkg/consts"
+ "sigs.k8s.io/cloud-provider-azure/pkg/retry"
)
func (az *Cloud) refreshZones(refreshFunc func() error) {
@@ -43,15 +45,14 @@ func (az *Cloud) refreshZones(refreshFunc func() error) {
}
func (az *Cloud) syncRegionZonesMap() error {
- klog.V(2).Infof("refreshZones: starting to fetch all available zones for the subscription %s", az.SubscriptionID)
+ klog.V(2).Infof("syncRegionZonesMap: starting to fetch all available zones for the subscription %s", az.SubscriptionID)
zones, rerr := az.ZoneClient.GetZones(context.Background(), az.SubscriptionID)
if rerr != nil {
- klog.Warningf("refreshZones: error when get zones: %s, will retry after %s", rerr.Error().Error(), consts.ZoneFetchingInterval.String())
+ klog.Warningf("syncRegionZonesMap: error when get zones: %s, will retry after %s", rerr.Error().Error(), consts.ZoneFetchingInterval.String())
return rerr.Error()
}
if len(zones) == 0 {
- klog.Warningf("refreshZones: empty zone list, will retry after %s", consts.ZoneFetchingInterval.String())
- return fmt.Errorf("empty zone list")
+ klog.Warning("syncRegionZonesMap: empty zone list")
}
az.updateRegionZonesMap(zones)
@@ -89,25 +90,34 @@ func (az *Cloud) getRegionZonesBackoff(region string) ([]string, error) {
klog.V(2).Infof("getRegionZonesMapWrapper: the region-zones map is not initialized successfully, retrying immediately")
+ var (
+ zones map[string][]string
+ rerr *retry.Error
+ )
err := wait.ExponentialBackoff(az.RequestBackoff(), func() (done bool, err error) {
- zones, rerr := az.ZoneClient.GetZones(context.Background(), az.SubscriptionID)
- if len(zones) == 0 || rerr != nil {
- klog.Warningf("getRegionZonesMapWrapper: failed to fetch zones information: %v", rerr.Error())
+ zones, rerr = az.ZoneClient.GetZones(context.Background(), az.SubscriptionID)
+ if rerr != nil {
+ klog.Errorf("getRegionZonesMapWrapper: failed to fetch zones information: %v", rerr.Error())
return false, nil
}
- az.updateRegionZonesMap(zones)
return true, nil
})
- if err != nil {
- return []string{}, fmt.Errorf("cannot get zones information of %s after %d time retry", region, az.RequestBackoff().Steps)
+ if errors.Is(err, wait.ErrWaitTimeout) {
+ return []string{}, rerr.Error()
}
- az.refreshZonesLock.RLock()
- defer az.refreshZonesLock.RUnlock()
+ az.updateRegionZonesMap(zones)
+
+ if len(az.regionZonesMap) != 0 {
+ az.refreshZonesLock.RLock()
+ defer az.refreshZonesLock.RUnlock()
+
+ return az.regionZonesMap[region], nil
+ }
- return az.regionZonesMap[region], nil
+ return []string{}, nil
}
// makeZone returns the zone value in format of <region>-<zoneID>.
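
The rewritten backoff loop in `getRegionZonesBackoff` hoists `zones` and `rerr` out of the retry closure so their final values remain visible after `wait.ExponentialBackoff` returns, and maps a timeout back to the last Azure error. A minimal sketch of that pattern; `fetchZones` is a hypothetical stand-in for `ZoneClient.GetZones`:

```go
package main

import (
	"errors"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// fetchZones is a hypothetical stand-in for ZoneClient.GetZones.
func fetchZones() (map[string][]string, error) {
	return map[string][]string{"eastus": {"1", "2", "3"}}, nil
}

func main() {
	// Hoist the results out of the closure so they survive the loop.
	var zones map[string][]string
	var lastErr error

	backoff := wait.Backoff{Duration: 10 * time.Millisecond, Factor: 2, Steps: 3}
	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		zones, lastErr = fetchZones()
		if lastErr != nil {
			return false, nil // retry on transient failure
		}
		return true, nil
	})
	if errors.Is(err, wait.ErrWaitTimeout) {
		// Surface the last underlying error, not the generic timeout.
		fmt.Println("gave up:", lastErr)
		return
	}
	fmt.Println("zones:", zones)
}
```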
@@ -133,13 +143,13 @@ func (az *Cloud) GetZoneID(zoneLabel string) string {
// If the node is not running with availability zones, then it will fall back to fault domain.
func (az *Cloud) GetZone(ctx context.Context) (cloudprovider.Zone, error) {
if az.UseInstanceMetadata {
- metadata, err := az.metadata.GetMetadata(azcache.CacheReadTypeUnsafe)
+ metadata, err := az.Metadata.GetMetadata(azcache.CacheReadTypeUnsafe)
if err != nil {
return cloudprovider.Zone{}, err
}
if metadata.Compute == nil {
- _ = az.metadata.imsCache.Delete(consts.MetadataCacheKey)
+ _ = az.Metadata.imsCache.Delete(consts.MetadataCacheKey)
return cloudprovider.Zone{}, fmt.Errorf("failure of getting compute information from instance metadata")
}
diff --git a/vendor/sigs.k8s.io/yaml/.gitignore b/vendor/sigs.k8s.io/yaml/.gitignore
index e256a31e00..2dc92904ef 100644
--- a/vendor/sigs.k8s.io/yaml/.gitignore
+++ b/vendor/sigs.k8s.io/yaml/.gitignore
@@ -6,6 +6,10 @@
.project
.settings/**
+# Idea files
+.idea/**
+.idea/
+
# Emacs save files
*~
diff --git a/vendor/sigs.k8s.io/yaml/.travis.yml b/vendor/sigs.k8s.io/yaml/.travis.yml
index d20e23eff4..54ed8f9cb9 100644
--- a/vendor/sigs.k8s.io/yaml/.travis.yml
+++ b/vendor/sigs.k8s.io/yaml/.travis.yml
@@ -1,8 +1,7 @@
language: go
-dist: xenial
-go:
- - 1.12.x
- - 1.13.x
+arch: arm64
+dist: focal
+go: 1.15.x
script:
- diff -u <(echo -n) <(gofmt -d *.go)
- diff -u <(echo -n) <(golint $(go list -e ./...) | grep -v YAMLToJSON)
diff --git a/vendor/sigs.k8s.io/yaml/README.md b/vendor/sigs.k8s.io/yaml/README.md
index 5a651d9163..e81cc426be 100644
--- a/vendor/sigs.k8s.io/yaml/README.md
+++ b/vendor/sigs.k8s.io/yaml/README.md
@@ -107,8 +107,8 @@ func main() {
}
fmt.Println(string(y))
/* Output:
- name: John
age: 30
+ name: John
*/
j2, err := yaml.YAMLToJSON(y)
if err != nil {
diff --git a/vendor/sigs.k8s.io/yaml/go.mod b/vendor/sigs.k8s.io/yaml/go.mod
index 7224f34971..818bbb5193 100644
--- a/vendor/sigs.k8s.io/yaml/go.mod
+++ b/vendor/sigs.k8s.io/yaml/go.mod
@@ -4,5 +4,5 @@ go 1.12
require (
github.com/davecgh/go-spew v1.1.1
- gopkg.in/yaml.v2 v2.2.8
+ gopkg.in/yaml.v2 v2.4.0
)
diff --git a/vendor/sigs.k8s.io/yaml/go.sum b/vendor/sigs.k8s.io/yaml/go.sum
index 76e49483af..b7b8cbb104 100644
--- a/vendor/sigs.k8s.io/yaml/go.sum
+++ b/vendor/sigs.k8s.io/yaml/go.sum
@@ -1,9 +1,6 @@
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo=
-gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
-gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=