diff --git a/VERSION b/VERSION index f7280cbb24..119e2e11ea 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -v1.18.7-dev \ No newline at end of file +v1.18.10-dev \ No newline at end of file diff --git a/go.mod b/go.mod index 56269fc9ce..6b506a4072 100644 --- a/go.mod +++ b/go.mod @@ -3,30 +3,30 @@ module sigs.k8s.io/cloud-provider-azure go 1.13 replace ( - // these replacements are pinned to dff82dc0de47 which is the sha associated with the 0.18.6 tag on k/k - // as you cannot pin them to v0.18.6 directly - k8s.io/api => k8s.io/kubernetes/staging/src/k8s.io/api v0.0.0-20200715165012-dff82dc0de47 - k8s.io/apiextensions-apiserver => k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20200715165012-dff82dc0de47 - k8s.io/apimachinery => k8s.io/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20200715165012-dff82dc0de47 - k8s.io/apiserver => k8s.io/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20200715165012-dff82dc0de47 - k8s.io/cli-runtime => k8s.io/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20200715165012-dff82dc0de47 - k8s.io/client-go => k8s.io/kubernetes/staging/src/k8s.io/client-go v0.0.0-20200715165012-dff82dc0de47 - k8s.io/cloud-provider => k8s.io/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20200715165012-dff82dc0de47 - k8s.io/cluster-bootstrap => k8s.io/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20200715165012-dff82dc0de47 - k8s.io/code-generator => k8s.io/kubernetes/staging/src/k8s.io/code-generator v0.0.0-20200715165012-dff82dc0de47 - k8s.io/component-base => k8s.io/kubernetes/staging/src/k8s.io/component-base v0.0.0-20200715165012-dff82dc0de47 - k8s.io/cri-api => k8s.io/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20200715165012-dff82dc0de47 - k8s.io/csi-translation-lib => k8s.io/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20200715165012-dff82dc0de47 - k8s.io/kube-aggregator => k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20200715165012-dff82dc0de47 - k8s.io/kube-controller-manager => k8s.io/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20200715165012-dff82dc0de47 - k8s.io/kube-proxy => k8s.io/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20200715165012-dff82dc0de47 - k8s.io/kube-scheduler => k8s.io/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20200715165012-dff82dc0de47 - k8s.io/kubectl => k8s.io/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20200715165012-dff82dc0de47 - k8s.io/kubelet => k8s.io/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20200715165012-dff82dc0de47 - k8s.io/legacy-cloud-providers => k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20200715165012-dff82dc0de47 - k8s.io/metrics => k8s.io/kubernetes/staging/src/k8s.io/metrics v0.0.0-20200715165012-dff82dc0de47 - k8s.io/node-api => k8s.io/kubernetes/staging/src/k8s.io/node-api v0.0.0-20200715165012-dff82dc0de47 - k8s.io/sample-apiserver => k8s.io/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20200715165012-dff82dc0de47 + // these replacements are pinned to 62876fc6d93e which is the sha associated with the 0.18.10 tag on k/k + // as you cannot pin them to v0.18.10 directly + k8s.io/api => k8s.io/kubernetes/staging/src/k8s.io/api v0.0.0-20201015014302-62876fc6d93e + k8s.io/apiextensions-apiserver => k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20201015014302-62876fc6d93e + k8s.io/apimachinery => k8s.io/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20201015014302-62876fc6d93e + k8s.io/apiserver => k8s.io/kubernetes/staging/src/k8s.io/apiserver 
v0.0.0-20201015014302-62876fc6d93e + k8s.io/cli-runtime => k8s.io/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20201015014302-62876fc6d93e + k8s.io/client-go => k8s.io/kubernetes/staging/src/k8s.io/client-go v0.0.0-20201015014302-62876fc6d93e + k8s.io/cloud-provider => k8s.io/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20201015014302-62876fc6d93e + k8s.io/cluster-bootstrap => k8s.io/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20201015014302-62876fc6d93e + k8s.io/code-generator => k8s.io/kubernetes/staging/src/k8s.io/code-generator v0.0.0-20201015014302-62876fc6d93e + k8s.io/component-base => k8s.io/kubernetes/staging/src/k8s.io/component-base v0.0.0-20201015014302-62876fc6d93e + k8s.io/cri-api => k8s.io/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20201015014302-62876fc6d93e + k8s.io/csi-translation-lib => k8s.io/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20201015014302-62876fc6d93e + k8s.io/kube-aggregator => k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20201015014302-62876fc6d93e + k8s.io/kube-controller-manager => k8s.io/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20201015014302-62876fc6d93e + k8s.io/kube-proxy => k8s.io/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20201015014302-62876fc6d93e + k8s.io/kube-scheduler => k8s.io/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20201015014302-62876fc6d93e + k8s.io/kubectl => k8s.io/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20201015014302-62876fc6d93e + k8s.io/kubelet => k8s.io/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20201015014302-62876fc6d93e + k8s.io/legacy-cloud-providers => k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20201015014302-62876fc6d93e + k8s.io/metrics => k8s.io/kubernetes/staging/src/k8s.io/metrics v0.0.0-20201015014302-62876fc6d93e + k8s.io/node-api => k8s.io/kubernetes/staging/src/k8s.io/node-api v0.0.0-20201015014302-62876fc6d93e + k8s.io/sample-apiserver => k8s.io/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20201015014302-62876fc6d93e ) require ( @@ -45,7 +45,7 @@ require ( k8s.io/cloud-provider v0.0.0 k8s.io/component-base v0.0.0 k8s.io/klog v1.0.0 - k8s.io/kubernetes v1.18.6 + k8s.io/kubernetes v1.18.10 k8s.io/legacy-cloud-providers v0.0.0 k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 ) diff --git a/go.sum b/go.sum index 1d8b197de6..6962841df0 100644 --- a/go.sum +++ b/go.sum @@ -136,8 +136,8 @@ github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/euank/go-kmsg-parser v2.0.0+incompatible/go.mod h1:MhmAMZ8V4CYH4ybgdRwPr2TU5ThnS43puaKEMpja1uw= -github.com/evanphx/json-patch v4.2.0+incompatible h1:fUDGZCv/7iAN7u0puUVhvKCcsR6vRfwrJatElLBEf0I= -github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses= +github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= github.com/fatih/color v1.6.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ 
-770,41 +770,41 @@ k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6 h1:Oh3Mzx5pJ+yIumsAD0MOECPVeXsVot0UkiaCGVyfGQY= k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= -k8s.io/kubernetes v1.18.6 h1:2rkR3ffvd5YVyPYU4LAUDCKoKQZtjuuj8ga15mbv96o= -k8s.io/kubernetes v1.18.6/go.mod h1:Efg82S+Ti02A/Mww53bxroc7IgzX2bgPsf6hT8gAs3M= -k8s.io/kubernetes/staging/src/k8s.io/api v0.0.0-20200715165012-dff82dc0de47 h1:l/uxU4KK2L6u1whk4aoSloAOExfKH6byKLccAXlKanQ= -k8s.io/kubernetes/staging/src/k8s.io/api v0.0.0-20200715165012-dff82dc0de47/go.mod h1:oMzWB6/RPBLYAObltLVSu5Ms1ZztBe7G8s1ni2rZY7w= -k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20200715165012-dff82dc0de47/go.mod h1:BVIYewlEVCukQBRrZR3Kms8GdCsDQBsRIBCoy3rwzMk= -k8s.io/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20200715165012-dff82dc0de47 h1:joOXISJUu25AmQsJ/+nvn/czee73FzPRHr3fG7LW0Zk= -k8s.io/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20200715165012-dff82dc0de47/go.mod h1:0LbhSvBf6oDO/G0IsPYTC3eGykX9kRjGqE1+90am7Pg= -k8s.io/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20200715165012-dff82dc0de47 h1:IFo8tnb0TY+l8D5QhTT+uBSUwaMRoUonBiZOawGbWbE= -k8s.io/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20200715165012-dff82dc0de47/go.mod h1:wYoVKxMBc/Gtl3o5eEhoIy1iS0Zw8kLYIak9mud65gg= -k8s.io/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20200715165012-dff82dc0de47/go.mod h1:e0a+/gPy7PnNaRJHZz5E3lqfMsiJ17sSfvktHyipb3I= -k8s.io/kubernetes/staging/src/k8s.io/client-go v0.0.0-20200715165012-dff82dc0de47 h1:oxj2RchmMH61NmIgsa+eIY5H7A2659UChFhzDQlr8UI= -k8s.io/kubernetes/staging/src/k8s.io/client-go v0.0.0-20200715165012-dff82dc0de47/go.mod h1:Ck7kQmlFASfY0SaqYH1NwUrxeuAipkIbnuHi642eQ+I= -k8s.io/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20200715165012-dff82dc0de47 h1:QUwGsDuuOqcsVL4AcKPmMx67m9wc0xj7M18qE0ek1oM= -k8s.io/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20200715165012-dff82dc0de47/go.mod h1:jW0IWD1v1cNcp/vvXbVuovmZNSieKSZBdM7VmX1lrVI= -k8s.io/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20200715165012-dff82dc0de47/go.mod h1:oHXhD/NqW/vlYggpTUWbP2x6disww69H1jdsyirbJl8= -k8s.io/kubernetes/staging/src/k8s.io/code-generator v0.0.0-20200715165012-dff82dc0de47/go.mod h1:qBtAbyavqI3lGwEvxrQk9wwUTWntOADx38Iizyn31nw= -k8s.io/kubernetes/staging/src/k8s.io/component-base v0.0.0-20200715165012-dff82dc0de47 h1:14FqkdAOzaljwGncDpmNWHy444f5UNZKz2JY5K4vW1I= -k8s.io/kubernetes/staging/src/k8s.io/component-base v0.0.0-20200715165012-dff82dc0de47/go.mod h1:zRlCznOsLYdwq5DB2b/26X/n/04fhV3U3rMC60t80/Q= -k8s.io/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20200715165012-dff82dc0de47 h1:SeTF6Uqlv8mN8PWajqWXL8e8W/Fip0rA5i2UJopqPyg= -k8s.io/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20200715165012-dff82dc0de47/go.mod h1:O3AtmT8iqccYwp/fsXdy3h0N9X/yfvRMD2XS48PJrBk= -k8s.io/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20200715165012-dff82dc0de47 h1:VApjPW5rLjSTVYX12uT5q0aZ3jJnuNrtEyltZwyOOa4= -k8s.io/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20200715165012-dff82dc0de47/go.mod h1:/YQL/PqGdoNbC2H+w4tx2zrVdxNb541lW3PA81FdOlE= -k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20200715165012-dff82dc0de47/go.mod h1:NcOKzNVVRhmkQmzCcBHfPPcZqgGXouc/o3Eul3saPj8= -k8s.io/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20200715165012-dff82dc0de47 
h1:9Nha6Oxckuw6/Yz4jf1GPUHMThvIaZkHfrT//oUr00M= -k8s.io/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20200715165012-dff82dc0de47/go.mod h1:pABoR/v0r2aJLFC1570FaaRJbXyiHhqdGHe5W8nk0XY= -k8s.io/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20200715165012-dff82dc0de47/go.mod h1:GLAmLACy/nOND24DRGKyPH21F89pTcevjPRxEtbLJmU= -k8s.io/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20200715165012-dff82dc0de47 h1:py8LUDosqRD33FwgIQj7dfRkQfJVrBaAcNFP43uMN+Y= -k8s.io/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20200715165012-dff82dc0de47/go.mod h1:UNQ/Ff/Mq9mmCl0MYGl3ciCEIRQr9BT+/DSsoy6/ZMI= -k8s.io/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20200715165012-dff82dc0de47 h1:6btTroBc9vyjMr/+hOTkcRDt2Wa5FHO94kQmKKkTxts= -k8s.io/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20200715165012-dff82dc0de47/go.mod h1:eosbAJR16uuWsgirnmlt31NV+ZwZLQsMNbxiRZYbco8= -k8s.io/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20200715165012-dff82dc0de47/go.mod h1:Raj75cxSm9NiVBoLk/lB1D4XvpBzTG4WoJ6nIH8Cyew= -k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20200715165012-dff82dc0de47 h1:UktVRCPHRdmymgtpWvpJo1BBBXdeGprhRxS1ikhxKKY= -k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20200715165012-dff82dc0de47/go.mod h1:R6lK1g14jiec20OVuA1ArvsCKs5th4rxGL3eUMdQmyA= -k8s.io/kubernetes/staging/src/k8s.io/metrics v0.0.0-20200715165012-dff82dc0de47/go.mod h1:xZM9EdJpWjqIWPvLiCP7vYKUEMwIgc0S8nc/MlLVK3Y= -k8s.io/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20200715165012-dff82dc0de47/go.mod h1:p8OmVbdzpawdZ/r9E1qcdJpzRirEg4OcSg8aZVWqvJo= +k8s.io/kubernetes v1.18.10 h1:/AkOf6VAOgZMMa5AAUzXuSEpj+d0phaMxG/s4ASIehY= +k8s.io/kubernetes v1.18.10/go.mod h1:1uB7+7NGbXMLX7XOjMcXnk2Lc1v2J3NcBFDUTn1JkX8= +k8s.io/kubernetes/staging/src/k8s.io/api v0.0.0-20201015014302-62876fc6d93e h1:CWb+9ZIhwAD1dXQGM+oF1P1zFcqaeLzxDYQ7R3VamZY= +k8s.io/kubernetes/staging/src/k8s.io/api v0.0.0-20201015014302-62876fc6d93e/go.mod h1:oMzWB6/RPBLYAObltLVSu5Ms1ZztBe7G8s1ni2rZY7w= +k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20201015014302-62876fc6d93e/go.mod h1:BVIYewlEVCukQBRrZR3Kms8GdCsDQBsRIBCoy3rwzMk= +k8s.io/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20201015014302-62876fc6d93e h1:nRDwIajkEoxMKW5EvlEhb+IQRNJGbNCj3mUF23Zkank= +k8s.io/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20201015014302-62876fc6d93e/go.mod h1:O0RN84lOQdMpi45vcplXjrN0t9ijoRZZFQcDwzfiN4o= +k8s.io/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20201015014302-62876fc6d93e h1:Bm/U6xgcIDFjgxy2KhwZC9kCWJ6HC6T1e2d1ZF1u6KI= +k8s.io/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20201015014302-62876fc6d93e/go.mod h1:PAkjvu2+ZaJ0h190VdCJCbQjb+QqVk6xlaDlUkkxlxw= +k8s.io/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20201015014302-62876fc6d93e/go.mod h1:tQWQ35D+zCpe30tdF2SPVPsDvRsfnnaV7AfT5iQyoVE= +k8s.io/kubernetes/staging/src/k8s.io/client-go v0.0.0-20201015014302-62876fc6d93e h1:WpY4W42Xbw+7bFEKI1LPVXawfj1YxAy74vyALikKwJA= +k8s.io/kubernetes/staging/src/k8s.io/client-go v0.0.0-20201015014302-62876fc6d93e/go.mod h1:BZvMIT9kFoTauzRMi2SzP8eU6nwxgYfyIw2sWpEbFos= +k8s.io/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20201015014302-62876fc6d93e h1:XM2pw/h3PLfDu/q1/Y1mzQuGl4ASrbsD7MuPCkbHaYQ= +k8s.io/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20201015014302-62876fc6d93e/go.mod h1:jW0IWD1v1cNcp/vvXbVuovmZNSieKSZBdM7VmX1lrVI= +k8s.io/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20201015014302-62876fc6d93e/go.mod h1:oHXhD/NqW/vlYggpTUWbP2x6disww69H1jdsyirbJl8= 
+k8s.io/kubernetes/staging/src/k8s.io/code-generator v0.0.0-20201015014302-62876fc6d93e/go.mod h1:qBtAbyavqI3lGwEvxrQk9wwUTWntOADx38Iizyn31nw= +k8s.io/kubernetes/staging/src/k8s.io/component-base v0.0.0-20201015014302-62876fc6d93e h1:LdRGIUg1e1UOV7iSOCsXod192FEFxcH2aP2IKnDqjIo= +k8s.io/kubernetes/staging/src/k8s.io/component-base v0.0.0-20201015014302-62876fc6d93e/go.mod h1:zRlCznOsLYdwq5DB2b/26X/n/04fhV3U3rMC60t80/Q= +k8s.io/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20201015014302-62876fc6d93e h1:e+vLd7u9UXp2CMn5Yo8bXM15/6c/tTHhTK8qI1HFo/U= +k8s.io/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20201015014302-62876fc6d93e/go.mod h1:O3AtmT8iqccYwp/fsXdy3h0N9X/yfvRMD2XS48PJrBk= +k8s.io/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20201015014302-62876fc6d93e h1:MjTTSLJUBL6ywNk2xQaTqPot05YO1F2McsrjHh7jrIg= +k8s.io/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20201015014302-62876fc6d93e/go.mod h1:/YQL/PqGdoNbC2H+w4tx2zrVdxNb541lW3PA81FdOlE= +k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20201015014302-62876fc6d93e/go.mod h1:NcOKzNVVRhmkQmzCcBHfPPcZqgGXouc/o3Eul3saPj8= +k8s.io/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20201015014302-62876fc6d93e h1:FPe14kycGxq7488TZyyotqaH7T3+zQ8O0BtmuuM5y6U= +k8s.io/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20201015014302-62876fc6d93e/go.mod h1:pABoR/v0r2aJLFC1570FaaRJbXyiHhqdGHe5W8nk0XY= +k8s.io/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20201015014302-62876fc6d93e/go.mod h1:GLAmLACy/nOND24DRGKyPH21F89pTcevjPRxEtbLJmU= +k8s.io/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20201015014302-62876fc6d93e h1:2x1eizIkJGZ3aA41vk7pYXMnWrn5NmCAxfpnFPdNgOY= +k8s.io/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20201015014302-62876fc6d93e/go.mod h1:UNQ/Ff/Mq9mmCl0MYGl3ciCEIRQr9BT+/DSsoy6/ZMI= +k8s.io/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20201015014302-62876fc6d93e h1:MyPJK/lt6P+jfR4Ok3f7ZqSESc3S7b9IZICealQwqfg= +k8s.io/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20201015014302-62876fc6d93e/go.mod h1:fhYoYA0NMwnn7+22+HDfGm0COfeDzxagvttB/vRtotA= +k8s.io/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20201015014302-62876fc6d93e/go.mod h1:Raj75cxSm9NiVBoLk/lB1D4XvpBzTG4WoJ6nIH8Cyew= +k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20201015014302-62876fc6d93e h1:pozpQz3grWVK2NdwAFWaruCmud+zV/1UO2oLff+2FH0= +k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20201015014302-62876fc6d93e/go.mod h1:R6lK1g14jiec20OVuA1ArvsCKs5th4rxGL3eUMdQmyA= +k8s.io/kubernetes/staging/src/k8s.io/metrics v0.0.0-20201015014302-62876fc6d93e/go.mod h1:xZM9EdJpWjqIWPvLiCP7vYKUEMwIgc0S8nc/MlLVK3Y= +k8s.io/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20201015014302-62876fc6d93e/go.mod h1:p8OmVbdzpawdZ/r9E1qcdJpzRirEg4OcSg8aZVWqvJo= k8s.io/repo-infra v0.0.1-alpha.1/go.mod h1:wO1t9WaB99V80ljbeENTnayuEEwNZt7gECYh/CEyOJ8= k8s.io/system-validators v1.0.4/go.mod h1:HgSgTg4NAGNoYYjKsUyk52gdNi2PVDswQ9Iyn66R7NI= k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 h1:d4vVOjXm687F1iLSP2q3lyPPuyvTUt3aVoBpi2DqRsU= diff --git a/vendor/github.com/evanphx/json-patch/.travis.yml b/vendor/github.com/evanphx/json-patch/.travis.yml index 2092c72c46..50e4afd19a 100644 --- a/vendor/github.com/evanphx/json-patch/.travis.yml +++ b/vendor/github.com/evanphx/json-patch/.travis.yml @@ -1,8 +1,8 @@ language: go go: - - 1.8 - - 1.7 + - 1.14 + - 1.13 install: - if ! 
go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi @@ -11,6 +11,9 @@ install: script: - go get - go test -cover ./... + - cd ./v5 + - go get + - go test -cover ./... notifications: email: false diff --git a/vendor/github.com/evanphx/json-patch/LICENSE b/vendor/github.com/evanphx/json-patch/LICENSE index 0eb9b72d84..df76d7d771 100644 --- a/vendor/github.com/evanphx/json-patch/LICENSE +++ b/vendor/github.com/evanphx/json-patch/LICENSE @@ -6,7 +6,7 @@ modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. -* Redistributions in binary form must reproduce the above copyright notice +* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the Evan Phoenix nor the names of its contributors diff --git a/vendor/github.com/evanphx/json-patch/README.md b/vendor/github.com/evanphx/json-patch/README.md index 9c7f87f7ce..121b039dba 100644 --- a/vendor/github.com/evanphx/json-patch/README.md +++ b/vendor/github.com/evanphx/json-patch/README.md @@ -1,5 +1,5 @@ # JSON-Patch -`jsonpatch` is a library which provides functionallity for both applying +`jsonpatch` is a library which provides functionality for both applying [RFC6902 JSON patches](http://tools.ietf.org/html/rfc6902) against documents, as well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ietf.org/html/rfc7396). @@ -11,10 +11,11 @@ well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ie **Latest and greatest**: ```bash -go get -u github.com/evanphx/json-patch +go get -u github.com/evanphx/json-patch/v5 ``` **Stable Versions**: +* Version 5: `go get -u gopkg.in/evanphx/json-patch.v5` * Version 4: `go get -u gopkg.in/evanphx/json-patch.v4` (previous versions below `v3` are unavailable) @@ -82,7 +83,7 @@ When ran, you get the following output: ```bash $ go run main.go patch document: {"height":null,"name":"Jane"} -updated tina doc: {"age":28,"name":"Jane"} +updated alternative doc: {"age":28,"name":"Jane"} ``` ## Create and apply a JSON Patch @@ -164,7 +165,7 @@ func main() { } if !jsonpatch.Equal(original, different) { - fmt.Println(`"original" is _not_ structurally equal to "similar"`) + fmt.Println(`"original" is _not_ structurally equal to "different"`) } } ``` @@ -173,7 +174,7 @@ When ran, you get the following output: ```bash $ go run main.go "original" is structurally equal to "similar" -"original" is _not_ structurally equal to "similar" +"original" is _not_ structurally equal to "different" ``` ## Combine merge patches diff --git a/vendor/github.com/evanphx/json-patch/merge.go b/vendor/github.com/evanphx/json-patch/merge.go index 6806c4c200..14e8bb5ce3 100644 --- a/vendor/github.com/evanphx/json-patch/merge.go +++ b/vendor/github.com/evanphx/json-patch/merge.go @@ -307,13 +307,16 @@ func matchesValue(av, bv interface{}) bool { return true case map[string]interface{}: bt := bv.(map[string]interface{}) - for key := range at { - if !matchesValue(at[key], bt[key]) { - return false - } + if len(bt) != len(at) { + return false } for key := range bt { - if !matchesValue(at[key], bt[key]) { + av, aOK := at[key] + bv, bOK := bt[key] + if aOK != bOK { + return false + } + if !matchesValue(av, bv) { return false } } diff --git 
a/vendor/github.com/evanphx/json-patch/patch.go b/vendor/github.com/evanphx/json-patch/patch.go index c9cf590216..f185a45b2c 100644 --- a/vendor/github.com/evanphx/json-patch/patch.go +++ b/vendor/github.com/evanphx/json-patch/patch.go @@ -6,6 +6,8 @@ import ( "fmt" "strconv" "strings" + + "github.com/pkg/errors" ) const ( @@ -24,6 +26,14 @@ var ( AccumulatedCopySizeLimit int64 = 0 ) +var ( + ErrTestFailed = errors.New("test failed") + ErrMissing = errors.New("missing value") + ErrUnknownType = errors.New("unknown object type") + ErrInvalid = errors.New("invalid state detected") + ErrInvalidIndex = errors.New("invalid index referenced") +) + type lazyNode struct { raw *json.RawMessage doc partialDoc @@ -31,10 +41,11 @@ type lazyNode struct { which int } -type operation map[string]*json.RawMessage +// Operation is a single JSON-Patch step, such as a single 'add' operation. +type Operation map[string]*json.RawMessage -// Patch is an ordered collection of operations. -type Patch []operation +// Patch is an ordered collection of Operations. +type Patch []Operation type partialDoc map[string]*lazyNode type partialArray []*lazyNode @@ -59,7 +70,7 @@ func (n *lazyNode) MarshalJSON() ([]byte, error) { case eAry: return json.Marshal(n.ary) default: - return nil, fmt.Errorf("Unknown type") + return nil, ErrUnknownType } } @@ -91,7 +102,7 @@ func (n *lazyNode) intoDoc() (*partialDoc, error) { } if n.raw == nil { - return nil, fmt.Errorf("Unable to unmarshal nil pointer as partial document") + return nil, ErrInvalid } err := json.Unmarshal(*n.raw, &n.doc) @@ -110,7 +121,7 @@ func (n *lazyNode) intoAry() (*partialArray, error) { } if n.raw == nil { - return nil, fmt.Errorf("Unable to unmarshal nil pointer as partial array") + return nil, ErrInvalid } err := json.Unmarshal(*n.raw, &n.ary) @@ -191,6 +202,10 @@ func (n *lazyNode) equal(o *lazyNode) bool { return false } + if len(n.doc) != len(o.doc) { + return false + } + for k, v := range n.doc { ov, ok := o.doc[k] @@ -198,6 +213,10 @@ func (n *lazyNode) equal(o *lazyNode) bool { return false } + if (v == nil) != (ov == nil) { + return false + } + if v == nil && ov == nil { continue } @@ -227,7 +246,8 @@ func (n *lazyNode) equal(o *lazyNode) bool { return true } -func (o operation) kind() string { +// Kind reads the "op" field of the Operation. +func (o Operation) Kind() string { if obj, ok := o["op"]; ok && obj != nil { var op string @@ -243,39 +263,41 @@ func (o operation) kind() string { return "unknown" } -func (o operation) path() string { +// Path reads the "path" field of the Operation. +func (o Operation) Path() (string, error) { if obj, ok := o["path"]; ok && obj != nil { var op string err := json.Unmarshal(*obj, &op) if err != nil { - return "unknown" + return "unknown", err } - return op + return op, nil } - return "unknown" + return "unknown", errors.Wrapf(ErrMissing, "operation missing path field") } -func (o operation) from() string { +// From reads the "from" field of the Operation. 
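The hunk above exports the former `operation` type as `Operation` and turns `kind`/`path` into the exported `Kind()`/`Path()` accessors, with `Path()` now returning an error. A minimal sketch of walking a decoded patch with the new accessors; `DecodePatch` is the library's pre-existing constructor and is assumed unchanged by this hunk:

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	raw := []byte(`[
		{"op": "replace", "path": "/name", "value": "Jane"},
		{"op": "remove", "path": "/height"}
	]`)

	patch, err := jsonpatch.DecodePatch(raw)
	if err != nil {
		panic(err)
	}

	// Patch is now []Operation, so each step can be inspected directly.
	for _, op := range patch {
		path, err := op.Path()
		if err != nil {
			panic(err)
		}
		fmt.Printf("op=%s path=%s\n", op.Kind(), path)
	}
}
```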
+func (o Operation) From() (string, error) { if obj, ok := o["from"]; ok && obj != nil { var op string err := json.Unmarshal(*obj, &op) if err != nil { - return "unknown" + return "unknown", err } - return op + return op, nil } - return "unknown" + return "unknown", errors.Wrapf(ErrMissing, "operation, missing from field") } -func (o operation) value() *lazyNode { +func (o Operation) value() *lazyNode { if obj, ok := o["value"]; ok { return newLazyNode(obj) } @@ -283,6 +305,23 @@ func (o operation) value() *lazyNode { return nil } +// ValueInterface decodes the operation value into an interface. +func (o Operation) ValueInterface() (interface{}, error) { + if obj, ok := o["value"]; ok && obj != nil { + var v interface{} + + err := json.Unmarshal(*obj, &v) + + if err != nil { + return nil, err + } + + return v, nil + } + + return nil, errors.Wrapf(ErrMissing, "operation, missing value field") +} + func isArray(buf []byte) bool { Loop: for _, c := range buf { @@ -359,7 +398,7 @@ func (d *partialDoc) get(key string) (*lazyNode, error) { func (d *partialDoc) remove(key string) error { _, ok := (*d)[key] if !ok { - return fmt.Errorf("Unable to remove nonexistent key: %s", key) + return errors.Wrapf(ErrMissing, "Unable to remove nonexistent key: %s", key) } delete(*d, key) @@ -385,7 +424,7 @@ func (d *partialArray) add(key string, val *lazyNode) error { idx, err := strconv.Atoi(key) if err != nil { - return err + return errors.Wrapf(err, "value was not a proper array index: '%s'", key) } sz := len(*d) + 1 @@ -395,17 +434,17 @@ func (d *partialArray) add(key string, val *lazyNode) error { cur := *d if idx >= len(ary) { - return fmt.Errorf("Unable to access invalid index: %d", idx) + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) } - if SupportNegativeIndices { - if idx < -len(ary) { - return fmt.Errorf("Unable to access invalid index: %d", idx) + if idx < 0 { + if !SupportNegativeIndices { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) } - - if idx < 0 { - idx += len(ary) + if idx < -len(ary) { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) } + idx += len(ary) } copy(ary[0:idx], cur[0:idx]) @@ -424,7 +463,7 @@ func (d *partialArray) get(key string) (*lazyNode, error) { } if idx >= len(*d) { - return nil, fmt.Errorf("Unable to access invalid index: %d", idx) + return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) } return (*d)[idx], nil @@ -439,17 +478,17 @@ func (d *partialArray) remove(key string) error { cur := *d if idx >= len(cur) { - return fmt.Errorf("Unable to access invalid index: %d", idx) + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) } - if SupportNegativeIndices { - if idx < -len(cur) { - return fmt.Errorf("Unable to access invalid index: %d", idx) + if idx < 0 { + if !SupportNegativeIndices { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) } - - if idx < 0 { - idx += len(cur) + if idx < -len(cur) { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) } + idx += len(cur) } ary := make([]*lazyNode, len(cur)-1) @@ -462,140 +501,189 @@ func (d *partialArray) remove(key string) error { } -func (p Patch) add(doc *container, op operation) error { - path := op.path() +func (p Patch) add(doc *container, op Operation) error { + path, err := op.Path() + if err != nil { + return errors.Wrapf(ErrMissing, "add operation failed to decode path") + } con, key := 
findObject(doc, path) if con == nil { - return fmt.Errorf("jsonpatch add operation does not apply: doc is missing path: \"%s\"", path) + return errors.Wrapf(ErrMissing, "add operation does not apply: doc is missing path: \"%s\"", path) } - return con.add(key, op.value()) + err = con.add(key, op.value()) + if err != nil { + return errors.Wrapf(err, "error in add for path: '%s'", path) + } + + return nil } -func (p Patch) remove(doc *container, op operation) error { - path := op.path() +func (p Patch) remove(doc *container, op Operation) error { + path, err := op.Path() + if err != nil { + return errors.Wrapf(ErrMissing, "remove operation failed to decode path") + } con, key := findObject(doc, path) if con == nil { - return fmt.Errorf("jsonpatch remove operation does not apply: doc is missing path: \"%s\"", path) + return errors.Wrapf(ErrMissing, "remove operation does not apply: doc is missing path: \"%s\"", path) + } + + err = con.remove(key) + if err != nil { + return errors.Wrapf(err, "error in remove for path: '%s'", path) } - return con.remove(key) + return nil } -func (p Patch) replace(doc *container, op operation) error { - path := op.path() +func (p Patch) replace(doc *container, op Operation) error { + path, err := op.Path() + if err != nil { + return errors.Wrapf(err, "replace operation failed to decode path") + } con, key := findObject(doc, path) if con == nil { - return fmt.Errorf("jsonpatch replace operation does not apply: doc is missing path: %s", path) + return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing path: %s", path) } _, ok := con.get(key) if ok != nil { - return fmt.Errorf("jsonpatch replace operation does not apply: doc is missing key: %s", path) + return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing key: %s", path) } - return con.set(key, op.value()) + err = con.set(key, op.value()) + if err != nil { + return errors.Wrapf(err, "error in remove for path: '%s'", path) + } + + return nil } -func (p Patch) move(doc *container, op operation) error { - from := op.from() +func (p Patch) move(doc *container, op Operation) error { + from, err := op.From() + if err != nil { + return errors.Wrapf(err, "move operation failed to decode from") + } con, key := findObject(doc, from) if con == nil { - return fmt.Errorf("jsonpatch move operation does not apply: doc is missing from path: %s", from) + return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing from path: %s", from) } val, err := con.get(key) if err != nil { - return err + return errors.Wrapf(err, "error in move for path: '%s'", key) } err = con.remove(key) if err != nil { - return err + return errors.Wrapf(err, "error in move for path: '%s'", key) } - path := op.path() + path, err := op.Path() + if err != nil { + return errors.Wrapf(err, "move operation failed to decode path") + } con, key = findObject(doc, path) if con == nil { - return fmt.Errorf("jsonpatch move operation does not apply: doc is missing destination path: %s", path) + return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing destination path: %s", path) } - return con.add(key, val) + err = con.add(key, val) + if err != nil { + return errors.Wrapf(err, "error in move for path: '%s'", path) + } + + return nil } -func (p Patch) test(doc *container, op operation) error { - path := op.path() +func (p Patch) test(doc *container, op Operation) error { + path, err := op.Path() + if err != nil { + return errors.Wrapf(err, "test operation failed to decode path") + 
} con, key := findObject(doc, path) if con == nil { - return fmt.Errorf("jsonpatch test operation does not apply: is missing path: %s", path) + return errors.Wrapf(ErrMissing, "test operation does not apply: is missing path: %s", path) } val, err := con.get(key) - if err != nil { - return err + return errors.Wrapf(err, "error in test for path: '%s'", path) } if val == nil { if op.value().raw == nil { return nil } - return fmt.Errorf("Testing value %s failed", path) + return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) } else if op.value() == nil { - return fmt.Errorf("Testing value %s failed", path) + return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) } if val.equal(op.value()) { return nil } - return fmt.Errorf("Testing value %s failed", path) + return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) } -func (p Patch) copy(doc *container, op operation, accumulatedCopySize *int64) error { - from := op.from() +func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64) error { + from, err := op.From() + if err != nil { + return errors.Wrapf(err, "copy operation failed to decode from") + } con, key := findObject(doc, from) if con == nil { - return fmt.Errorf("jsonpatch copy operation does not apply: doc is missing from path: %s", from) + return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing from path: %s", from) } val, err := con.get(key) if err != nil { - return err + return errors.Wrapf(err, "error in copy for from: '%s'", from) } - path := op.path() + path, err := op.Path() + if err != nil { + return errors.Wrapf(ErrMissing, "copy operation failed to decode path") + } con, key = findObject(doc, path) if con == nil { - return fmt.Errorf("jsonpatch copy operation does not apply: doc is missing destination path: %s", path) + return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing destination path: %s", path) } valCopy, sz, err := deepCopy(val) if err != nil { - return err + return errors.Wrapf(err, "error while performing deep copy") } + (*accumulatedCopySize) += int64(sz) if AccumulatedCopySizeLimit > 0 && *accumulatedCopySize > AccumulatedCopySizeLimit { return NewAccumulatedCopySizeError(AccumulatedCopySizeLimit, *accumulatedCopySize) } - return con.add(key, valCopy) + err = con.add(key, valCopy) + if err != nil { + return errors.Wrapf(err, "error while adding value during copy") + } + + return nil } // Equal indicates if 2 JSON documents have the same structural equality. 
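With the changes above, json-patch failures are wrapped with `github.com/pkg/errors` around exported sentinels such as `ErrTestFailed` and `ErrMissing`, so callers can branch on the underlying cause instead of matching message strings. A hedged sketch of that caller-side check, using only the API shown in the hunk:

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
	"github.com/pkg/errors"
)

func main() {
	doc := []byte(`{"name": "John", "age": 24}`)

	patch, err := jsonpatch.DecodePatch([]byte(`[
		{"op": "test", "path": "/name", "value": "Jane"}
	]`))
	if err != nil {
		panic(err)
	}

	_, err = patch.Apply(doc)
	switch errors.Cause(err) {
	case nil:
		fmt.Println("patch applied")
	case jsonpatch.ErrTestFailed:
		// The "test" operation did not match; treat it as a benign precondition failure.
		fmt.Println("precondition failed:", err)
	case jsonpatch.ErrMissing:
		fmt.Println("referenced path or value is missing:", err)
	default:
		fmt.Println("unexpected patch error:", err)
	}
}
```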
@@ -651,7 +739,7 @@ func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) { var accumulatedCopySize int64 for _, op := range p { - switch op.kind() { + switch op.Kind() { case "add": err = p.add(&pd, op) case "remove": @@ -665,7 +753,7 @@ func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) { case "copy": err = p.copy(&pd, op, &accumulatedCopySize) default: - err = fmt.Errorf("Unexpected kind: %s", op.kind()) + err = fmt.Errorf("Unexpected kind: %s", op.Kind()) } if err != nil { diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/BUILD b/vendor/k8s.io/apimachinery/pkg/util/sets/BUILD index 0c4c00d725..24e30d0017 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/BUILD +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/BUILD @@ -38,7 +38,7 @@ go_genrule( ], cmd = """ $(location //vendor/k8s.io/code-generator/cmd/set-gen) \ - --input-dirs k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/sets/types \ + --input-dirs k8s.io/apimachinery/pkg/util/sets/types \ --output-base $$(dirname $$(dirname $(location :byte.go))) \ --go-header-file $(location //hack/boilerplate:boilerplate.generatego.txt) \ --output-package sets diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go b/vendor/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go index a18b51ba00..fcfcc3007e 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/filters/impersonation.go @@ -117,10 +117,37 @@ func WithImpersonation(handler http.Handler, a authorizer.Authorizer, s runtime. } } - if !groupsSpecified && username != user.Anonymous { - // When impersonating a non-anonymous user, if no groups were specified - // include the system:authenticated group in the impersonated user info - groups = append(groups, user.AllAuthenticated) + if username != user.Anonymous { + // When impersonating a non-anonymous user, include the 'system:authenticated' group + // in the impersonated user info: + // - if no groups were specified + // - if a group has been specified other than 'system:authenticated' + // + // If 'system:unauthenticated' group has been specified we should not include + // the 'system:authenticated' group. 
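The comment above, together with the code that follows, changes how the apiserver defaults groups for impersonated requests: `system:authenticated` is added for non-anonymous users unless `system:authenticated` or `system:unauthenticated` was explicitly requested, and `system:unauthenticated` is added for anonymous impersonation. A hedged client-side sketch using client-go's `rest.ImpersonationConfig`; the kubeconfig path is a placeholder:

```go
package main

import (
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/admin.kubeconfig") // placeholder path
	if err != nil {
		panic(err)
	}

	// Impersonate a user without listing any groups; with the change above the
	// apiserver records the request as coming from "jane" plus system:authenticated.
	cfg.Impersonate.UserName = "jane"

	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	_ = client // use the impersonating client as usual
}
```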
+ addAuthenticated := true + for _, group := range groups { + if group == user.AllAuthenticated || group == user.AllUnauthenticated { + addAuthenticated = false + break + } + } + + if addAuthenticated { + groups = append(groups, user.AllAuthenticated) + } + } else { + addUnauthenticated := true + for _, group := range groups { + if group == user.AllUnauthenticated { + addUnauthenticated = false + break + } + } + + if addUnauthenticated { + groups = append(groups, user.AllUnauthenticated) + } } newUser := &user.DefaultInfo{ diff --git a/vendor/k8s.io/apiserver/pkg/server/healthz/BUILD b/vendor/k8s.io/apiserver/pkg/server/healthz/BUILD index 3acc2486a8..3f65000a55 100644 --- a/vendor/k8s.io/apiserver/pkg/server/healthz/BUILD +++ b/vendor/k8s.io/apiserver/pkg/server/healthz/BUILD @@ -11,6 +11,7 @@ go_test( srcs = ["healthz_test.go"], embed = [":go_default_library"], deps = [ + "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apiserver/pkg/endpoints/metrics:go_default_library", "//staging/src/k8s.io/component-base/metrics/legacyregistry:go_default_library", @@ -31,7 +32,6 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/apiserver/pkg/endpoints/metrics:go_default_library", "//staging/src/k8s.io/apiserver/pkg/server/httplog:go_default_library", - "//staging/src/k8s.io/client-go/informers:go_default_library", "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/vendor/k8s.io/apiserver/pkg/server/healthz/healthz.go b/vendor/k8s.io/apiserver/pkg/server/healthz/healthz.go index 75dd887419..a7136be0a3 100644 --- a/vendor/k8s.io/apiserver/pkg/server/healthz/healthz.go +++ b/vendor/k8s.io/apiserver/pkg/server/healthz/healthz.go @@ -20,6 +20,7 @@ import ( "bytes" "fmt" "net/http" + "reflect" "strings" "sync" "sync/atomic" @@ -29,7 +30,6 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/endpoints/metrics" "k8s.io/apiserver/pkg/server/httplog" - "k8s.io/client-go/informers" "k8s.io/klog" ) @@ -82,16 +82,20 @@ func (l *log) Check(_ *http.Request) error { return fmt.Errorf("logging blocked") } +type cacheSyncWaiter interface { + WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool +} + type informerSync struct { - sharedInformerFactory informers.SharedInformerFactory + cacheSyncWaiter cacheSyncWaiter } var _ HealthChecker = &informerSync{} -// NewInformerSyncHealthz returns a new HealthChecker that will pass only if all informers in the given sharedInformerFactory sync. -func NewInformerSyncHealthz(sharedInformerFactory informers.SharedInformerFactory) HealthChecker { +// NewInformerSyncHealthz returns a new HealthChecker that will pass only if all informers in the given cacheSyncWaiter sync. +func NewInformerSyncHealthz(cacheSyncWaiter cacheSyncWaiter) HealthChecker { return &informerSync{ - sharedInformerFactory: sharedInformerFactory, + cacheSyncWaiter: cacheSyncWaiter, } } @@ -104,8 +108,8 @@ func (i *informerSync) Check(_ *http.Request) error { // Close stopCh to force checking if informers are synced now. 
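The healthz hunk above narrows `informerSync`'s dependency from a concrete `informers.SharedInformerFactory` to the small `cacheSyncWaiter` interface, so anything exposing `WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool` can back the check. A minimal sketch with a hand-rolled waiter; the `stubWaiter` type is illustrative and not part of the patch:

```go
package main

import (
	"reflect"

	"k8s.io/apiserver/pkg/server/healthz"
)

// stubWaiter satisfies the new cacheSyncWaiter interface; a real caller would
// typically pass an informers.SharedInformerFactory, which already has this method.
type stubWaiter struct {
	synced bool
}

func (s stubWaiter) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool {
	return map[reflect.Type]bool{reflect.TypeOf(s): s.synced}
}

func main() {
	check := healthz.NewInformerSyncHealthz(stubWaiter{synced: true})
	if err := check.Check(nil); err != nil {
		panic(err)
	}
}
```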
close(stopCh) - var informersByStarted map[bool][]string - for informerType, started := range i.sharedInformerFactory.WaitForCacheSync(stopCh) { + informersByStarted := make(map[bool][]string) + for informerType, started := range i.cacheSyncWaiter.WaitForCacheSync(stopCh) { informersByStarted[started] = append(informersByStarted[started], informerType.String()) } diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go b/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go index ac92a99a99..0da41f049e 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go @@ -440,6 +440,14 @@ func getNewItemFunc(listObj runtime.Object, v reflect.Value) func() runtime.Obje func (s *store) Count(key string) (int64, error) { key = path.Join(s.pathPrefix, key) + + // We need to make sure the key ended with "/" so that we only get children "directories". + // e.g. if we have key "/a", "/a/b", "/ab", getting keys with prefix "/a" will return all three, + // while with prefix "/a/" will return only "/a/b" which is the correct answer. + if !strings.HasSuffix(key, "/") { + key += "/" + } + startTime := time.Now() getResp, err := s.client.KV.Get(context.Background(), key, clientv3.WithRange(clientv3.GetPrefixRangeEnd(key)), clientv3.WithCountOnly()) metrics.RecordEtcdRequestLatency("listWithCount", key, startTime) @@ -547,7 +555,7 @@ func (s *store) List(ctx context.Context, key, resourceVersion string, pred stor newItemFunc := getNewItemFunc(listObj, v) - var returnedRV, continueRV int64 + var returnedRV, continueRV, withRev int64 var continueKey string switch { case s.pagingEnabled && len(pred.Continue) > 0: @@ -568,7 +576,7 @@ func (s *store) List(ctx context.Context, key, resourceVersion string, pred stor // continueRV==0 is invalid. // If continueRV < 0, the request is for the latest resource version. 
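The etcd3 `Count` change above appends a trailing "/" so that counting under `/a` no longer also counts the sibling key `/ab`. A standalone, dependency-free illustration of the prefix semantics spelled out in that comment; the key list is made up:

```go
package main

import (
	"fmt"
	"strings"
)

func countWithPrefix(keys []string, prefix string) int {
	n := 0
	for _, k := range keys {
		if strings.HasPrefix(k, prefix) {
			n++
		}
	}
	return n
}

func main() {
	keys := []string{"/a", "/a/b", "/ab"}

	fmt.Println(countWithPrefix(keys, "/a"))  // 3: also matches the sibling "/ab"
	fmt.Println(countWithPrefix(keys, "/a/")) // 1: only the child "/a/b"
}
```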
if continueRV > 0 { - options = append(options, clientv3.WithRev(continueRV)) + withRev = continueRV returnedRV = continueRV } case s.pagingEnabled && pred.Limit > 0: @@ -578,7 +586,7 @@ func (s *store) List(ctx context.Context, key, resourceVersion string, pred stor return apierrors.NewBadRequest(fmt.Sprintf("invalid resource version: %v", err)) } if fromRV > 0 { - options = append(options, clientv3.WithRev(int64(fromRV))) + withRev = int64(fromRV) } returnedRV = int64(fromRV) } @@ -589,6 +597,9 @@ func (s *store) List(ctx context.Context, key, resourceVersion string, pred stor default: options = append(options, clientv3.WithPrefix()) } + if withRev != 0 { + options = append(options, clientv3.WithRev(withRev)) + } // loop until we have filled the requested limit from etcd or there are no more results var lastKey []byte @@ -650,6 +661,10 @@ func (s *store) List(ctx context.Context, key, resourceVersion string, pred stor break } key = string(lastKey) + "\x00" + if withRev == 0 { + withRev = returnedRV + options = append(options, clientv3.WithRev(withRev)) + } } // instruct the client to begin querying from immediately after the last key we returned diff --git a/vendor/k8s.io/client-go/tools/cache/controller.go b/vendor/k8s.io/client-go/tools/cache/controller.go index 5d58211939..b513ce8b32 100644 --- a/vendor/k8s.io/client-go/tools/cache/controller.go +++ b/vendor/k8s.io/client-go/tools/cache/controller.go @@ -138,11 +138,11 @@ func (c *controller) Run(stopCh <-chan struct{}) { c.reflectorMutex.Unlock() var wg wait.Group - defer wg.Wait() wg.StartWithChannel(stopCh, r.Run) wait.Until(c.processLoop, time.Second, stopCh) + wg.Wait() } // Returns true once this controller has completed an initial resource listing diff --git a/vendor/k8s.io/client-go/tools/cache/reflector.go b/vendor/k8s.io/client-go/tools/cache/reflector.go index 58f871f519..b646a28bf3 100644 --- a/vendor/k8s.io/client-go/tools/cache/reflector.go +++ b/vendor/k8s.io/client-go/tools/cache/reflector.go @@ -551,5 +551,26 @@ func isExpiredError(err error) bool { } func isTooLargeResourceVersionError(err error) bool { - return apierrors.HasStatusCause(err, metav1.CauseTypeResourceVersionTooLarge) + if apierrors.HasStatusCause(err, metav1.CauseTypeResourceVersionTooLarge) { + return true + } + // In Kubernetes 1.17.0-1.18.5, the api server doesn't set the error status cause to + // metav1.CauseTypeResourceVersionTooLarge to indicate that the requested minimum resource + // version is larger than the largest currently available resource version. To ensure backward + // compatibility with these server versions we also need to detect the error based on the content + // of the error message field. 
+ if !apierrors.IsTimeout(err) { + return false + } + apierr, ok := err.(apierrors.APIStatus) + if !ok || apierr == nil || apierr.Status().Details == nil { + return false + } + for _, cause := range apierr.Status().Details.Causes { + // Matches the message returned by api server 1.17.0-1.18.5 for this error condition + if cause.Message == "Too large resource version" { + return true + } + } + return false } diff --git a/vendor/k8s.io/cloud-provider/go.sum b/vendor/k8s.io/cloud-provider/go.sum index 2594ffc362..efcfb6712f 100644 --- a/vendor/k8s.io/cloud-provider/go.sum +++ b/vendor/k8s.io/cloud-provider/go.sum @@ -20,8 +20,8 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/evanphx/json-patch v4.2.0+incompatible h1:fUDGZCv/7iAN7u0puUVhvKCcsR6vRfwrJatElLBEf0I= -github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses= +github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -93,6 +93,8 @@ github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGV github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= diff --git a/vendor/k8s.io/csi-translation-lib/go.sum b/vendor/k8s.io/csi-translation-lib/go.sum index bb6a817cae..dbb05555f1 100644 --- a/vendor/k8s.io/csi-translation-lib/go.sum +++ b/vendor/k8s.io/csi-translation-lib/go.sum @@ -20,7 +20,7 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= 
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= @@ -84,6 +84,7 @@ github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+ github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= diff --git a/vendor/k8s.io/kubernetes/pkg/client/leaderelectionconfig/config.go b/vendor/k8s.io/kubernetes/pkg/client/leaderelectionconfig/config.go index bbdc977cc5..aaefafa372 100644 --- a/vendor/k8s.io/kubernetes/pkg/client/leaderelectionconfig/config.go +++ b/vendor/k8s.io/kubernetes/pkg/client/leaderelectionconfig/config.go @@ -42,7 +42,8 @@ func BindFlags(l *componentbaseconfig.LeaderElectionConfiguration, fs *pflag.Fla "of a leadership. This is only applicable if leader election is enabled.") fs.StringVar(&l.ResourceLock, "leader-elect-resource-lock", l.ResourceLock, ""+ "The type of resource object that is used for locking during "+ - "leader election. Supported options are `endpoints` (default) and `configmaps`.") + "leader election. Supported options are 'endpoints', 'configmaps', "+ + "'leases', 'endpointsleases' and 'configmapsleases'.") fs.StringVar(&l.ResourceName, "leader-elect-resource-name", l.ResourceName, ""+ "The name of resource object that is used for locking during "+ "leader election.") diff --git a/vendor/k8s.io/kubernetes/pkg/controller/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/BUILD index f447f4c4e5..49c0282a7c 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/controller/BUILD @@ -20,6 +20,7 @@ go_test( "//staging/src/k8s.io/api/apps/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go b/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go index 3b9c583465..9a01fa8b84 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go @@ -602,7 +602,11 @@ func (r RealPodControl) DeletePod(namespace string, podID string, object runtime return fmt.Errorf("object does not have ObjectMeta, %v", err) } klog.V(2).Infof("Controller %v deleting pod %v/%v", accessor.GetName(), namespace, podID) - if err := r.KubeClient.CoreV1().Pods(namespace).Delete(context.TODO(), podID, metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { + if err := r.KubeClient.CoreV1().Pods(namespace).Delete(context.TODO(), podID, metav1.DeleteOptions{}); err != nil { + if 
apierrors.IsNotFound(err) { + klog.V(4).Infof("pod %v/%v has already been deleted.", namespace, podID) + return err + } r.Recorder.Eventf(object, v1.EventTypeWarning, FailedDeletePodReason, "Error deleting: %v", err) return fmt.Errorf("unable to delete pods: %v", err) } diff --git a/vendor/k8s.io/kubernetes/pkg/controller/nodelifecycle/scheduler/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/nodelifecycle/scheduler/BUILD index ed3be9c03f..18d312a8ae 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/nodelifecycle/scheduler/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/controller/nodelifecycle/scheduler/BUILD @@ -42,6 +42,7 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", "//staging/src/k8s.io/client-go/testing:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/controller/nodelifecycle/scheduler/taint_manager.go b/vendor/k8s.io/kubernetes/pkg/controller/nodelifecycle/scheduler/taint_manager.go index 873258bd48..2e02b13254 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/nodelifecycle/scheduler/taint_manager.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/nodelifecycle/scheduler/taint_manager.go @@ -358,7 +358,8 @@ func (tc *NoExecuteTaintManager) processPodOnNode( minTolerationTime := getMinTolerationTime(usedTolerations) // getMinTolerationTime returns negative value to denote infinite toleration. if minTolerationTime < 0 { - klog.V(4).Infof("New tolerations for %v tolerate forever. Scheduled deletion won't be cancelled if already scheduled.", podNamespacedName.String()) + klog.V(4).Infof("Current tolerations for %v tolerate forever, cancelling any scheduled deletion.", podNamespacedName.String()) + tc.cancelWorkWithEvent(podNamespacedName) return } diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/scheduling/scheduler_binder.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/scheduling/scheduler_binder.go index f53f7567fa..af0ff07f0c 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/scheduling/scheduler_binder.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/scheduling/scheduler_binder.go @@ -61,6 +61,8 @@ const ( ErrReasonBindConflict ConflictReason = "node(s) didn't find available persistent volumes to bind" // ErrReasonNodeConflict is used for VolumeNodeAffinityConflict predicate error. ErrReasonNodeConflict ConflictReason = "node(s) had volume node affinity conflict" + // ErrUnboundImmediatePVC is used when the pod has an unbound PVC in immedate binding mode. 
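With the controller_utils hunk above, `RealPodControl.DeletePod` now logs and returns the NotFound error instead of silently succeeding when the pod is already gone, so callers that consider "already deleted" a success must filter it themselves. A hedged caller-side sketch; the helper name is illustrative, not part of the patch:

```go
package example

import (
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/kubernetes/pkg/controller"
)

// deletePodIfPresent keeps the old "already gone is fine" behaviour on top of
// the new DeletePod semantics, which now surface the NotFound error.
func deletePodIfPresent(podControl controller.PodControlInterface, namespace, name string, object runtime.Object) error {
	err := podControl.DeletePod(namespace, name, object)
	if err != nil && !apierrors.IsNotFound(err) {
		return err
	}
	return nil
}
```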
+ ErrUnboundImmediatePVC ConflictReason = "pod has unbound immediate PersistentVolumeClaims" ) // InTreeToCSITranslator contains methods required to check migratable status @@ -258,7 +260,7 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (reasons Confl // Immediate claims should be bound if len(unboundClaimsImmediate) > 0 { - return nil, fmt.Errorf("pod has unbound immediate PersistentVolumeClaims") + return ConflictReasons{ErrUnboundImmediatePVC}, nil } // Check PV node affinity on bound volumes diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/container/os.go b/vendor/k8s.io/kubernetes/pkg/kubelet/container/os.go index bd27ae9f07..dae825b957 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/container/os.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/container/os.go @@ -38,6 +38,9 @@ type OSInterface interface { Pipe() (r *os.File, w *os.File, err error) ReadDir(dirname string) ([]os.FileInfo, error) Glob(pattern string) ([]string, error) + Open(name string) (*os.File, error) + OpenFile(name string, flag int, perm os.FileMode) (*os.File, error) + Rename(oldpath, newpath string) error } // RealOS is used to dispatch the real system level operations. @@ -105,3 +108,18 @@ func (RealOS) ReadDir(dirname string) ([]os.FileInfo, error) { func (RealOS) Glob(pattern string) ([]string, error) { return filepath.Glob(pattern) } + +// Open will call os.Open to return the file. +func (RealOS) Open(name string) (*os.File, error) { + return os.Open(name) +} + +// OpenFile will call os.OpenFile to return the file. +func (RealOS) OpenFile(name string, flag int, perm os.FileMode) (*os.File, error) { + return os.OpenFile(name, flag, perm) +} + +// Rename will call os.Rename to rename a file. +func (RealOS) Rename(oldpath, newpath string) error { + return os.Rename(oldpath, newpath) +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/OWNERS b/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/OWNERS deleted file mode 100644 index 7742eed636..0000000000 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/OWNERS +++ /dev/null @@ -1,5 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -approvers: -- dashpole -- sjenning diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/listers/listers.go b/vendor/k8s.io/kubernetes/pkg/scheduler/listers/listers.go index 5efcbc82b2..1f2bcdb1af 100644 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/listers/listers.go +++ b/vendor/k8s.io/kubernetes/pkg/scheduler/listers/listers.go @@ -41,6 +41,8 @@ type NodeInfoLister interface { List() ([]*schedulernodeinfo.NodeInfo, error) // Returns the list of NodeInfos of nodes with pods with affinity terms. HavePodsWithAffinityList() ([]*schedulernodeinfo.NodeInfo, error) + // Returns the list of NodeInfos of nodes with pods with required anti-affinity terms. + HavePodsWithRequiredAntiAffinityList() ([]*schedulernodeinfo.NodeInfo, error) // Returns the NodeInfo of the given node name. Get(nodeName string) (*schedulernodeinfo.NodeInfo, error) } diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/nodeinfo/node_info.go b/vendor/k8s.io/kubernetes/pkg/scheduler/nodeinfo/node_info.go index 085b65188c..22dc175951 100644 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/nodeinfo/node_info.go +++ b/vendor/k8s.io/kubernetes/pkg/scheduler/nodeinfo/node_info.go @@ -49,9 +49,10 @@ type NodeInfo struct { // Overall node information. 
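The scheduler_binder change above reports an unbound immediate PVC as a `ConflictReason` instead of an error, so callers inspect the returned reasons rather than treating the condition as a hard failure. A hedged sketch of that caller side; the local interface and function names are illustrative:

```go
package example

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/controller/volume/scheduling"
)

// podVolumeChecker captures just the method used below; the package's volume
// binder (shown in the hunk above) satisfies it.
type podVolumeChecker interface {
	FindPodVolumes(pod *v1.Pod, node *v1.Node) (scheduling.ConflictReasons, error)
}

// fitsOnNode shows the new return shape: hard failures come back as err, while
// conditions such as the new ErrUnboundImmediatePVC come back as reasons and
// simply mean "this node does not fit".
func fitsOnNode(binder podVolumeChecker, pod *v1.Pod, node *v1.Node) (bool, error) {
	reasons, err := binder.FindPodVolumes(pod, node)
	if err != nil {
		return false, err
	}
	for _, reason := range reasons {
		fmt.Printf("node %s rejected pod %s: %s\n", node.Name, pod.Name, reason)
	}
	return len(reasons) == 0, nil
}
```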
node *v1.Node - pods []*v1.Pod - podsWithAffinity []*v1.Pod - usedPorts HostPortInfo + pods []*v1.Pod + podsWithAffinity []*v1.Pod + podsWithRequiredAntiAffinity []*v1.Pod + usedPorts HostPortInfo // Total requested resources of all pods on this node. This includes assumed // pods, which scheduler has sent for binding, but may not be scheduled yet. @@ -339,6 +340,14 @@ func (n *NodeInfo) PodsWithAffinity() []*v1.Pod { return n.podsWithAffinity } +// PodsWithRequiredAntiAffinity return all pods with required anti-affinity constraints on this node. +func (n *NodeInfo) PodsWithRequiredAntiAffinity() []*v1.Pod { + if n == nil { + return nil + } + return n.podsWithRequiredAntiAffinity +} + // AllowedPodNumber returns the number of the allowed pods on this node. func (n *NodeInfo) AllowedPodNumber() int { if n == nil || n.allocatableResource == nil { @@ -445,6 +454,9 @@ func (n *NodeInfo) Clone() *NodeInfo { if len(n.podsWithAffinity) > 0 { clone.podsWithAffinity = append([]*v1.Pod(nil), n.podsWithAffinity...) } + if len(n.podsWithRequiredAntiAffinity) > 0 { + clone.podsWithRequiredAntiAffinity = append([]*v1.Pod(nil), n.podsWithRequiredAntiAffinity...) + } if len(n.taints) > 0 { clone.taints = append([]v1.Taint(nil), n.taints...) } @@ -477,6 +489,11 @@ func hasPodAffinityConstraints(pod *v1.Pod) bool { return affinity != nil && (affinity.PodAffinity != nil || affinity.PodAntiAffinity != nil) } +func hasRequiredPodAntiAffinityConstraints(pod *v1.Pod) bool { + affinity := pod.Spec.Affinity + return affinity != nil && affinity.PodAntiAffinity != nil && len(affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution) != 0 +} + // AddPod adds pod information to this NodeInfo. func (n *NodeInfo) AddPod(pod *v1.Pod) { res, non0CPU, non0Mem := calculateResource(pod) @@ -495,6 +512,9 @@ func (n *NodeInfo) AddPod(pod *v1.Pod) { if hasPodAffinityConstraints(pod) { n.podsWithAffinity = append(n.podsWithAffinity, pod) } + if hasRequiredPodAntiAffinityConstraints(pod) { + n.podsWithRequiredAntiAffinity = append(n.podsWithRequiredAntiAffinity, pod) + } // Consume ports when pods added. n.UpdateUsedPorts(pod, true) @@ -502,33 +522,45 @@ func (n *NodeInfo) AddPod(pod *v1.Pod) { n.generation = nextGeneration() } -// RemovePod subtracts pod information from this NodeInfo. -func (n *NodeInfo) RemovePod(pod *v1.Pod) error { - k1, err := GetPodKey(pod) - if err != nil { - return err - } - - for i := range n.podsWithAffinity { - k2, err := GetPodKey(n.podsWithAffinity[i]) +func removeFromSlice(s []*v1.Pod, k string) []*v1.Pod { + for i := range s { + k2, err := GetPodKey(s[i]) if err != nil { klog.Errorf("Cannot get pod key, err: %v", err) continue } - if k1 == k2 { + if k == k2 { // delete the element - n.podsWithAffinity[i] = n.podsWithAffinity[len(n.podsWithAffinity)-1] - n.podsWithAffinity = n.podsWithAffinity[:len(n.podsWithAffinity)-1] + s[i] = s[len(s)-1] + s = s[:len(s)-1] break } } + return s + +} + +// RemovePod subtracts pod information from this NodeInfo. 
+func (n *NodeInfo) RemovePod(pod *v1.Pod) error { + k, err := GetPodKey(pod) + if err != nil { + return err + } + + if hasPodAffinityConstraints(pod) { + n.podsWithAffinity = removeFromSlice(n.podsWithAffinity, k) + } + if hasRequiredPodAntiAffinityConstraints(pod) { + n.podsWithRequiredAntiAffinity = removeFromSlice(n.podsWithRequiredAntiAffinity, k) + } + for i := range n.pods { k2, err := GetPodKey(n.pods[i]) if err != nil { klog.Errorf("Cannot get pod key, err: %v", err) continue } - if k1 == k2 { + if k == k2 { // delete the element n.pods[i] = n.pods[len(n.pods)-1] n.pods = n.pods[:len(n.pods)-1] @@ -563,6 +595,9 @@ func (n *NodeInfo) resetSlicesIfEmpty() { if len(n.podsWithAffinity) == 0 { n.podsWithAffinity = nil } + if len(n.podsWithRequiredAntiAffinity) == 0 { + n.podsWithRequiredAntiAffinity = nil + } if len(n.pods) == 0 { n.pods = nil } @@ -646,6 +681,12 @@ func (n *NodeInfo) SetNode(node *v1.Node) error { return nil } +// RemoveNode removes the node object, leaving all other tracking information. +func (n *NodeInfo) RemoveNode() { + n.node = nil + n.generation = nextGeneration() +} + // FilterOutPods receives a list of pods and filters out those whose node names // are equal to the node of this NodeInfo, but are not found in the pods of this NodeInfo. // diff --git a/vendor/k8s.io/kubernetes/pkg/volume/volume_linux.go b/vendor/k8s.io/kubernetes/pkg/volume/volume_linux.go index fdbd901680..206c6704fd 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/volume_linux.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/volume_linux.go @@ -23,6 +23,7 @@ import ( "syscall" "os" + "time" v1 "k8s.io/api/core/v1" utilfeature "k8s.io/apiserver/pkg/util/feature" @@ -46,7 +47,10 @@ func SetVolumeOwnership(mounter Mounter, fsGroup *int64, fsGroupChangePolicy *v1 fsGroupPolicyEnabled := utilfeature.DefaultFeatureGate.Enabled(features.ConfigurableFSGroupPolicy) - klog.Warningf("Setting volume ownership for %s and fsGroup set. If the volume has a lot of files then setting volume ownership could be slow, see https://github.com/kubernetes/kubernetes/issues/69699", mounter.GetPath()) + timer := time.AfterFunc(30*time.Second, func() { + klog.Warningf("Setting volume ownership for %s and fsGroup set. If the volume has a lot of files then setting volume ownership could be slow, see https://github.com/kubernetes/kubernetes/issues/69699", mounter.GetPath()) + }) + defer timer.Stop() // This code exists for legacy purposes, so as old behaviour is entirely preserved when feature gate is disabled // TODO: remove this when ConfigurableFSGroupPolicy turns GA. diff --git a/vendor/k8s.io/kubernetes/test/e2e/framework/util.go b/vendor/k8s.io/kubernetes/test/e2e/framework/util.go index 2ba9a59526..77643eb954 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/framework/util.go +++ b/vendor/k8s.io/kubernetes/test/e2e/framework/util.go @@ -320,25 +320,6 @@ func WaitForPersistentVolumeDeleted(c clientset.Interface, pvName string, Poll, return fmt.Errorf("PersistentVolume %s still exists within %v", pvName, timeout) } -// findAvailableNamespaceName random namespace name starting with baseName. 
-func findAvailableNamespaceName(baseName string, c clientset.Interface) (string, error) { - var name string - err := wait.PollImmediate(Poll, 30*time.Second, func() (bool, error) { - name = fmt.Sprintf("%v-%v", baseName, RandomSuffix()) - _, err := c.CoreV1().Namespaces().Get(context.TODO(), name, metav1.GetOptions{}) - if err == nil { - // Already taken - return false, nil - } - if apierrors.IsNotFound(err) { - return true, nil - } - Logf("Unexpected error while getting namespace: %v", err) - return false, nil - }) - return name, err -} - // CreateTestingNS should be used by every test, note that we append a common prefix to the provided test name. // Please see NewFramework instead of using this directly. func CreateTestingNS(baseName string, c clientset.Interface, labels map[string]string) (*v1.Namespace, error) { @@ -350,10 +331,7 @@ func CreateTestingNS(baseName string, c clientset.Interface, labels map[string]s // We don't use ObjectMeta.GenerateName feature, as in case of API call // failure we don't know whether the namespace was created and what is its // name. - name, err := findAvailableNamespaceName(baseName, c) - if err != nil { - return nil, err - } + name := fmt.Sprintf("%v-%v", baseName, RandomSuffix()) namespaceObj := &v1.Namespace{ ObjectMeta: metav1.ObjectMeta{ @@ -369,7 +347,13 @@ func CreateTestingNS(baseName string, c clientset.Interface, labels map[string]s var err error got, err = c.CoreV1().Namespaces().Create(context.TODO(), namespaceObj, metav1.CreateOptions{}) if err != nil { - Logf("Unexpected error while creating namespace: %v", err) + if apierrors.IsAlreadyExists(err) { + // regenerate on conflict + Logf("Namespace name %q was already taken, generate a new name and retry", namespaceObj.Name) + namespaceObj.Name = fmt.Sprintf("%v-%v", baseName, RandomSuffix()) + } else { + Logf("Unexpected error while creating namespace: %v", err) + } return false, nil } return true, nil diff --git a/vendor/k8s.io/kubernetes/test/utils/image/manifest.go b/vendor/k8s.io/kubernetes/test/utils/image/manifest.go index e4bad51f65..6ca964a77d 100644 --- a/vendor/k8s.io/kubernetes/test/utils/image/manifest.go +++ b/vendor/k8s.io/kubernetes/test/utils/image/manifest.go @@ -31,14 +31,15 @@ type RegistryList struct { DockerLibraryRegistry string `yaml:"dockerLibraryRegistry"` DockerGluster string `yaml:"dockerGluster"` E2eRegistry string `yaml:"e2eRegistry"` + E2eVolumeRegistry string `yaml:"e2eVolumeRegistry"` PromoterE2eRegistry string `yaml:"promoterE2eRegistry"` InvalidRegistry string `yaml:"invalidRegistry"` GcRegistry string `yaml:"gcRegistry"` + SigStorageRegistry string `yaml:"sigStorageRegistry"` GcrReleaseRegistry string `yaml:"gcrReleaseRegistry"` GoogleContainerRegistry string `yaml:"googleContainerRegistry"` PrivateRegistry string `yaml:"privateRegistry"` SampleRegistry string `yaml:"sampleRegistry"` - K8sCSI string `yaml:"k8sCSI"` } // Config holds an images registry, name, and version @@ -69,15 +70,16 @@ func initReg() RegistryList { DockerLibraryRegistry: "docker.io/library", DockerGluster: "docker.io/gluster", E2eRegistry: "gcr.io/kubernetes-e2e-test-images", + E2eVolumeRegistry: "gcr.io/kubernetes-e2e-test-images/volume", // TODO: After the domain flip, this should instead be k8s.gcr.io/k8s-artifacts-prod/e2e-test-images PromoterE2eRegistry: "us.gcr.io/k8s-artifacts-prod/e2e-test-images", InvalidRegistry: "invalid.com/invalid", GcRegistry: "k8s.gcr.io", + SigStorageRegistry: "k8s.gcr.io/sig-storage", GcrReleaseRegistry: "gcr.io/gke-release", GoogleContainerRegistry: 
"gcr.io/google-containers", PrivateRegistry: "gcr.io/k8s-authenticated-test", SampleRegistry: "gcr.io/google-samples", - K8sCSI: "gcr.io/k8s-staging-csi", } repoList := os.Getenv("KUBE_TEST_REPO_LIST") if repoList == "" { @@ -101,13 +103,14 @@ var ( dockerLibraryRegistry = registry.DockerLibraryRegistry dockerGluster = registry.DockerGluster e2eRegistry = registry.E2eRegistry + e2eVolumeRegistry = registry.E2eVolumeRegistry promoterE2eRegistry = registry.PromoterE2eRegistry gcAuthenticatedRegistry = registry.GcAuthenticatedRegistry gcRegistry = registry.GcRegistry + sigStorageRegistry = registry.SigStorageRegistry gcrReleaseRegistry = registry.GcrReleaseRegistry googleContainerRegistry = registry.GoogleContainerRegistry invalidRegistry = registry.InvalidRegistry - k8sCSI = registry.K8sCSI // PrivateRegistry is an image repository that requires authentication PrivateRegistry = registry.PrivateRegistry sampleRegistry = registry.SampleRegistry @@ -224,7 +227,7 @@ func initImageConfigs() map[int]Config { configs[Mounttest] = Config{e2eRegistry, "mounttest", "1.0"} configs[MounttestUser] = Config{e2eRegistry, "mounttest-user", "1.0"} configs[Nautilus] = Config{e2eRegistry, "nautilus", "1.0"} - configs[NFSProvisioner] = Config{k8sCSI, "nfs-provisioner", "v2.2.2"} + configs[NFSProvisioner] = Config{sigStorageRegistry, "nfs-provisioner", "v2.2.2"} configs[Nginx] = Config{dockerLibraryRegistry, "nginx", "1.14-alpine"} configs[NginxNew] = Config{dockerLibraryRegistry, "nginx", "1.15-alpine"} configs[Nonewprivs] = Config{e2eRegistry, "nonewprivs", "1.0"} @@ -239,10 +242,10 @@ func initImageConfigs() map[int]Config { configs[ResourceConsumer] = Config{e2eRegistry, "resource-consumer", "1.5"} configs[SdDummyExporter] = Config{gcRegistry, "sd-dummy-exporter", "v0.2.0"} configs[StartupScript] = Config{googleContainerRegistry, "startup-script", "v1"} - configs[VolumeNFSServer] = Config{e2eRegistry, "volume/nfs", "1.0"} - configs[VolumeISCSIServer] = Config{e2eRegistry, "volume/iscsi", "2.0"} - configs[VolumeGlusterServer] = Config{e2eRegistry, "volume/gluster", "1.0"} - configs[VolumeRBDServer] = Config{e2eRegistry, "volume/rbd", "1.0.1"} + configs[VolumeNFSServer] = Config{e2eVolumeRegistry, "nfs", "1.0"} + configs[VolumeISCSIServer] = Config{e2eVolumeRegistry, "iscsi", "2.0"} + configs[VolumeGlusterServer] = Config{e2eVolumeRegistry, "gluster", "1.0"} + configs[VolumeRBDServer] = Config{e2eVolumeRegistry, "rbd", "1.0.1"} return configs } @@ -280,8 +283,12 @@ func ReplaceRegistryInImageURL(imageURL string) (string, error) { switch registryAndUser { case "gcr.io/kubernetes-e2e-test-images": registryAndUser = e2eRegistry + case "gcr.io/kubernetes-e2e-test-images/volume": + registryAndUser = e2eVolumeRegistry case "k8s.gcr.io": registryAndUser = gcRegistry + case "k8s.gcr.io/sig-storage": + registryAndUser = sigStorageRegistry case "gcr.io/k8s-authenticated-test": registryAndUser = PrivateRegistry case "gcr.io/google-samples": @@ -290,8 +297,6 @@ func ReplaceRegistryInImageURL(imageURL string) (string, error) { registryAndUser = gcrReleaseRegistry case "docker.io/library": registryAndUser = dockerLibraryRegistry - case "gcr.io/k8s-staging-csi": - registryAndUser = k8sCSI default: if countParts == 1 { // We assume we found an image from docker hub library diff --git a/vendor/k8s.io/legacy-cloud-providers/azure/azure.go b/vendor/k8s.io/legacy-cloud-providers/azure/azure.go index 4c0de1e29c..f5f73c3b89 100644 --- a/vendor/k8s.io/legacy-cloud-providers/azure/azure.go +++ 
b/vendor/k8s.io/legacy-cloud-providers/azure/azure.go @@ -241,7 +241,7 @@ type Cloud struct { ResourceRequestBackoff wait.Backoff metadata *InstanceMetadataService - vmSet VMSet + VMSet VMSet // ipv6DualStack allows overriding for unit testing. It's normally initialized from featuregates ipv6DualStackEnabled bool @@ -384,7 +384,7 @@ func (az *Cloud) InitializeCloudFromConfig(config *Config, fromSecret bool) erro // No credentials provided, useInstanceMetadata should be enabled for Kubelet. // TODO(feiskyer): print different error message for Kubelet and controller-manager, as they're // requiring different credential settings. - if !config.UseInstanceMetadata && az.Config.CloudConfigType == cloudConfigTypeFile { + if !config.UseInstanceMetadata && config.CloudConfigType == cloudConfigTypeFile { return fmt.Errorf("useInstanceMetadata must be enabled without Azure credentials") } @@ -484,12 +484,12 @@ func (az *Cloud) InitializeCloudFromConfig(config *Config, fromSecret bool) erro } if strings.EqualFold(vmTypeVMSS, az.Config.VMType) { - az.vmSet, err = newScaleSet(az) + az.VMSet, err = newScaleSet(az) if err != nil { return err } } else { - az.vmSet = newAvailabilitySet(az) + az.VMSet = newAvailabilitySet(az) } az.vmCache, err = az.newVMCache() diff --git a/vendor/k8s.io/legacy-cloud-providers/azure/azure_backoff.go b/vendor/k8s.io/legacy-cloud-providers/azure/azure_backoff.go index d17bb0cb5e..e5e0c98614 100644 --- a/vendor/k8s.io/legacy-cloud-providers/azure/azure_backoff.go +++ b/vendor/k8s.io/legacy-cloud-providers/azure/azure_backoff.go @@ -111,7 +111,7 @@ func (az *Cloud) getPrivateIPsForMachineWithRetry(nodeName types.NodeName) ([]st var privateIPs []string err := wait.ExponentialBackoff(az.RequestBackoff(), func() (bool, error) { var retryErr error - privateIPs, retryErr = az.vmSet.GetPrivateIPsByNodeName(string(nodeName)) + privateIPs, retryErr = az.VMSet.GetPrivateIPsByNodeName(string(nodeName)) if retryErr != nil { // won't retry since the instance doesn't exist on Azure. if retryErr == cloudprovider.InstanceNotFound { @@ -135,7 +135,7 @@ func (az *Cloud) GetIPForMachineWithRetry(name types.NodeName) (string, string, var ip, publicIP string err := wait.ExponentialBackoff(az.RequestBackoff(), func() (bool, error) { var retryErr error - ip, publicIP, retryErr = az.vmSet.GetIPByNodeName(string(name)) + ip, publicIP, retryErr = az.VMSet.GetIPByNodeName(string(name)) if retryErr != nil { klog.Errorf("GetIPForMachineWithRetry(%s): backoff failure, will retry,err=%v", name, retryErr) return false, nil diff --git a/vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_common.go b/vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_common.go index e36bd3e718..e0bd5bf42f 100644 --- a/vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_common.go +++ b/vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_common.go @@ -49,7 +49,9 @@ const ( errLeaseFailed = "AcquireDiskLeaseFailed" errLeaseIDMissing = "LeaseIdMissing" errContainerNotFound = "ContainerNotFound" - errDiskBlobNotFound = "DiskBlobNotFound" + errStatusCode400 = "statuscode=400" + errInvalidParameter = `code="invalidparameter"` + errTargetInstanceIds = `target="instanceids"` sourceSnapshot = "snapshot" sourceVolume = "volume" @@ -90,15 +92,15 @@ type controllerCommon struct { // getNodeVMSet gets the VMSet interface based on config.VMType and the real virtual machine type. func (c *controllerCommon) getNodeVMSet(nodeName types.NodeName, crt azcache.AzureCacheReadType) (VMSet, error) { - // 1. 
vmType is standard, return cloud.vmSet directly. + // 1. vmType is standard, return cloud.VMSet directly. if c.cloud.VMType == vmTypeStandard { - return c.cloud.vmSet, nil + return c.cloud.VMSet, nil } // 2. vmType is Virtual Machine Scale Set (vmss), convert vmSet to scaleSet. - ss, ok := c.cloud.vmSet.(*scaleSet) + ss, ok := c.cloud.VMSet.(*scaleSet) if !ok { - return nil, fmt.Errorf("error of converting vmSet (%q) to scaleSet with vmType %q", c.cloud.vmSet, c.cloud.VMType) + return nil, fmt.Errorf("error of converting vmSet (%q) to scaleSet with vmType %q", c.cloud.VMSet, c.cloud.VMType) } // 3. If the node is managed by availability set, then return ss.availabilitySet. @@ -214,24 +216,32 @@ func (c *controllerCommon) DetachDisk(diskName, diskURI string, nodeName types.N c.diskAttachDetachMap.Delete(strings.ToLower(diskURI)) c.vmLockMap.UnlockEntry(strings.ToLower(string(nodeName))) - if err != nil && retry.IsErrorRetriable(err) && c.cloud.CloudProviderBackoff { - klog.V(2).Infof("azureDisk - update backing off: detach disk(%s, %s), err: %v", diskName, diskURI, err) - retryErr := kwait.ExponentialBackoff(c.cloud.RequestBackoff(), func() (bool, error) { - c.vmLockMap.LockEntry(strings.ToLower(string(nodeName))) - c.diskAttachDetachMap.Store(strings.ToLower(diskURI), "detaching") - err := vmset.DetachDisk(diskName, diskURI, nodeName) - c.diskAttachDetachMap.Delete(strings.ToLower(diskURI)) - c.vmLockMap.UnlockEntry(strings.ToLower(string(nodeName))) - - retriable := false - if err != nil && retry.IsErrorRetriable(err) { - retriable = true + if err != nil { + if isInstanceNotFoundError(err) { + // if host doesn't exist, no need to detach + klog.Warningf("azureDisk - got InstanceNotFoundError(%v), DetachDisk(%s) will assume disk is already detached", + err, diskURI) + return nil + } + if retry.IsErrorRetriable(err) && c.cloud.CloudProviderBackoff { + klog.Warningf("azureDisk - update backing off: detach disk(%s, %s), err: %v", diskName, diskURI, err) + retryErr := kwait.ExponentialBackoff(c.cloud.RequestBackoff(), func() (bool, error) { + c.vmLockMap.LockEntry(strings.ToLower(string(nodeName))) + c.diskAttachDetachMap.Store(strings.ToLower(diskURI), "detaching") + err := vmset.DetachDisk(diskName, diskURI, nodeName) + c.diskAttachDetachMap.Delete(strings.ToLower(diskURI)) + c.vmLockMap.UnlockEntry(strings.ToLower(string(nodeName))) + + retriable := false + if err != nil && retry.IsErrorRetriable(err) { + retriable = true + } + return !retriable, err + }) + if retryErr != nil { + err = retryErr + klog.V(2).Infof("azureDisk - update abort backoff: detach disk(%s, %s), err: %v", diskName, diskURI, err) } - return !retriable, err - }) - if retryErr != nil { - err = retryErr - klog.V(2).Infof("azureDisk - update abort backoff: detach disk(%s, %s), err: %v", diskName, diskURI, err) } } if err != nil { @@ -426,3 +436,8 @@ func getValidCreationData(subscriptionID, resourceGroup, sourceResourceID, sourc SourceResourceID: &sourceResourceID, }, nil } + +func isInstanceNotFoundError(err error) bool { + errMsg := strings.ToLower(err.Error()) + return strings.Contains(errMsg, errStatusCode400) && strings.Contains(errMsg, errInvalidParameter) && strings.Contains(errMsg, errTargetInstanceIds) +} diff --git a/vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_standard.go b/vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_standard.go index 0c5f95e9c2..795622d475 100644 --- a/vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_standard.go +++ 
b/vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_standard.go @@ -142,7 +142,11 @@ func (as *availabilitySet) DetachDisk(diskName, diskURI string, nodeName types.N (disk.ManagedDisk != nil && diskURI != "" && strings.EqualFold(*disk.ManagedDisk.ID, diskURI)) { // found the disk klog.V(2).Infof("azureDisk - detach disk: name %q uri %q", diskName, diskURI) - disks[i].ToBeDetached = to.BoolPtr(true) + if strings.EqualFold(as.cloud.Environment.Name, "AZURESTACKCLOUD") { + disks = append(disks[:i], disks[i+1:]...) + } else { + disks[i].ToBeDetached = to.BoolPtr(true) + } bFoundDisk = true break } diff --git a/vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_vmss.go b/vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_vmss.go index 7757741113..42908f72dd 100644 --- a/vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_vmss.go +++ b/vendor/k8s.io/legacy-cloud-providers/azure/azure_controller_vmss.go @@ -147,7 +147,11 @@ func (ss *scaleSet) DetachDisk(diskName, diskURI string, nodeName types.NodeName (disk.ManagedDisk != nil && diskURI != "" && strings.EqualFold(*disk.ManagedDisk.ID, diskURI)) { // found the disk klog.V(2).Infof("azureDisk - detach disk: name %q uri %q", diskName, diskURI) - disks[i].ToBeDetached = to.BoolPtr(true) + if strings.EqualFold(ss.cloud.Environment.Name, "AZURESTACKCLOUD") { + disks = append(disks[:i], disks[i+1:]...) + } else { + disks[i].ToBeDetached = to.BoolPtr(true) + } bFoundDisk = true break } diff --git a/vendor/k8s.io/legacy-cloud-providers/azure/azure_fakes.go b/vendor/k8s.io/legacy-cloud-providers/azure/azure_fakes.go index 696a0beabc..ca44eb678b 100644 --- a/vendor/k8s.io/legacy-cloud-providers/azure/azure_fakes.go +++ b/vendor/k8s.io/legacy-cloud-providers/azure/azure_fakes.go @@ -761,6 +761,14 @@ func (fDC *fakeDisksClient) Get(ctx context.Context, resourceGroupName string, d errors.New("Not such Disk")) } +func (fDC *fakeDisksClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) ([]compute.Disk, *retry.Error) { + return []compute.Disk{}, nil +} + +func (fDC *fakeDisksClient) Update(ctx context.Context, resourceGroupName string, diskName string, diskParameter compute.DiskUpdate) *retry.Error { + return nil +} + // GetTestCloud returns a fake azure cloud for unit tests in Azure related CSI drivers func GetTestCloud(ctrl *gomock.Controller) (az *Cloud) { az = &Cloud{ @@ -800,7 +808,7 @@ func GetTestCloud(ctrl *gomock.Controller) (az *Cloud) { az.VirtualMachineScaleSetsClient = newFakeVirtualMachineScaleSetsClient() az.VirtualMachineScaleSetVMsClient = newFakeVirtualMachineScaleSetVMsClient() az.VirtualMachinesClient = newFakeAzureVirtualMachinesClient() - az.vmSet = newAvailabilitySet(az) + az.VMSet = newAvailabilitySet(az) az.vmCache, _ = az.newVMCache() az.lbCache, _ = az.newLBCache() az.nsgCache, _ = az.newNSGCache() diff --git a/vendor/k8s.io/legacy-cloud-providers/azure/azure_instances.go b/vendor/k8s.io/legacy-cloud-providers/azure/azure_instances.go index 91d5a59c94..8a3e50c88a 100644 --- a/vendor/k8s.io/legacy-cloud-providers/azure/azure_instances.go +++ b/vendor/k8s.io/legacy-cloud-providers/azure/azure_instances.go @@ -94,11 +94,11 @@ func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.N // Not local instance, get addresses from Azure ARM API. if !isLocalInstance { - if az.vmSet != nil { + if az.VMSet != nil { return addressGetter(name) } - // vmSet == nil indicates credentials are not provided. + // VMSet == nil indicates credentials are not provided. 
return nil, fmt.Errorf("no credentials provided for Azure cloud provider") } @@ -164,7 +164,7 @@ func (az *Cloud) NodeAddressesByProviderID(ctx context.Context, providerID strin return nil, nil } - name, err := az.vmSet.GetNodeNameByProviderID(providerID) + name, err := az.VMSet.GetNodeNameByProviderID(providerID) if err != nil { return nil, err } @@ -185,7 +185,7 @@ func (az *Cloud) InstanceExistsByProviderID(ctx context.Context, providerID stri return true, nil } - name, err := az.vmSet.GetNodeNameByProviderID(providerID) + name, err := az.VMSet.GetNodeNameByProviderID(providerID) if err != nil { if err == cloudprovider.InstanceNotFound { return false, nil @@ -210,7 +210,7 @@ func (az *Cloud) InstanceShutdownByProviderID(ctx context.Context, providerID st return false, nil } - nodeName, err := az.vmSet.GetNodeNameByProviderID(providerID) + nodeName, err := az.VMSet.GetNodeNameByProviderID(providerID) if err != nil { // Returns false, so the controller manager will continue to check InstanceExistsByProviderID(). if err == cloudprovider.InstanceNotFound { @@ -220,7 +220,7 @@ func (az *Cloud) InstanceShutdownByProviderID(ctx context.Context, providerID st return false, err } - powerStatus, err := az.vmSet.GetPowerStatusByNodeName(string(nodeName)) + powerStatus, err := az.VMSet.GetPowerStatusByNodeName(string(nodeName)) if err != nil { // Returns false, so the controller manager will continue to check InstanceExistsByProviderID(). if err == cloudprovider.InstanceNotFound { @@ -287,8 +287,8 @@ func (az *Cloud) InstanceID(ctx context.Context, name types.NodeName) (string, e // Not local instance, get instanceID from Azure ARM API. if !isLocalInstance { - if az.vmSet != nil { - return az.vmSet.GetInstanceIDByNodeName(nodeName) + if az.VMSet != nil { + return az.VMSet.GetInstanceIDByNodeName(nodeName) } // vmSet == nil indicates credentials are not provided. @@ -317,7 +317,7 @@ func (az *Cloud) InstanceID(ctx context.Context, name types.NodeName) (string, e return az.getVmssMachineID(subscriptionID, resourceGroup, ssName, instanceID), nil } - return az.vmSet.GetInstanceIDByNodeName(nodeName) + return az.VMSet.GetInstanceIDByNodeName(nodeName) } // InstanceTypeByProviderID returns the cloudprovider instance type of the node with the specified unique providerID @@ -334,7 +334,7 @@ func (az *Cloud) InstanceTypeByProviderID(ctx context.Context, providerID string return "", nil } - name, err := az.vmSet.GetNodeNameByProviderID(providerID) + name, err := az.VMSet.GetNodeNameByProviderID(providerID) if err != nil { return "", err } @@ -372,8 +372,8 @@ func (az *Cloud) InstanceType(ctx context.Context, name types.NodeName) (string, return "", err } if !isLocalInstance { - if az.vmSet != nil { - return az.vmSet.GetInstanceTypeByNodeName(string(name)) + if az.VMSet != nil { + return az.VMSet.GetInstanceTypeByNodeName(string(name)) } // vmSet == nil indicates credentials are not provided. 
@@ -385,7 +385,7 @@ func (az *Cloud) InstanceType(ctx context.Context, name types.NodeName) (string, } } - return az.vmSet.GetInstanceTypeByNodeName(string(name)) + return az.VMSet.GetInstanceTypeByNodeName(string(name)) } // AddSSHKeyToAllInstances adds an SSH public key as a legal identity for all instances diff --git a/vendor/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go b/vendor/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go index 9938651264..5cc7a415ce 100644 --- a/vendor/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go +++ b/vendor/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go @@ -36,6 +36,7 @@ import ( servicehelpers "k8s.io/cloud-provider/service/helpers" "k8s.io/klog" azcache "k8s.io/legacy-cloud-providers/azure/cache" + "k8s.io/legacy-cloud-providers/azure/retry" utilnet "k8s.io/utils/net" ) @@ -216,7 +217,7 @@ func (az *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName stri klog.V(5).Infof("Delete service (%s): START clusterName=%q", serviceName, clusterName) serviceIPToCleanup, err := az.findServiceIPAddress(ctx, clusterName, service, isInternal) - if err != nil { + if err != nil && !retry.HasStatusForbiddenOrIgnoredError(err) { return err } @@ -225,7 +226,7 @@ func (az *Cloud) EnsureLoadBalancerDeleted(ctx context.Context, clusterName stri return err } - if _, err := az.reconcileLoadBalancer(clusterName, service, nil, false /* wantLb */); err != nil { + if _, err := az.reconcileLoadBalancer(clusterName, service, nil, false /* wantLb */); err != nil && !retry.HasStatusForbiddenOrIgnoredError(err) { return err } @@ -258,7 +259,7 @@ func (az *Cloud) getLoadBalancerResourceGroup() string { func (az *Cloud) getServiceLoadBalancer(service *v1.Service, clusterName string, nodes []*v1.Node, wantLb bool) (lb *network.LoadBalancer, status *v1.LoadBalancerStatus, exists bool, err error) { isInternal := requiresInternalLoadBalancer(service) var defaultLB *network.LoadBalancer - primaryVMSetName := az.vmSet.GetPrimaryVMSetName() + primaryVMSetName := az.VMSet.GetPrimaryVMSetName() defaultLBName := az.getAzureLoadBalancerName(clusterName, primaryVMSetName, isInternal) existingLBs, err := az.ListLB(service) @@ -331,7 +332,7 @@ func (az *Cloud) selectLoadBalancer(clusterName string, service *v1.Service, exi isInternal := requiresInternalLoadBalancer(service) serviceName := getServiceName(service) klog.V(2).Infof("selectLoadBalancer for service (%s): isInternal(%v) - start", serviceName, isInternal) - vmSetNames, err := az.vmSet.GetVMSetNames(service, nodes) + vmSetNames, err := az.VMSet.GetVMSetNames(service, nodes) if err != nil { klog.Errorf("az.selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - az.GetVMSetNames failed, err=(%v)", clusterName, serviceName, isInternal, err) return nil, false, err @@ -937,7 +938,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, // Remove backend pools from vmSets. This is required for virtual machine scale sets before removing the LB. 
vmSetName := az.mapLoadBalancerNameToVMSet(lbName, clusterName) klog.V(10).Infof("EnsureBackendPoolDeleted(%s,%s) for service %s: start", lbBackendPoolID, vmSetName, serviceName) - err := az.vmSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, lb.BackendAddressPools) + err := az.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, lb.BackendAddressPools) if err != nil { klog.Errorf("EnsureBackendPoolDeleted(%s) for service %s failed: %v", lbBackendPoolID, serviceName, err) return nil, err @@ -981,7 +982,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, vmSetName := az.mapLoadBalancerNameToVMSet(lbName, clusterName) // Etag would be changed when updating backend pools, so invalidate lbCache after it. defer az.lbCache.Delete(lbName) - err := az.vmSet.EnsureHostsInPool(service, nodes, lbBackendPoolID, vmSetName, isInternal) + err := az.VMSet.EnsureHostsInPool(service, nodes, lbBackendPoolID, vmSetName, isInternal) if err != nil { return nil, err } diff --git a/vendor/k8s.io/legacy-cloud-providers/azure/azure_managedDiskController.go b/vendor/k8s.io/legacy-cloud-providers/azure/azure_managedDiskController.go index c4ed30da2b..10154a13d4 100644 --- a/vendor/k8s.io/legacy-cloud-providers/azure/azure_managedDiskController.go +++ b/vendor/k8s.io/legacy-cloud-providers/azure/azure_managedDiskController.go @@ -287,11 +287,15 @@ func (c *ManagedDiskController) ResizeDisk(diskURI string, oldSize resource.Quan return newSizeQuant, nil } - result.DiskProperties.DiskSizeGB = &requestGiB + diskParameter := compute.DiskUpdate{ + DiskUpdateProperties: &compute.DiskUpdateProperties{ + DiskSizeGB: &requestGiB, + }, + } ctx, cancel = getContextWithCancel() defer cancel() - if rerr := c.common.cloud.DisksClient.CreateOrUpdate(ctx, resourceGroup, diskName, result); rerr != nil { + if rerr := c.common.cloud.DisksClient.Update(ctx, resourceGroup, diskName, diskParameter); rerr != nil { return oldSize, rerr.Error() } diff --git a/vendor/k8s.io/legacy-cloud-providers/azure/azure_standard.go b/vendor/k8s.io/legacy-cloud-providers/azure/azure_standard.go index 5920ed22aa..d22edf0db4 100644 --- a/vendor/k8s.io/legacy-cloud-providers/azure/azure_standard.go +++ b/vendor/k8s.io/legacy-cloud-providers/azure/azure_standard.go @@ -135,7 +135,7 @@ func (az *Cloud) getNetworkResourceSubscriptionID() string { func (az *Cloud) mapLoadBalancerNameToVMSet(lbName string, clusterName string) (vmSetName string) { vmSetName = strings.TrimSuffix(lbName, InternalLoadBalancerNameSuffix) if strings.EqualFold(clusterName, vmSetName) { - vmSetName = az.vmSet.GetPrimaryVMSetName() + vmSetName = az.VMSet.GetPrimaryVMSetName() } return vmSetName @@ -150,7 +150,7 @@ func (az *Cloud) getAzureLoadBalancerName(clusterName string, vmSetName string, clusterName = az.LoadBalancerName } lbNamePrefix := vmSetName - if strings.EqualFold(vmSetName, az.vmSet.GetPrimaryVMSetName()) || az.useStandardLoadBalancer() { + if strings.EqualFold(vmSetName, az.VMSet.GetPrimaryVMSetName()) || az.useStandardLoadBalancer() { lbNamePrefix = clusterName } if isInternal { @@ -732,7 +732,7 @@ func (as *availabilitySet) EnsureHostInPool(service *v1.Service, nodeName types. 
return "", "", "", nil, nil } - klog.Errorf("error: az.EnsureHostInPool(%s), az.vmSet.GetPrimaryInterface.Get(%s, %s), err=%v", nodeName, vmName, vmSetName, err) + klog.Errorf("error: az.EnsureHostInPool(%s), az.VMSet.GetPrimaryInterface.Get(%s, %s), err=%v", nodeName, vmName, vmSetName, err) return "", "", "", nil, err } diff --git a/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss.go b/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss.go index c4fec3a940..c6a7978836 100644 --- a/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss.go +++ b/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss.go @@ -61,6 +61,13 @@ type vmssMetaInfo struct { resourceGroup string } +// nodeIdentity identifies a node within a subscription. +type nodeIdentity struct { + resourceGroup string + vmssName string + nodeName string +} + // scaleSet implements VMSet interface for Azure scale set. type scaleSet struct { *Cloud @@ -70,7 +77,7 @@ type scaleSet struct { availabilitySet VMSet vmssCache *azcache.TimedCache - vmssVMCache *azcache.TimedCache + vmssVMCache *sync.Map // [resourcegroup/vmssname]*azcache.TimedCache availabilitySetNodesCache *azcache.TimedCache } @@ -80,6 +87,7 @@ func newScaleSet(az *Cloud) (VMSet, error) { ss := &scaleSet{ Cloud: az, availabilitySet: newAvailabilitySet(az), + vmssVMCache: &sync.Map{}, } if !ss.DisableAvailabilitySetNodes { @@ -94,11 +102,6 @@ func newScaleSet(az *Cloud) (VMSet, error) { return nil, err } - ss.vmssVMCache, err = ss.newVMSSVirtualMachinesCache() - if err != nil { - return nil, err - } - return ss, nil } @@ -139,12 +142,17 @@ func (ss *scaleSet) getVMSS(vmssName string, crt azcache.AzureCacheReadType) (*c return vmss, nil } -// getVmssVM gets virtualMachineScaleSetVM by nodeName from cache. -// It returns cloudprovider.InstanceNotFound if node does not belong to any scale sets. -func (ss *scaleSet) getVmssVM(nodeName string, crt azcache.AzureCacheReadType) (string, string, *compute.VirtualMachineScaleSetVM, error) { +// getVmssVMByNodeIdentity find virtualMachineScaleSetVM by nodeIdentity, using node's parent VMSS cache. +// Returns cloudprovider.InstanceNotFound if the node does not belong to the scale set named in nodeIdentity. 
+func (ss *scaleSet) getVmssVMByNodeIdentity(node *nodeIdentity, crt azcache.AzureCacheReadType) (string, string, *compute.VirtualMachineScaleSetVM, error) { + cacheKey, cache, err := ss.getVMSSVMCache(node.resourceGroup, node.vmssName) + if err != nil { + return "", "", nil, err + } + getter := func(nodeName string, crt azcache.AzureCacheReadType) (string, string, *compute.VirtualMachineScaleSetVM, bool, error) { var found bool - cached, err := ss.vmssVMCache.Get(vmssVirtualMachinesKey, crt) + cached, err := cache.Get(cacheKey, crt) if err != nil { return "", "", nil, found, err } @@ -159,19 +167,19 @@ func (ss *scaleSet) getVmssVM(nodeName string, crt azcache.AzureCacheReadType) ( return "", "", nil, found, nil } - _, err := getScaleSetVMInstanceID(nodeName) + _, err = getScaleSetVMInstanceID(node.nodeName) if err != nil { return "", "", nil, err } - vmssName, instanceID, vm, found, err := getter(nodeName, crt) + vmssName, instanceID, vm, found, err := getter(node.nodeName, crt) if err != nil { return "", "", nil, err } if !found { - klog.V(2).Infof("Couldn't find VMSS VM with nodeName %s, refreshing the cache", nodeName) - vmssName, instanceID, vm, found, err = getter(nodeName, azcache.CacheReadTypeForceRefresh) + klog.V(2).Infof("Couldn't find VMSS VM with nodeName %s, refreshing the cache", node.nodeName) + vmssName, instanceID, vm, found, err = getter(node.nodeName, azcache.CacheReadTypeForceRefresh) if err != nil { return "", "", nil, err } @@ -187,6 +195,17 @@ func (ss *scaleSet) getVmssVM(nodeName string, crt azcache.AzureCacheReadType) ( return vmssName, instanceID, vm, nil } +// getVmssVM gets virtualMachineScaleSetVM by nodeName from cache. +// Returns cloudprovider.InstanceNotFound if nodeName does not belong to any scale set. +func (ss *scaleSet) getVmssVM(nodeName string, crt azcache.AzureCacheReadType) (string, string, *compute.VirtualMachineScaleSetVM, error) { + node, err := ss.getNodeIdentityByNodeName(nodeName, crt) + if err != nil { + return "", "", nil, err + } + + return ss.getVmssVMByNodeIdentity(node, crt) +} + // GetPowerStatusByNodeName returns the power state of the specified node. func (ss *scaleSet) GetPowerStatusByNodeName(name string) (powerState string, err error) { managedByAS, err := ss.isNodeManagedByAvailabilitySet(name, azcache.CacheReadTypeUnsafe) @@ -222,8 +241,13 @@ func (ss *scaleSet) GetPowerStatusByNodeName(name string) (powerState string, er // getCachedVirtualMachineByInstanceID gets scaleSetVMInfo from cache. // The node must belong to one of scale sets. 
func (ss *scaleSet) getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID string, crt azcache.AzureCacheReadType) (*compute.VirtualMachineScaleSetVM, error) { + cacheKey, cache, err := ss.getVMSSVMCache(resourceGroup, scaleSetName) + if err != nil { + return nil, err + } + getter := func(crt azcache.AzureCacheReadType) (vm *compute.VirtualMachineScaleSetVM, found bool, err error) { - cached, err := ss.vmssVMCache.Get(vmssVirtualMachinesKey, crt) + cached, err := cache.Get(cacheKey, crt) if err != nil { return nil, false, err } @@ -259,6 +283,13 @@ if found && vm != nil { return vm, nil } + if found && vm == nil { + klog.V(2).Infof("Couldn't find VMSS VM with scaleSetName %q and instanceID %q, refreshing the cache if it is expired", scaleSetName, instanceID) + vm, found, err = getter(azcache.CacheReadTypeDefault) + if err != nil { + return nil, err + } + } if !found || vm == nil { return nil, cloudprovider.InstanceNotFound } @@ -500,17 +531,21 @@ func (ss *scaleSet) GetPrivateIPsByNodeName(nodeName string) ([]string, error) { // This returns the full identifier of the primary NIC for the given VM. func (ss *scaleSet) getPrimaryInterfaceID(machine compute.VirtualMachineScaleSetVM) (string, error) { + if machine.NetworkProfile == nil || machine.NetworkProfile.NetworkInterfaces == nil { + return "", fmt.Errorf("failed to find the network interfaces for vm %s", to.String(machine.Name)) + } + if len(*machine.NetworkProfile.NetworkInterfaces) == 1 { return *(*machine.NetworkProfile.NetworkInterfaces)[0].ID, nil } for _, ref := range *machine.NetworkProfile.NetworkInterfaces { - if *ref.Primary { + if to.Bool(ref.Primary) { return *ref.ID, nil } } - return "", fmt.Errorf("failed to find a primary nic for the vm. vmname=%q", *machine.Name) + return "", fmt.Errorf("failed to find a primary nic for the vm. vmname=%q", to.String(machine.Name)) } // getVmssMachineID returns the full identifier of a vmss virtual machine. @@ -560,29 +595,64 @@ func extractResourceGroupByProviderID(providerID string) (string, error) { return matches[1], nil } -// listScaleSets lists all scale sets. -func (ss *scaleSet) listScaleSets(resourceGroup string) ([]string, error) { - ctx, cancel := getContextWithCancel() - defer cancel() +// getNodeIdentityByNodeName uses the VMSS cache to find a node's resource group and VMSS, returning them in a nodeIdentity. 
+func (ss *scaleSet) getNodeIdentityByNodeName(nodeName string, crt azcache.AzureCacheReadType) (*nodeIdentity, error) { + getter := func(nodeName string, crt azcache.AzureCacheReadType) (*nodeIdentity, error) { + node := &nodeIdentity{ + nodeName: nodeName, + } - allScaleSets, rerr := ss.VirtualMachineScaleSetsClient.List(ctx, resourceGroup) - if rerr != nil { - klog.Errorf("VirtualMachineScaleSetsClient.List failed: %v", rerr) - return nil, rerr.Error() + cached, err := ss.vmssCache.Get(vmssKey, crt) + if err != nil { + return nil, err + } + + vmsses := cached.(*sync.Map) + vmsses.Range(func(key, value interface{}) bool { + v := value.(*vmssEntry) + if v.vmss.Name == nil { + return true + } + + vmssPrefix := *v.vmss.Name + if v.vmss.VirtualMachineProfile != nil && + v.vmss.VirtualMachineProfile.OsProfile != nil && + v.vmss.VirtualMachineProfile.OsProfile.ComputerNamePrefix != nil { + vmssPrefix = *v.vmss.VirtualMachineProfile.OsProfile.ComputerNamePrefix + } + + if strings.EqualFold(vmssPrefix, nodeName[:len(nodeName)-6]) { + node.vmssName = *v.vmss.Name + node.resourceGroup = v.resourceGroup + return false + } + + return true + }) + return node, nil } - ssNames := make([]string, 0) - for _, vmss := range allScaleSets { - name := *vmss.Name - if vmss.Sku != nil && to.Int64(vmss.Sku.Capacity) == 0 { - klog.V(3).Infof("Capacity of VMSS %q is 0, skipping", name) - continue - } + if _, err := getScaleSetVMInstanceID(nodeName); err != nil { + return nil, err + } - ssNames = append(ssNames, name) + node, err := getter(nodeName, crt) + if err != nil { + return nil, err + } + if node.vmssName != "" { + return node, nil } - return ssNames, nil + klog.V(2).Infof("Couldn't find VMSS for node %s, refreshing the cache", nodeName) + node, err = getter(nodeName, azcache.CacheReadTypeForceRefresh) + if err != nil { + return nil, err + } + if node.vmssName == "" { + return nil, cloudprovider.InstanceNotFound + } + return node, nil } // listScaleSetVMs lists VMs belonging to the specified scale set. @@ -593,6 +663,9 @@ func (ss *scaleSet) listScaleSetVMs(scaleSetName, resourceGroup string) ([]compu allVMs, rerr := ss.VirtualMachineScaleSetVMsClient.List(ctx, resourceGroup, scaleSetName, string(compute.InstanceView)) if rerr != nil { klog.Errorf("VirtualMachineScaleSetVMsClient.List failed: %v", rerr) + if rerr.IsNotFound() { + return nil, cloudprovider.InstanceNotFound + } return nil, rerr.Error() } @@ -949,6 +1022,12 @@ func (ss *scaleSet) ensureVMSSInPool(service *v1.Service, nodes []*v1.Node, back if ss.excludeMasterNodesFromStandardLB() && isMasterNode(node) { continue } + + if ss.ShouldNodeExcludedFromLoadBalancer(node) { + klog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", node.Name) + continue + } + // in this scenario the vmSetName is an empty string and the name of vmss should be obtained from the provider IDs of nodes resourceGroupName, vmssName, err := getVmssAndResourceGroupNameByVMProviderID(node.Spec.ProviderID) if err != nil { @@ -1286,6 +1365,9 @@ func (ss *scaleSet) ensureBackendPoolDeletedFromVMSS(service *v1.Service, backen for vmssName := range vmssNamesMap { vmss, err := ss.getVMSS(vmssName, azcache.CacheReadTypeDefault) + if err != nil { + return err + } // When vmss is being deleted, CreateOrUpdate API would report "the vmss is being deleted" error. // Since it is being deleted, we shouldn't send more CreateOrUpdate requests for it. 
@@ -1293,10 +1375,6 @@ func (ss *scaleSet) ensureBackendPoolDeletedFromVMSS(service *v1.Service, backen klog.V(3).Infof("ensureVMSSInPool: found vmss %s being deleted, skipping", vmssName) continue } - - if err != nil { - return err - } if vmss.VirtualMachineProfile.NetworkProfile.NetworkInterfaceConfigurations == nil { klog.V(4).Infof("EnsureHostInPool: cannot obtain the primary network interface configuration, of vmss %s", vmssName) continue diff --git a/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss_cache.go b/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss_cache.go index 5a820e4bf1..b743604003 100644 --- a/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss_cache.go +++ b/vendor/k8s.io/legacy-cloud-providers/azure/azure_vmss_cache.go @@ -20,6 +20,7 @@ package azure import ( "context" + "fmt" "strings" "sync" "time" @@ -36,7 +37,6 @@ var ( vmssNameSeparator = "_" vmssKey = "k8svmssKey" - vmssVirtualMachinesKey = "k8svmssVirtualMachinesKey" availabilitySetNodesKey = "k8sAvailabilitySetNodesKey" availabilitySetNodesCacheTTLDefaultInSeconds = 900 @@ -53,8 +53,9 @@ type vmssVirtualMachinesEntry struct { } type vmssEntry struct { - vmss *compute.VirtualMachineScaleSet - lastUpdate time.Time + vmss *compute.VirtualMachineScaleSet + resourceGroup string + lastUpdate time.Time } func (ss *scaleSet) newVMSSCache() (*azcache.TimedCache, error) { @@ -80,8 +81,9 @@ func (ss *scaleSet) newVMSSCache() (*azcache.TimedCache, error) { continue } localCache.Store(*scaleSet.Name, &vmssEntry{ - vmss: &scaleSet, - lastUpdate: time.Now().UTC(), + vmss: &scaleSet, + resourceGroup: resourceGroup, + lastUpdate: time.Now().UTC(), }) } } @@ -109,15 +111,63 @@ func extractVmssVMName(name string) (string, string, error) { return ssName, instanceID, nil } -func (ss *scaleSet) newVMSSVirtualMachinesCache() (*azcache.TimedCache, error) { +// getVMSSVMCache returns an *azcache.TimedCache and cache key for a VMSS (creating that cache if new). +func (ss *scaleSet) getVMSSVMCache(resourceGroup, vmssName string) (string, *azcache.TimedCache, error) { + cacheKey := strings.ToLower(fmt.Sprintf("%s/%s", resourceGroup, vmssName)) + if entry, ok := ss.vmssVMCache.Load(cacheKey); ok { + cache := entry.(*azcache.TimedCache) + return cacheKey, cache, nil + } + + cache, err := ss.newVMSSVirtualMachinesCache(resourceGroup, vmssName, cacheKey) + if err != nil { + return "", nil, err + } + ss.vmssVMCache.Store(cacheKey, cache) + return cacheKey, cache, nil +} + +// gcVMSSVMCache deletes stale VMSS VM caches from deleted VMSSes. +func (ss *scaleSet) gcVMSSVMCache() error { + cached, err := ss.vmssCache.Get(vmssKey, azcache.CacheReadTypeUnsafe) + if err != nil { + return err + } + + vmsses := cached.(*sync.Map) + removed := map[string]bool{} + ss.vmssVMCache.Range(func(key, value interface{}) bool { + cacheKey := key.(string) + vlistIdx := cacheKey[strings.LastIndex(cacheKey, "/")+1:] + if _, ok := vmsses.Load(vlistIdx); !ok { + removed[cacheKey] = true + } + return true + }) + + for key := range removed { + ss.vmssVMCache.Delete(key) + } + + return nil +} + +// newVMSSVirtualMachinesCache instantiates a new VMs cache for VMs belonging to the provided VMSS. 
+func (ss *scaleSet) newVMSSVirtualMachinesCache(resourceGroupName, vmssName, cacheKey string) (*azcache.TimedCache, error) { + if ss.Config.VmssVirtualMachinesCacheTTLInSeconds == 0 { + ss.Config.VmssVirtualMachinesCacheTTLInSeconds = vmssVirtualMachinesCacheTTLDefaultInSeconds + } + vmssVirtualMachinesCacheTTL := time.Duration(ss.Config.VmssVirtualMachinesCacheTTLInSeconds) * time.Second + getter := func(key string) (interface{}, error) { localCache := &sync.Map{} // [nodeName]*vmssVirtualMachinesEntry oldCache := make(map[string]vmssVirtualMachinesEntry) - if ss.vmssVMCache != nil { + if vmssCache, ok := ss.vmssVMCache.Load(cacheKey); ok { // get old cache before refreshing the cache - entry, exists, err := ss.vmssVMCache.Store.GetByKey(vmssVirtualMachinesKey) + cache := vmssCache.(*azcache.TimedCache) + entry, exists, err := cache.Store.GetByKey(cacheKey) if err != nil { return nil, err } @@ -133,97 +183,94 @@ func (ss *scaleSet) newVMSSVirtualMachinesCache() (*azcache.TimedCache, error) { } } - allResourceGroups, err := ss.GetResourceGroups() + vms, err := ss.listScaleSetVMs(vmssName, resourceGroupName) if err != nil { return nil, err } - for _, resourceGroup := range allResourceGroups.List() { - scaleSetNames, err := ss.listScaleSets(resourceGroup) - if err != nil { - return nil, err + for i := range vms { + vm := vms[i] + if vm.OsProfile == nil || vm.OsProfile.ComputerName == nil { + klog.Warningf("failed to get computerName for vmssVM (%q)", vmssName) + continue } - for _, ssName := range scaleSetNames { - vms, err := ss.listScaleSetVMs(ssName, resourceGroup) - if err != nil { - return nil, err - } - - for i := range vms { - vm := vms[i] - if vm.OsProfile == nil || vm.OsProfile.ComputerName == nil { - klog.Warningf("failed to get computerName for vmssVM (%q)", ssName) - continue - } - - computerName := strings.ToLower(*vm.OsProfile.ComputerName) - vmssVMCacheEntry := &vmssVirtualMachinesEntry{ - resourceGroup: resourceGroup, - vmssName: ssName, - instanceID: to.String(vm.InstanceID), - virtualMachine: &vm, - lastUpdate: time.Now().UTC(), - } - // set cache entry to nil when the VM is under deleting. - if vm.VirtualMachineScaleSetVMProperties != nil && - strings.EqualFold(to.String(vm.VirtualMachineScaleSetVMProperties.ProvisioningState), string(compute.ProvisioningStateDeleting)) { - klog.V(4).Infof("VMSS virtualMachine %q is under deleting, setting its cache to nil", computerName) - vmssVMCacheEntry.virtualMachine = nil - } - localCache.Store(computerName, vmssVMCacheEntry) - - if _, exists := oldCache[computerName]; exists { - delete(oldCache, computerName) - } - } + computerName := strings.ToLower(*vm.OsProfile.ComputerName) + vmssVMCacheEntry := &vmssVirtualMachinesEntry{ + resourceGroup: resourceGroupName, + vmssName: vmssName, + instanceID: to.String(vm.InstanceID), + virtualMachine: &vm, + lastUpdate: time.Now().UTC(), } + // set cache entry to nil when the VM is under deleting. 
+ if vm.VirtualMachineScaleSetVMProperties != nil && + strings.EqualFold(to.String(vm.VirtualMachineScaleSetVMProperties.ProvisioningState), string(compute.ProvisioningStateDeleting)) { + klog.V(4).Infof("VMSS virtualMachine %q is under deleting, setting its cache to nil", computerName) + vmssVMCacheEntry.virtualMachine = nil + } + localCache.Store(computerName, vmssVMCacheEntry) - // add old missing cache data with nil entries to prevent aggressive - // ARM calls during cache invalidation - for name, vmEntry := range oldCache { - // if the nil cache entry has existed for 15 minutes in the cache - // then it should not be added back to the cache - if vmEntry.virtualMachine == nil || time.Since(vmEntry.lastUpdate) > 15*time.Minute { - klog.V(5).Infof("ignoring expired entries from old cache for %s", name) - continue - } - lastUpdate := time.Now().UTC() - if vmEntry.virtualMachine == nil { - // if this is already a nil entry then keep the time the nil - // entry was first created, so we can cleanup unwanted entries - lastUpdate = vmEntry.lastUpdate - } + delete(oldCache, computerName) + } - klog.V(5).Infof("adding old entries to new cache for %s", name) - localCache.Store(name, &vmssVirtualMachinesEntry{ - resourceGroup: vmEntry.resourceGroup, - vmssName: vmEntry.vmssName, - instanceID: vmEntry.instanceID, - virtualMachine: nil, - lastUpdate: lastUpdate, - }) + // add old missing cache data with nil entries to prevent aggressive + // ARM calls during cache invalidation + for name, vmEntry := range oldCache { + // if the nil cache entry has existed for vmssVirtualMachinesCacheTTL in the cache + // then it should not be added back to the cache + if vmEntry.virtualMachine == nil && time.Since(vmEntry.lastUpdate) > vmssVirtualMachinesCacheTTL { + klog.V(5).Infof("ignoring expired entries from old cache for %s", name) + continue + } + lastUpdate := time.Now().UTC() + if vmEntry.virtualMachine == nil { + // if this is already a nil entry then keep the time the nil + // entry was first created, so we can cleanup unwanted entries + lastUpdate = vmEntry.lastUpdate } + + klog.V(5).Infof("adding old entries to new cache for %s", name) + localCache.Store(name, &vmssVirtualMachinesEntry{ + resourceGroup: vmEntry.resourceGroup, + vmssName: vmEntry.vmssName, + instanceID: vmEntry.instanceID, + virtualMachine: nil, + lastUpdate: lastUpdate, + }) } return localCache, nil } - if ss.Config.VmssVirtualMachinesCacheTTLInSeconds == 0 { - ss.Config.VmssVirtualMachinesCacheTTLInSeconds = vmssVirtualMachinesCacheTTLDefaultInSeconds - } - return azcache.NewTimedcache(time.Duration(ss.Config.VmssVirtualMachinesCacheTTLInSeconds)*time.Second, getter) + return azcache.NewTimedcache(vmssVirtualMachinesCacheTTL, getter) } func (ss *scaleSet) deleteCacheForNode(nodeName string) error { - cached, err := ss.vmssVMCache.Get(vmssVirtualMachinesKey, azcache.CacheReadTypeUnsafe) + node, err := ss.getNodeIdentityByNodeName(nodeName, azcache.CacheReadTypeUnsafe) if err != nil { klog.Errorf("deleteCacheForNode(%s) failed with error: %v", nodeName, err) return err } - virtualMachines := cached.(*sync.Map) + cacheKey, timedcache, err := ss.getVMSSVMCache(node.resourceGroup, node.vmssName) + if err != nil { + klog.Errorf("deleteCacheForNode(%s) failed with error: %v", nodeName, err) + return err + } + + vmcache, err := timedcache.Get(cacheKey, azcache.CacheReadTypeUnsafe) + if err != nil { + klog.Errorf("deleteCacheForNode(%s) failed with error: %v", nodeName, err) + return err + } + virtualMachines := vmcache.(*sync.Map) 
virtualMachines.Delete(nodeName) + + if err := ss.gcVMSSVMCache(); err != nil { + klog.Errorf("deleteCacheForNode(%s) failed to gc stale vmss caches: %v", nodeName, err) + } + return nil } diff --git a/vendor/k8s.io/legacy-cloud-providers/azure/azure_zones.go b/vendor/k8s.io/legacy-cloud-providers/azure/azure_zones.go index c6a5e96a41..b6aba67de3 100644 --- a/vendor/k8s.io/legacy-cloud-providers/azure/azure_zones.go +++ b/vendor/k8s.io/legacy-cloud-providers/azure/azure_zones.go @@ -87,7 +87,7 @@ func (az *Cloud) GetZone(ctx context.Context) (cloudprovider.Zone, error) { if err != nil { return cloudprovider.Zone{}, fmt.Errorf("failure getting hostname from kernel") } - return az.vmSet.GetZoneByNodeName(strings.ToLower(hostname)) + return az.VMSet.GetZoneByNodeName(strings.ToLower(hostname)) } // GetZoneByProviderID implements Zones.GetZoneByProviderID @@ -104,7 +104,7 @@ func (az *Cloud) GetZoneByProviderID(ctx context.Context, providerID string) (cl return cloudprovider.Zone{}, nil } - nodeName, err := az.vmSet.GetNodeNameByProviderID(providerID) + nodeName, err := az.VMSet.GetNodeNameByProviderID(providerID) if err != nil { return cloudprovider.Zone{}, err } @@ -126,5 +126,5 @@ func (az *Cloud) GetZoneByNodeName(ctx context.Context, nodeName types.NodeName) return cloudprovider.Zone{}, nil } - return az.vmSet.GetZoneByNodeName(string(nodeName)) + return az.VMSet.GetZoneByNodeName(string(nodeName)) } diff --git a/vendor/k8s.io/legacy-cloud-providers/azure/clients/diskclient/BUILD b/vendor/k8s.io/legacy-cloud-providers/azure/clients/diskclient/BUILD index 47a714646c..e81ba8ef55 100644 --- a/vendor/k8s.io/legacy-cloud-providers/azure/clients/diskclient/BUILD +++ b/vendor/k8s.io/legacy-cloud-providers/azure/clients/diskclient/BUILD @@ -19,6 +19,7 @@ go_library( "//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest/azure:go_default_library", + "//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library", "//vendor/k8s.io/klog:go_default_library", ], ) @@ -31,6 +32,7 @@ go_test( "//staging/src/k8s.io/legacy-cloud-providers/azure/clients:go_default_library", "//staging/src/k8s.io/legacy-cloud-providers/azure/clients/armclient:go_default_library", "//staging/src/k8s.io/legacy-cloud-providers/azure/clients/armclient/mockarmclient:go_default_library", + "//staging/src/k8s.io/legacy-cloud-providers/azure/retry:go_default_library", "//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library", diff --git a/vendor/k8s.io/legacy-cloud-providers/azure/clients/diskclient/azure_diskclient.go b/vendor/k8s.io/legacy-cloud-providers/azure/clients/diskclient/azure_diskclient.go index d61e91abc1..8ea2e0152d 100644 --- a/vendor/k8s.io/legacy-cloud-providers/azure/clients/diskclient/azure_diskclient.go +++ b/vendor/k8s.io/legacy-cloud-providers/azure/clients/diskclient/azure_diskclient.go @@ -20,12 +20,14 @@ package diskclient import ( "context" + "fmt" "net/http" "time" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute" "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/to" "k8s.io/client-go/util/flowcontrol" "k8s.io/klog" @@ -204,6 +206,74 @@ func (c 
*Client) createOrUpdateResponder(resp *http.Response) (*compute.Disk, *r return result, retry.GetError(resp, err) } +// Update creates or updates a Disk. +func (c *Client) Update(ctx context.Context, resourceGroupName string, diskName string, diskParameter compute.DiskUpdate) *retry.Error { + mc := metrics.NewMetricContext("disks", "update", resourceGroupName, c.subscriptionID, "") + + // Report errors if the client is rate limited. + if !c.rateLimiterWriter.TryAccept() { + mc.RateLimitedCount() + return retry.GetRateLimitError(true, "DiskUpdate") + } + + // Report errors if the client is throttled. + if c.RetryAfterWriter.After(time.Now()) { + mc.ThrottledCount() + rerr := retry.GetThrottlingError("DiskUpdate", "client throttled", c.RetryAfterWriter) + return rerr + } + + rerr := c.updateDisk(ctx, resourceGroupName, diskName, diskParameter) + mc.Observe(rerr.Error()) + if rerr != nil { + if rerr.IsThrottled() { + // Update RetryAfterReader so that no more requests would be sent until RetryAfter expires. + c.RetryAfterWriter = rerr.RetryAfter + } + + return rerr + } + + return nil +} + +// updateDisk updates a Disk. +func (c *Client) updateDisk(ctx context.Context, resourceGroupName string, diskName string, diskParameter compute.DiskUpdate) *retry.Error { + resourceID := armclient.GetResourceID( + c.subscriptionID, + resourceGroupName, + "Microsoft.Compute/disks", + diskName, + ) + + response, rerr := c.armClient.PatchResource(ctx, resourceID, diskParameter) + defer c.armClient.CloseResponse(ctx, response) + if rerr != nil { + klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "disk.put.request", resourceID, rerr.Error()) + return rerr + } + + if response != nil && response.StatusCode != http.StatusNoContent { + _, rerr = c.updateResponder(response) + if rerr != nil { + klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "disk.put.respond", resourceID, rerr.Error()) + return rerr + } + } + + return nil +} + +func (c *Client) updateResponder(resp *http.Response) (*compute.Disk, *retry.Error) { + result := &compute.Disk{} + err := autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result)) + result.Response = autorest.Response{Response: resp} + return result, retry.GetError(resp, err) +} + // Delete deletes a Disk by name. func (c *Client) Delete(ctx context.Context, resourceGroupName string, diskName string) *retry.Error { mc := metrics.NewMetricContext("disks", "delete", resourceGroupName, c.subscriptionID, "") @@ -246,3 +316,126 @@ func (c *Client) deleteDisk(ctx context.Context, resourceGroupName string, diskN return c.armClient.DeleteResource(ctx, resourceID, "") } + +// ListByResourceGroup lists all the disks under a resource group. 
+func (c *Client) ListByResourceGroup(ctx context.Context, resourceGroupName string) ([]compute.Disk, *retry.Error) { + resourceID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks", + autorest.Encode("path", c.subscriptionID), + autorest.Encode("path", resourceGroupName)) + + result := make([]compute.Disk, 0) + page := &DiskListPage{} + page.fn = c.listNextResults + + resp, rerr := c.armClient.GetResource(ctx, resourceID, "") + defer c.armClient.CloseResponse(ctx, resp) + if rerr != nil { + klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "disk.list.request", resourceID, rerr.Error()) + return result, rerr + } + + var err error + page.dl, err = c.listResponder(resp) + if err != nil { + klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "disk.list.respond", resourceID, err) + return result, retry.GetError(resp, err) + } + + for page.NotDone() { + result = append(result, *page.Response().Value...) + if err = page.NextWithContext(ctx); err != nil { + klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "disk.list.next", resourceID, err) + return result, retry.GetError(page.Response().Response.Response, err) + } + } + + return result, nil +} + +// listNextResults retrieves the next set of results, if any. +func (c *Client) listNextResults(ctx context.Context, lastResults compute.DiskList) (result compute.DiskList, err error) { + req, err := c.diskListPreparer(ctx, lastResults) + if err != nil { + return result, autorest.NewErrorWithError(err, "diskclient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, rerr := c.armClient.Send(ctx, req) + defer c.armClient.CloseResponse(ctx, resp) + if rerr != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(rerr.Error(), "diskclient", "listNextResults", resp, "Failure sending next results request") + } + + result, err = c.listResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "diskclient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// listResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (c *Client) listResponder(resp *http.Response) (result compute.DiskList, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +func (c *Client) diskListPreparer(ctx context.Context, lr compute.DiskList) (*http.Request, error) { + if lr.NextLink == nil || len(to.String(lr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(lr.NextLink))) +} + +// DiskListPage contains a page of Disk values. +type DiskListPage struct { + fn func(context.Context, compute.DiskList) (compute.DiskList, error) + dl compute.DiskList +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *DiskListPage) NextWithContext(ctx context.Context) (err error) { + next, err := page.fn(ctx, page.dl) + if err != nil { + return err + } + page.dl = next + return nil +} + +// Next advances to the next page of values. 
If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *DiskListPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page DiskListPage) NotDone() bool { + return !page.dl.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page DiskListPage) Response() compute.DiskList { + return page.dl +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page DiskListPage) Values() []compute.Disk { + if page.dl.IsEmpty() { + return nil + } + return *page.dl.Value +} diff --git a/vendor/k8s.io/legacy-cloud-providers/azure/clients/diskclient/interface.go b/vendor/k8s.io/legacy-cloud-providers/azure/clients/diskclient/interface.go index 004fe1e09d..4e53e3c795 100644 --- a/vendor/k8s.io/legacy-cloud-providers/azure/clients/diskclient/interface.go +++ b/vendor/k8s.io/legacy-cloud-providers/azure/clients/diskclient/interface.go @@ -40,6 +40,12 @@ type Interface interface { // CreateOrUpdate creates or updates a Disk. CreateOrUpdate(ctx context.Context, resourceGroupName string, diskName string, diskParameter compute.Disk) *retry.Error + // Update updates a Disk. + Update(ctx context.Context, resourceGroupName string, diskName string, diskParameter compute.DiskUpdate) *retry.Error + // Delete deletes a Disk by name. Delete(ctx context.Context, resourceGroupName string, diskName string) *retry.Error + + // ListByResourceGroup lists all the disks under a resource group. + ListByResourceGroup(ctx context.Context, resourceGroupName string) ([]compute.Disk, *retry.Error) } diff --git a/vendor/k8s.io/legacy-cloud-providers/azure/metrics/azure_metrics.go b/vendor/k8s.io/legacy-cloud-providers/azure/metrics/azure_metrics.go index 93c51daa87..928c9c42fb 100644 --- a/vendor/k8s.io/legacy-cloud-providers/azure/metrics/azure_metrics.go +++ b/vendor/k8s.io/legacy-cloud-providers/azure/metrics/azure_metrics.go @@ -86,6 +86,7 @@ func registerAPIMetrics(attributes ...string) *apiCallMetrics { &metrics.HistogramOpts{ Name: "cloudprovider_azure_api_request_duration_seconds", Help: "Latency of an Azure API call", + Buckets: []float64{.1, .25, .5, 1, 2.5, 5, 10, 15, 25, 50, 120, 300, 600, 1200}, StabilityLevel: metrics.ALPHA, }, attributes, diff --git a/vendor/k8s.io/legacy-cloud-providers/azure/retry/azure_error.go b/vendor/k8s.io/legacy-cloud-providers/azure/retry/azure_error.go index e2fffdbcdf..fb7a7bab8a 100644 --- a/vendor/k8s.io/legacy-cloud-providers/azure/retry/azure_error.go +++ b/vendor/k8s.io/legacy-cloud-providers/azure/retry/azure_error.go @@ -89,6 +89,15 @@ func (err *Error) IsThrottled() bool { return err.HTTPStatusCode == http.StatusTooManyRequests || err.RetryAfter.After(now()) } +// IsNotFound returns true the if the requested object wasn't found +func (err *Error) IsNotFound() bool { + if err == nil { + return false + } + + return err.HTTPStatusCode == http.StatusNotFound +} + // NewError creates a new Error. 
func NewError(retriable bool, err error) *Error { return &Error{ @@ -286,3 +295,20 @@ func IsErrorRetriable(err error) bool { return strings.Contains(err.Error(), "Retriable: true") } + +// HasStatusForbiddenOrIgnoredError return true if the given error code is part of the error message +// This should only be used when trying to delete resources +func HasStatusForbiddenOrIgnoredError(err error) bool { + if err == nil { + return false + } + + if strings.Contains(err.Error(), fmt.Sprintf("HTTPStatusCode: %d", http.StatusNotFound)) { + return true + } + + if strings.Contains(err.Error(), fmt.Sprintf("HTTPStatusCode: %d", http.StatusForbidden)) { + return true + } + return false +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 732831ee49..8b84c2a9f3 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -59,7 +59,7 @@ github.com/docker/spdystream/spdy # github.com/emicklei/go-restful v2.9.5+incompatible github.com/emicklei/go-restful github.com/emicklei/go-restful/log -# github.com/evanphx/json-patch v4.2.0+incompatible +# github.com/evanphx/json-patch v4.9.0+incompatible github.com/evanphx/json-patch # github.com/go-openapi/jsonpointer v0.19.3 github.com/go-openapi/jsonpointer @@ -354,7 +354,7 @@ gopkg.in/square/go-jose.v2/jwt gopkg.in/tomb.v1 # gopkg.in/yaml.v2 v2.2.8 gopkg.in/yaml.v2 -# k8s.io/api v0.0.0 => k8s.io/kubernetes/staging/src/k8s.io/api v0.0.0-20200715165012-dff82dc0de47 +# k8s.io/api v0.0.0 => k8s.io/kubernetes/staging/src/k8s.io/api v0.0.0-20201015014302-62876fc6d93e k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 k8s.io/api/admissionregistration/v1 @@ -397,7 +397,7 @@ k8s.io/api/settings/v1alpha1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apimachinery v0.0.0 => k8s.io/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20200715165012-dff82dc0de47 +# k8s.io/apimachinery v0.0.0 => k8s.io/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20201015014302-62876fc6d93e k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors k8s.io/apimachinery/pkg/api/meta @@ -453,7 +453,7 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.0.0 => k8s.io/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20200715165012-dff82dc0de47 +# k8s.io/apiserver v0.0.0 => k8s.io/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20201015014302-62876fc6d93e k8s.io/apiserver/pkg/admission k8s.io/apiserver/pkg/admission/configuration k8s.io/apiserver/pkg/admission/initializer @@ -575,7 +575,7 @@ k8s.io/apiserver/plugin/pkg/audit/truncate k8s.io/apiserver/plugin/pkg/audit/webhook k8s.io/apiserver/plugin/pkg/authenticator/token/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook -# k8s.io/client-go v0.0.0 => k8s.io/kubernetes/staging/src/k8s.io/client-go v0.0.0-20200715165012-dff82dc0de47 +# k8s.io/client-go v0.0.0 => k8s.io/kubernetes/staging/src/k8s.io/client-go v0.0.0-20201015014302-62876fc6d93e k8s.io/client-go/discovery k8s.io/client-go/discovery/cached/memory k8s.io/client-go/dynamic @@ -759,7 +759,7 @@ k8s.io/client-go/util/homedir k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry k8s.io/client-go/util/workqueue -# k8s.io/cloud-provider v0.0.0 => k8s.io/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20200715165012-dff82dc0de47 +# k8s.io/cloud-provider v0.0.0 => k8s.io/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20201015014302-62876fc6d93e 
k8s.io/cloud-provider k8s.io/cloud-provider/api k8s.io/cloud-provider/node/helpers @@ -767,7 +767,7 @@ k8s.io/cloud-provider/service/helpers k8s.io/cloud-provider/volume k8s.io/cloud-provider/volume/errors k8s.io/cloud-provider/volume/helpers -# k8s.io/component-base v0.0.0 => k8s.io/kubernetes/staging/src/k8s.io/component-base v0.0.0-20200715165012-dff82dc0de47 +# k8s.io/component-base v0.0.0 => k8s.io/kubernetes/staging/src/k8s.io/component-base v0.0.0-20201015014302-62876fc6d93e k8s.io/component-base/cli/flag k8s.io/component-base/cli/globalflag k8s.io/component-base/config @@ -785,14 +785,14 @@ k8s.io/component-base/metrics/prometheus/workqueue k8s.io/component-base/metrics/testutil k8s.io/component-base/version k8s.io/component-base/version/verflag -# k8s.io/cri-api v0.0.0 => k8s.io/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20200715165012-dff82dc0de47 +# k8s.io/cri-api v0.0.0 => k8s.io/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20201015014302-62876fc6d93e k8s.io/cri-api/pkg/apis/runtime/v1alpha2 -# k8s.io/csi-translation-lib v0.0.0 => k8s.io/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20200715165012-dff82dc0de47 +# k8s.io/csi-translation-lib v0.0.0 => k8s.io/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20201015014302-62876fc6d93e k8s.io/csi-translation-lib k8s.io/csi-translation-lib/plugins # k8s.io/klog v1.0.0 k8s.io/klog -# k8s.io/kube-controller-manager v0.0.0 => k8s.io/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20200715165012-dff82dc0de47 +# k8s.io/kube-controller-manager v0.0.0 => k8s.io/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20201015014302-62876fc6d93e k8s.io/kube-controller-manager/config/v1alpha1 # k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6 k8s.io/kube-openapi/pkg/builder @@ -801,11 +801,11 @@ k8s.io/kube-openapi/pkg/handler k8s.io/kube-openapi/pkg/schemaconv k8s.io/kube-openapi/pkg/util k8s.io/kube-openapi/pkg/util/proto -# k8s.io/kube-scheduler v0.0.0 => k8s.io/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20200715165012-dff82dc0de47 +# k8s.io/kube-scheduler v0.0.0 => k8s.io/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20201015014302-62876fc6d93e k8s.io/kube-scheduler/extender/v1 -# k8s.io/kubectl v0.0.0 => k8s.io/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20200715165012-dff82dc0de47 +# k8s.io/kubectl v0.0.0 => k8s.io/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20201015014302-62876fc6d93e k8s.io/kubectl/pkg/scale -# k8s.io/kubernetes v1.18.6 +# k8s.io/kubernetes v1.18.10 k8s.io/kubernetes/cmd/cloud-controller-manager/app k8s.io/kubernetes/cmd/cloud-controller-manager/app/apis/config k8s.io/kubernetes/cmd/cloud-controller-manager/app/apis/config/scheme @@ -954,7 +954,7 @@ k8s.io/kubernetes/test/e2e/system k8s.io/kubernetes/test/utils k8s.io/kubernetes/test/utils/image k8s.io/kubernetes/third_party/forked/golang/expansion -# k8s.io/legacy-cloud-providers v0.0.0 => k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20200715165012-dff82dc0de47 +# k8s.io/legacy-cloud-providers v0.0.0 => k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20201015014302-62876fc6d93e k8s.io/legacy-cloud-providers/azure k8s.io/legacy-cloud-providers/azure/auth k8s.io/legacy-cloud-providers/azure/cache