diff --git a/go.mod b/go.mod index 1506ef2e4ed..324b9d9a96d 100644 --- a/go.mod +++ b/go.mod @@ -190,30 +190,30 @@ require ( replace ( github.com/kcp-dev/kcp/pkg/apis => ./pkg/apis - k8s.io/api => github.com/kcp-dev/kubernetes/staging/src/k8s.io/api v0.0.0-20221109181054-309ae77aff99 - k8s.io/apiextensions-apiserver => github.com/kcp-dev/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20221109181054-309ae77aff99 - k8s.io/apimachinery => github.com/kcp-dev/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20221109181054-309ae77aff99 - k8s.io/apiserver => github.com/kcp-dev/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20221109181054-309ae77aff99 - k8s.io/cli-runtime => github.com/kcp-dev/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20221109181054-309ae77aff99 - k8s.io/client-go => github.com/kcp-dev/kubernetes/staging/src/k8s.io/client-go v0.0.0-20221109181054-309ae77aff99 - k8s.io/cloud-provider => github.com/kcp-dev/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20221109181054-309ae77aff99 - k8s.io/cluster-bootstrap => github.com/kcp-dev/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20221109181054-309ae77aff99 - k8s.io/code-generator => github.com/kcp-dev/kubernetes/staging/src/k8s.io/code-generator v0.0.0-20221109181054-309ae77aff99 - k8s.io/component-base => github.com/kcp-dev/kubernetes/staging/src/k8s.io/component-base v0.0.0-20221109181054-309ae77aff99 - k8s.io/component-helpers => github.com/kcp-dev/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20221109181054-309ae77aff99 - k8s.io/controller-manager => github.com/kcp-dev/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20221109181054-309ae77aff99 - k8s.io/cri-api => github.com/kcp-dev/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20221109181054-309ae77aff99 - k8s.io/csi-translation-lib => github.com/kcp-dev/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20221109181054-309ae77aff99 - k8s.io/kube-aggregator => github.com/kcp-dev/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20221109181054-309ae77aff99 - k8s.io/kube-controller-manager => github.com/kcp-dev/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20221109181054-309ae77aff99 - k8s.io/kube-proxy => github.com/kcp-dev/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20221109181054-309ae77aff99 - k8s.io/kube-scheduler => github.com/kcp-dev/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20221109181054-309ae77aff99 - k8s.io/kubectl => github.com/kcp-dev/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20221109181054-309ae77aff99 - k8s.io/kubelet => github.com/kcp-dev/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20221109181054-309ae77aff99 - k8s.io/kubernetes => github.com/kcp-dev/kubernetes v0.0.0-20221109181054-309ae77aff99 - k8s.io/legacy-cloud-providers => github.com/kcp-dev/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20221109181054-309ae77aff99 - k8s.io/metrics => github.com/kcp-dev/kubernetes/staging/src/k8s.io/metrics v0.0.0-20221109181054-309ae77aff99 - k8s.io/mount-utils => github.com/kcp-dev/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20221109181054-309ae77aff99 - k8s.io/pod-security-admission => github.com/kcp-dev/kubernetes/staging/src/k8s.io/pod-security-admission v0.0.0-20221109181054-309ae77aff99 - k8s.io/sample-apiserver => github.com/kcp-dev/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20221109181054-309ae77aff99 + k8s.io/api => github.com/kcp-dev/kubernetes/staging/src/k8s.io/api v0.0.0-20221109214846-522b775dad04 + k8s.io/apiextensions-apiserver => 
github.com/kcp-dev/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20221109214846-522b775dad04 + k8s.io/apimachinery => github.com/kcp-dev/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20221109214846-522b775dad04 + k8s.io/apiserver => github.com/kcp-dev/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20221109214846-522b775dad04 + k8s.io/cli-runtime => github.com/kcp-dev/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20221109214846-522b775dad04 + k8s.io/client-go => github.com/kcp-dev/kubernetes/staging/src/k8s.io/client-go v0.0.0-20221109214846-522b775dad04 + k8s.io/cloud-provider => github.com/kcp-dev/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20221109214846-522b775dad04 + k8s.io/cluster-bootstrap => github.com/kcp-dev/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20221109214846-522b775dad04 + k8s.io/code-generator => github.com/kcp-dev/kubernetes/staging/src/k8s.io/code-generator v0.0.0-20221109214846-522b775dad04 + k8s.io/component-base => github.com/kcp-dev/kubernetes/staging/src/k8s.io/component-base v0.0.0-20221109214846-522b775dad04 + k8s.io/component-helpers => github.com/kcp-dev/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20221109214846-522b775dad04 + k8s.io/controller-manager => github.com/kcp-dev/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20221109214846-522b775dad04 + k8s.io/cri-api => github.com/kcp-dev/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20221109214846-522b775dad04 + k8s.io/csi-translation-lib => github.com/kcp-dev/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20221109214846-522b775dad04 + k8s.io/kube-aggregator => github.com/kcp-dev/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20221109214846-522b775dad04 + k8s.io/kube-controller-manager => github.com/kcp-dev/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20221109214846-522b775dad04 + k8s.io/kube-proxy => github.com/kcp-dev/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20221109214846-522b775dad04 + k8s.io/kube-scheduler => github.com/kcp-dev/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20221109214846-522b775dad04 + k8s.io/kubectl => github.com/kcp-dev/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20221109214846-522b775dad04 + k8s.io/kubelet => github.com/kcp-dev/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20221109214846-522b775dad04 + k8s.io/kubernetes => github.com/kcp-dev/kubernetes v0.0.0-20221109214846-522b775dad04 + k8s.io/legacy-cloud-providers => github.com/kcp-dev/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20221109214846-522b775dad04 + k8s.io/metrics => github.com/kcp-dev/kubernetes/staging/src/k8s.io/metrics v0.0.0-20221109214846-522b775dad04 + k8s.io/mount-utils => github.com/kcp-dev/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20221109214846-522b775dad04 + k8s.io/pod-security-admission => github.com/kcp-dev/kubernetes/staging/src/k8s.io/pod-security-admission v0.0.0-20221109214846-522b775dad04 + k8s.io/sample-apiserver => github.com/kcp-dev/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20221109214846-522b775dad04 ) diff --git a/go.sum b/go.sum index c9c405313e5..654c25e7084 100644 --- a/go.sum +++ b/go.sum @@ -466,49 +466,49 @@ github.com/kcp-dev/apimachinery v0.0.0-20221019133255-9e1e13940519 h1:eU1HvmmP8T github.com/kcp-dev/apimachinery v0.0.0-20221019133255-9e1e13940519/go.mod h1:qnvUHkdxOrNzX17yX+z8r81CZEBuFdveNzWqFlwZ55w= github.com/kcp-dev/client-go v0.0.0-20221025140308-a18ccea074a6 h1:Dxst7pq601Y7zNhSAec7LoBySHFWvD+djLTqoTmHrL0= github.com/kcp-dev/client-go 
v0.0.0-20221025140308-a18ccea074a6/go.mod h1:Qmq1OxUOSdVQ8YIGnjbya5Xt04KMJ5fN41QvErl/XnI= -github.com/kcp-dev/kubernetes v0.0.0-20221109181054-309ae77aff99 h1:nWn9pSRbB/lT70P/OkWBpymrpsjDdC/yXJ2M3KQsZ78= -github.com/kcp-dev/kubernetes v0.0.0-20221109181054-309ae77aff99/go.mod h1:k+ZjyGSN9Rk4RlEwYdsktNcVa7y+wZxueGGjp0Q1qxw= -github.com/kcp-dev/kubernetes/staging/src/k8s.io/api v0.0.0-20221109181054-309ae77aff99 h1:wKNad0L6i9X2KdfeKhEkIz6p5dcNKbv3916BBGVqHug= -github.com/kcp-dev/kubernetes/staging/src/k8s.io/api v0.0.0-20221109181054-309ae77aff99/go.mod h1:wFody8RC8+4UKbo2wOmo9flzwWm+dcqP3feljdZaIFA= -github.com/kcp-dev/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20221109181054-309ae77aff99 h1:wsxbXj5G/ACDfFDlVj5hkxs/M8kimRGcVJLklELs1+I= -github.com/kcp-dev/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20221109181054-309ae77aff99/go.mod h1:7SyIKzoueDhWhnu1rka8iTzwoyyKoAmO360kcg+kk6s= -github.com/kcp-dev/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20221109181054-309ae77aff99 h1:JSqprrzDhxQ1Wa1SS4eihmpaWPtjJ0RTBP1r0rc3wc0= -github.com/kcp-dev/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20221109181054-309ae77aff99/go.mod h1:oVp7C55aZ37Evvix9tVFxezaynScUXGEbub/hnRj/Y4= -github.com/kcp-dev/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20221109181054-309ae77aff99 h1:2gZlTT5u3YNdrEXI6zlzkaA2tUF6ZrFQDjGMyqkkyEU= -github.com/kcp-dev/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20221109181054-309ae77aff99/go.mod h1:HUs3EYVXM6JBtbzGVVdm0XrbzPDZMgzWEjBSzvaz1LI= -github.com/kcp-dev/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20221109181054-309ae77aff99 h1:L25ws7TJUX2E2j5XHP31r6n2xygUXBebwS9WNKG8xWA= -github.com/kcp-dev/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20221109181054-309ae77aff99/go.mod h1:NTrjrEBA52bKcO+PYPE4+v9NZvwmN6mXoqYdf7k6XDg= -github.com/kcp-dev/kubernetes/staging/src/k8s.io/client-go v0.0.0-20221109181054-309ae77aff99 h1:MMFhB8PSEQaSCZWT9l1U9JD0vj6rnMjIfzPb1tQdMDw= -github.com/kcp-dev/kubernetes/staging/src/k8s.io/client-go v0.0.0-20221109181054-309ae77aff99/go.mod h1:Wx7dkoAAZbUoKe+y4g9dkN/4HjNT1C9M9dlEpqb5XSs= -github.com/kcp-dev/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20221109181054-309ae77aff99 h1:Opy4pcXwwhrGUo9+BNWxLXR9Z5gIsX4hy8Yd3RBGF2Y= -github.com/kcp-dev/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20221109181054-309ae77aff99/go.mod h1:SGfIQ7+0LiP6us077QAcnG1ZzDg/Wb4RDBSn0qzlXqs= -github.com/kcp-dev/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20221109181054-309ae77aff99/go.mod h1:nhbIKlMnFsQwaKzTLunaXuTg5p4dsJtgppm3QE8crSE= -github.com/kcp-dev/kubernetes/staging/src/k8s.io/code-generator v0.0.0-20221109181054-309ae77aff99 h1:cF6mln3P3ih2U2Qywzv51hy1mLjX7tzFrE6zijI+Xpw= -github.com/kcp-dev/kubernetes/staging/src/k8s.io/code-generator v0.0.0-20221109181054-309ae77aff99/go.mod h1:gwJV5I+7TA9aExVbr0hGXL/qc9wqhYIV+REzmYxcG10= -github.com/kcp-dev/kubernetes/staging/src/k8s.io/component-base v0.0.0-20221109181054-309ae77aff99 h1:kxVmducTrC2AXdN2LghruJ5i1IvThO+2FCtVFJnXMW0= -github.com/kcp-dev/kubernetes/staging/src/k8s.io/component-base v0.0.0-20221109181054-309ae77aff99/go.mod h1:S4ESPXao2xPc0UotnnkB2Fu3+UVVvgfl9u815FcOhu4= -github.com/kcp-dev/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20221109181054-309ae77aff99 h1:bvOtG+wPU90sT1Wa1FY7qcaGQJaB9qKROidI39qEkQY= -github.com/kcp-dev/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20221109181054-309ae77aff99/go.mod h1:l4yvBtQIFFTSX4vGLjfYumyKthccc2l1V5dI4eTaf+8= -github.com/kcp-dev/kubernetes/staging/src/k8s.io/controller-manager 
v0.0.0-20221109181054-309ae77aff99 h1:V1eIyn6vfSXaju0LlwIEzNlBJH1V46SK6/Ij0i0nwVQ= -github.com/kcp-dev/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20221109181054-309ae77aff99/go.mod h1:2ouJ+ran7bCbE1fQI+5eUIaa+hGpa98ADjX0+uoFVmk= -github.com/kcp-dev/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20221109181054-309ae77aff99/go.mod h1:xwqxQ5OG+x7TPH5UuXhOFxNTUl7V2dn5ihPTHUjByL8= -github.com/kcp-dev/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20221109181054-309ae77aff99/go.mod h1:LILZAN6o86jNjqODdkkJmEtgUPRn3PgMqiJQItPmmus= -github.com/kcp-dev/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20221109181054-309ae77aff99 h1:rRWT2KfxWNEmi1U8wm/rrPjC1lYo8oLGlLLF6nVWItI= -github.com/kcp-dev/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20221109181054-309ae77aff99/go.mod h1:hnugPviEC4jZVUlxXMVXJYzIeZPjqVeQjvu5u5v7X20= -github.com/kcp-dev/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20221109181054-309ae77aff99 h1:0FvXkJdb9oaDGifF6D3syHABAdp22oJgNlVpS+EBt/8= -github.com/kcp-dev/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20221109181054-309ae77aff99/go.mod h1:gjMq6zaJZaaJ3MAePPvZKJ90dgc1XcyY+LzdP8ZhjqE= -github.com/kcp-dev/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20221109181054-309ae77aff99/go.mod h1:zq80gwVbTh307ZWZcdIXvvXk0e89+BRmGDlz6wBjtTw= -github.com/kcp-dev/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20221109181054-309ae77aff99/go.mod h1:KmKfLivF8xW3ybHNxTFgPlTqqxPV5dxeEgrPO+6mlks= -github.com/kcp-dev/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20221109181054-309ae77aff99/go.mod h1:c730pzl0liymPAZropIGRZlYiWrnIJhpBiRmI1UDyZE= -github.com/kcp-dev/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20221109181054-309ae77aff99 h1:6/5KBNNo0vBD20cEjcrVRYv9hFNZqZO+BGGOEc+7RkI= -github.com/kcp-dev/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20221109181054-309ae77aff99/go.mod h1:0pnNGns9lpuAqJt7/XWGl+xGeqGtgvW5HyYEC1RGnS4= -github.com/kcp-dev/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20221109181054-309ae77aff99/go.mod h1:DZvbiE2cFjx6DSVrLkgVmkxfNoDOwkI/AnDe5Pi5t+4= -github.com/kcp-dev/kubernetes/staging/src/k8s.io/metrics v0.0.0-20221109181054-309ae77aff99/go.mod h1:a8TQWZSV3HSigv147BKFJuXb/RP08XZE75MTVbE6FLk= -github.com/kcp-dev/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20221109181054-309ae77aff99 h1:CXY1gyeYJb6BBwHVZsiQBPDKTbWs1Eg80YGi+MbuJZ4= -github.com/kcp-dev/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20221109181054-309ae77aff99/go.mod h1:kzg4okwm6NVeWNVu9QMnFC0JMmdDLF+g6T2VMk6u0J4= -github.com/kcp-dev/kubernetes/staging/src/k8s.io/pod-security-admission v0.0.0-20221109181054-309ae77aff99 h1:QZUfsNm14r7HeqNRCF4845NixyQQ1OAWQPFbZoV6NKw= -github.com/kcp-dev/kubernetes/staging/src/k8s.io/pod-security-admission v0.0.0-20221109181054-309ae77aff99/go.mod h1:B+QcKln9LVN3LrpW6no3osNcKNmkG+ekdFvX6sGDxxg= -github.com/kcp-dev/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20221109181054-309ae77aff99/go.mod h1:P/WBYAdm07dyq/hrC07WFItXz6ydh8M34/DLvaCTb3w= +github.com/kcp-dev/kubernetes v0.0.0-20221109214846-522b775dad04 h1:586KcrZY4oyrkZnvGlQV9tB3g75Foq6glvvnbJIdEa8= +github.com/kcp-dev/kubernetes v0.0.0-20221109214846-522b775dad04/go.mod h1:k+ZjyGSN9Rk4RlEwYdsktNcVa7y+wZxueGGjp0Q1qxw= +github.com/kcp-dev/kubernetes/staging/src/k8s.io/api v0.0.0-20221109214846-522b775dad04 h1:Nxu4g/QBd3KAioCFjXNWtUyMS/lS/aS0+8RprVpUpAk= +github.com/kcp-dev/kubernetes/staging/src/k8s.io/api v0.0.0-20221109214846-522b775dad04/go.mod h1:wFody8RC8+4UKbo2wOmo9flzwWm+dcqP3feljdZaIFA= 
+github.com/kcp-dev/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20221109214846-522b775dad04 h1:N6+Jefb0t50bCiR9Z+34YxT8wysvH+XUhLW0KZUb3bE= +github.com/kcp-dev/kubernetes/staging/src/k8s.io/apiextensions-apiserver v0.0.0-20221109214846-522b775dad04/go.mod h1:7SyIKzoueDhWhnu1rka8iTzwoyyKoAmO360kcg+kk6s= +github.com/kcp-dev/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20221109214846-522b775dad04 h1:x5uzhRE/BM+rOPkpqsWT65DvmDp660hZcJsBGFwYiJI= +github.com/kcp-dev/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20221109214846-522b775dad04/go.mod h1:oVp7C55aZ37Evvix9tVFxezaynScUXGEbub/hnRj/Y4= +github.com/kcp-dev/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20221109214846-522b775dad04 h1:uDDEoS1EuQwUP9/bug0SDS/sefGZyi+4VJbWIwd4pZ8= +github.com/kcp-dev/kubernetes/staging/src/k8s.io/apiserver v0.0.0-20221109214846-522b775dad04/go.mod h1:HUs3EYVXM6JBtbzGVVdm0XrbzPDZMgzWEjBSzvaz1LI= +github.com/kcp-dev/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20221109214846-522b775dad04 h1:CVNag7M2uUB1Fn2N1pCREBPSi3DUl6M9lIOUAsCmEik= +github.com/kcp-dev/kubernetes/staging/src/k8s.io/cli-runtime v0.0.0-20221109214846-522b775dad04/go.mod h1:NTrjrEBA52bKcO+PYPE4+v9NZvwmN6mXoqYdf7k6XDg= +github.com/kcp-dev/kubernetes/staging/src/k8s.io/client-go v0.0.0-20221109214846-522b775dad04 h1:lsHDCP23WKtL6VIyRJwBSGRVtGIM01o1YO+FX3XWUD4= +github.com/kcp-dev/kubernetes/staging/src/k8s.io/client-go v0.0.0-20221109214846-522b775dad04/go.mod h1:Wx7dkoAAZbUoKe+y4g9dkN/4HjNT1C9M9dlEpqb5XSs= +github.com/kcp-dev/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20221109214846-522b775dad04 h1:KaxStf3KtT+NTQztyRq/4HbUeoiiTIo2RQz+RlgSxnE= +github.com/kcp-dev/kubernetes/staging/src/k8s.io/cloud-provider v0.0.0-20221109214846-522b775dad04/go.mod h1:SGfIQ7+0LiP6us077QAcnG1ZzDg/Wb4RDBSn0qzlXqs= +github.com/kcp-dev/kubernetes/staging/src/k8s.io/cluster-bootstrap v0.0.0-20221109214846-522b775dad04/go.mod h1:nhbIKlMnFsQwaKzTLunaXuTg5p4dsJtgppm3QE8crSE= +github.com/kcp-dev/kubernetes/staging/src/k8s.io/code-generator v0.0.0-20221109214846-522b775dad04 h1:S3HFN+YokTTX2F/IdI8IfXzCD9nbVQlQtK9mf//87ZM= +github.com/kcp-dev/kubernetes/staging/src/k8s.io/code-generator v0.0.0-20221109214846-522b775dad04/go.mod h1:gwJV5I+7TA9aExVbr0hGXL/qc9wqhYIV+REzmYxcG10= +github.com/kcp-dev/kubernetes/staging/src/k8s.io/component-base v0.0.0-20221109214846-522b775dad04 h1:rjNvBjUMwrJHkW3Y4UNNIKufzu1rGIA9YDsOzu9RVGc= +github.com/kcp-dev/kubernetes/staging/src/k8s.io/component-base v0.0.0-20221109214846-522b775dad04/go.mod h1:S4ESPXao2xPc0UotnnkB2Fu3+UVVvgfl9u815FcOhu4= +github.com/kcp-dev/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20221109214846-522b775dad04 h1:nn6aQ1JK7CfqvujBCsVwtyVVEu82Ykb7xc9Urxnw9LQ= +github.com/kcp-dev/kubernetes/staging/src/k8s.io/component-helpers v0.0.0-20221109214846-522b775dad04/go.mod h1:l4yvBtQIFFTSX4vGLjfYumyKthccc2l1V5dI4eTaf+8= +github.com/kcp-dev/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20221109214846-522b775dad04 h1:lTYNUbxO5yacHRbPad6R6ldJy9Yy8Q0f5XqzFibq1yM= +github.com/kcp-dev/kubernetes/staging/src/k8s.io/controller-manager v0.0.0-20221109214846-522b775dad04/go.mod h1:2ouJ+ran7bCbE1fQI+5eUIaa+hGpa98ADjX0+uoFVmk= +github.com/kcp-dev/kubernetes/staging/src/k8s.io/cri-api v0.0.0-20221109214846-522b775dad04/go.mod h1:xwqxQ5OG+x7TPH5UuXhOFxNTUl7V2dn5ihPTHUjByL8= +github.com/kcp-dev/kubernetes/staging/src/k8s.io/csi-translation-lib v0.0.0-20221109214846-522b775dad04/go.mod h1:LILZAN6o86jNjqODdkkJmEtgUPRn3PgMqiJQItPmmus= 
+github.com/kcp-dev/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20221109214846-522b775dad04 h1:FHPEdkmn22CbRNPQ1uUFU6XxCzgpFTOcNis3e5O0mtI= +github.com/kcp-dev/kubernetes/staging/src/k8s.io/kube-aggregator v0.0.0-20221109214846-522b775dad04/go.mod h1:hnugPviEC4jZVUlxXMVXJYzIeZPjqVeQjvu5u5v7X20= +github.com/kcp-dev/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20221109214846-522b775dad04 h1:6F5BO4BoBZ1Z+R/dIzdqVhF9+d5VQIQRPQYaj22NYQ8= +github.com/kcp-dev/kubernetes/staging/src/k8s.io/kube-controller-manager v0.0.0-20221109214846-522b775dad04/go.mod h1:gjMq6zaJZaaJ3MAePPvZKJ90dgc1XcyY+LzdP8ZhjqE= +github.com/kcp-dev/kubernetes/staging/src/k8s.io/kube-proxy v0.0.0-20221109214846-522b775dad04/go.mod h1:zq80gwVbTh307ZWZcdIXvvXk0e89+BRmGDlz6wBjtTw= +github.com/kcp-dev/kubernetes/staging/src/k8s.io/kube-scheduler v0.0.0-20221109214846-522b775dad04/go.mod h1:KmKfLivF8xW3ybHNxTFgPlTqqxPV5dxeEgrPO+6mlks= +github.com/kcp-dev/kubernetes/staging/src/k8s.io/kubectl v0.0.0-20221109214846-522b775dad04/go.mod h1:c730pzl0liymPAZropIGRZlYiWrnIJhpBiRmI1UDyZE= +github.com/kcp-dev/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20221109214846-522b775dad04 h1:xg6Pj68c+0g5m1DLvwaM5Do4MjL7Q81i3GuKVGPG8pg= +github.com/kcp-dev/kubernetes/staging/src/k8s.io/kubelet v0.0.0-20221109214846-522b775dad04/go.mod h1:0pnNGns9lpuAqJt7/XWGl+xGeqGtgvW5HyYEC1RGnS4= +github.com/kcp-dev/kubernetes/staging/src/k8s.io/legacy-cloud-providers v0.0.0-20221109214846-522b775dad04/go.mod h1:DZvbiE2cFjx6DSVrLkgVmkxfNoDOwkI/AnDe5Pi5t+4= +github.com/kcp-dev/kubernetes/staging/src/k8s.io/metrics v0.0.0-20221109214846-522b775dad04/go.mod h1:a8TQWZSV3HSigv147BKFJuXb/RP08XZE75MTVbE6FLk= +github.com/kcp-dev/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20221109214846-522b775dad04 h1:6xQgjFI3qumYavARrNnuqgSaImB/UO329NlAmWBGd7Q= +github.com/kcp-dev/kubernetes/staging/src/k8s.io/mount-utils v0.0.0-20221109214846-522b775dad04/go.mod h1:kzg4okwm6NVeWNVu9QMnFC0JMmdDLF+g6T2VMk6u0J4= +github.com/kcp-dev/kubernetes/staging/src/k8s.io/pod-security-admission v0.0.0-20221109214846-522b775dad04 h1:QXD0keovwFHZ8rWWPssofpVQzC4u22THksfPaVCHR98= +github.com/kcp-dev/kubernetes/staging/src/k8s.io/pod-security-admission v0.0.0-20221109214846-522b775dad04/go.mod h1:B+QcKln9LVN3LrpW6no3osNcKNmkG+ekdFvX6sGDxxg= +github.com/kcp-dev/kubernetes/staging/src/k8s.io/sample-apiserver v0.0.0-20221109214846-522b775dad04/go.mod h1:P/WBYAdm07dyq/hrC07WFItXz6ydh8M34/DLvaCTb3w= github.com/kcp-dev/logicalcluster/v2 v2.0.0-alpha.1/go.mod h1:lfWJL764jKFJxZWOGuFuT3PCCLPo6lV5Cl8P7u9T05g= github.com/kcp-dev/logicalcluster/v2 v2.0.0-alpha.3 h1:+DwIG/loh2nDB9c/FqNvLzFFq/YtBliLxAfw/uWNzyE= github.com/kcp-dev/logicalcluster/v2 v2.0.0-alpha.3/go.mod h1:lfWJL764jKFJxZWOGuFuT3PCCLPo6lV5Cl8P7u9T05g= diff --git a/pkg/informer/informer.go b/pkg/informer/informer.go index 16f9f2b3295..66f33f1859a 100644 --- a/pkg/informer/informer.go +++ b/pkg/informer/informer.go @@ -34,15 +34,20 @@ import ( "k8s.io/apiextensions-apiserver/pkg/apihelpers" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/discovery" "k8s.io/client-go/informers" "k8s.io/client-go/rest" + "k8s.io/client-go/restmapper" "k8s.io/client-go/tools/cache" "k8s.io/klog/v2" + "github.com/kcp-dev/kcp/pkg/indexers" metadataclient 
"github.com/kcp-dev/kcp/pkg/metadata" "github.com/kcp-dev/kcp/pkg/projection" ) @@ -50,7 +55,7 @@ import ( const ( resyncPeriod = 10 * time.Hour - byGroupFirstFoundVersionResourceIndex = "byGroup-firstFoundVersion-resource" + byGroupVersionResourceIndex = "byGroupVersionResource" ) // DynamicDiscoverySharedInformerFactory is a SharedInformerFactory that @@ -74,7 +79,8 @@ type DynamicDiscoverySharedInformerFactory struct { informers map[schema.GroupVersionResource]kcpkubernetesinformers.GenericClusterInformer startedInformers map[schema.GroupVersionResource]bool informerStops map[schema.GroupVersionResource]chan struct{} - discoveryData []*metav1.APIResourceList + discoveryData discoveryData + restMapper restMapper // Support subscribers (e.g. quota) that want to know when informers/discovery have changed. subscribersLock sync.Mutex @@ -114,38 +120,15 @@ func NewDynamicDiscoverySharedInformerFactory( subscribers: make(map[string]chan<- struct{}), } + f.restMapper = newRESTMapper(func() (meta.RESTMapper, error) { + return restmapper.NewDiscoveryRESTMapper(f.discoveryData.apiGroupResources), nil + }) + f.handlers.Store([]GVREventHandler{}) - // Add an index function that indexes a CRD by its group/firstServedVersion/resource. We only need the first - // served version because this shared informer factory is expected to be using a wildcard client for partial - // metadata only. In this instance, version does not matter, because a wildcard partial metadata list request - // for CRs always serves all CRs for the group-resource, regardless of storage version. + // Add an index function that indexes a CRD by its group/version/resource. if err := crdInformer.Informer().AddIndexers(cache.Indexers{ - byGroupFirstFoundVersionResourceIndex: func(obj interface{}) ([]string, error) { - crd, ok := obj.(*apiextensionsv1.CustomResourceDefinition) - if !ok { - return nil, fmt.Errorf("%T is not a CustomResourceDefinition", obj) - } - - firstServedVersion := "" - for _, version := range crd.Spec.Versions { - if !version.Served { - continue - } - firstServedVersion = version.Name - break - } - - if firstServedVersion == "" { - return []string{}, nil - } - - group := crd.Spec.Group - resource := crd.Spec.Names.Plural - - indexValue := fmt.Sprintf("%s/%s/%s", group, firstServedVersion, resource) - return []string{indexValue}, nil - }, + byGroupVersionResourceIndex: byGroupVersionResourceIndexFunc, }); err != nil { return nil, err } @@ -192,6 +175,28 @@ func NewDynamicDiscoverySharedInformerFactory( return f, nil } +func byGroupVersionResourceKeyFunc(group, version, resource string) string { + return fmt.Sprintf("%s/%s/%s", group, version, resource) +} + +func byGroupVersionResourceIndexFunc(obj interface{}) ([]string, error) { + crd, ok := obj.(*apiextensionsv1.CustomResourceDefinition) + if !ok { + return nil, fmt.Errorf("%T is not a CustomResourceDefinition", obj) + } + + var ret []string + + for _, v := range crd.Spec.Versions { + if !v.Served { + continue + } + ret = append(ret, byGroupVersionResourceKeyFunc(crd.Spec.Group, v.Name, crd.Spec.Names.Plural)) + } + + return ret, nil +} + func (d *DynamicDiscoverySharedInformerFactory) Cluster(cluster logicalcluster.Name) kcpkubernetesinformers.ScopedDynamicSharedInformerFactory { return &scopedDynamicDiscoverySharedInformerFactory{ DynamicDiscoverySharedInformerFactory: d, @@ -410,29 +415,42 @@ func gvrFor(group, version, resource string) schema.GroupVersionResource { } } -func builtInInformableTypes() map[schema.GroupVersionResource]struct{} { +func 
withGVRPartialMetadata(scope apiextensionsv1.ResourceScope, kind, singular string) gvrPartialMetadata { + return gvrPartialMetadata{ + Scope: scope, + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Kind: kind, + Singular: singular, + }, + } +} + +type gvrPartialMetadata struct { + Names apiextensionsv1.CustomResourceDefinitionNames + Scope apiextensionsv1.ResourceScope +} + +func builtInInformableTypes() map[schema.GroupVersionResource]gvrPartialMetadata { // Hard-code built in types that support list+watch - latest := map[schema.GroupVersionResource]struct{}{ - gvrFor("", "v1", "configmaps"): {}, - gvrFor("", "v1", "events"): {}, - gvrFor("", "v1", "limitranges"): {}, - gvrFor("", "v1", "namespaces"): {}, - gvrFor("", "v1", "resourcequotas"): {}, - gvrFor("", "v1", "secrets"): {}, - gvrFor("", "v1", "serviceaccounts"): {}, - gvrFor("certificates.k8s.io", "v1", "certificatesigningrequests"): {}, - gvrFor("coordination.k8s.io", "v1", "leases"): {}, - gvrFor("rbac.authorization.k8s.io", "v1", "clusterroles"): {}, - gvrFor("rbac.authorization.k8s.io", "v1", "clusterrolebindings"): {}, - gvrFor("rbac.authorization.k8s.io", "v1", "roles"): {}, - gvrFor("rbac.authorization.k8s.io", "v1", "rolebindings"): {}, - gvrFor("events.k8s.io", "v1", "events"): {}, - gvrFor("admissionregistration.k8s.io", "v1", "mutatingwebhookconfigurations"): {}, - gvrFor("admissionregistration.k8s.io", "v1", "validatingwebhookconfigurations"): {}, - gvrFor("apiextensions.k8s.io", "v1", "customresourcedefinitions"): {}, - } - - return latest + return map[schema.GroupVersionResource]gvrPartialMetadata{ + gvrFor("", "v1", "configmaps"): withGVRPartialMetadata(apiextensionsv1.NamespaceScoped, "ConfigMap", "configmap"), + gvrFor("", "v1", "events"): withGVRPartialMetadata(apiextensionsv1.NamespaceScoped, "Event", "event"), + gvrFor("", "v1", "limitranges"): withGVRPartialMetadata(apiextensionsv1.NamespaceScoped, "LimitRange", "limitrange"), + gvrFor("", "v1", "namespaces"): withGVRPartialMetadata(apiextensionsv1.ClusterScoped, "Namespace", "namespace"), + gvrFor("", "v1", "resourcequotas"): withGVRPartialMetadata(apiextensionsv1.NamespaceScoped, "ResourceQuota", "resourcequota"), + gvrFor("", "v1", "secrets"): withGVRPartialMetadata(apiextensionsv1.NamespaceScoped, "Secret", "secret"), + gvrFor("", "v1", "serviceaccounts"): withGVRPartialMetadata(apiextensionsv1.NamespaceScoped, "ServiceAccount", "serviceaccount"), + gvrFor("certificates.k8s.io", "v1", "certificatesigningrequests"): withGVRPartialMetadata(apiextensionsv1.ClusterScoped, "CertificateSigningRequest", "certificatesigningrequest"), + gvrFor("coordination.k8s.io", "v1", "leases"): withGVRPartialMetadata(apiextensionsv1.NamespaceScoped, "Lease", "lease"), + gvrFor("rbac.authorization.k8s.io", "v1", "clusterroles"): withGVRPartialMetadata(apiextensionsv1.ClusterScoped, "ClusterRole", "clusterrole"), + gvrFor("rbac.authorization.k8s.io", "v1", "clusterrolebindings"): withGVRPartialMetadata(apiextensionsv1.ClusterScoped, "ClusterRoleBinding", "clusterrolebinding"), + gvrFor("rbac.authorization.k8s.io", "v1", "roles"): withGVRPartialMetadata(apiextensionsv1.NamespaceScoped, "Role", "role"), + gvrFor("rbac.authorization.k8s.io", "v1", "rolebindings"): withGVRPartialMetadata(apiextensionsv1.NamespaceScoped, "RoleBinding", "rolebinding"), + gvrFor("events.k8s.io", "v1", "events"): withGVRPartialMetadata(apiextensionsv1.NamespaceScoped, "Event", "event"), + gvrFor("admissionregistration.k8s.io", "v1", "mutatingwebhookconfigurations"): 
withGVRPartialMetadata(apiextensionsv1.ClusterScoped, "MutatingWebhookConfiguration", "mutatingwebhookconfiguration"), + gvrFor("admissionregistration.k8s.io", "v1", "validatingwebhookconfigurations"): withGVRPartialMetadata(apiextensionsv1.ClusterScoped, "ValidatingWebhookConfiguration", "validatingwebhookconfiguration"), + gvrFor("apiextensions.k8s.io", "v1", "customresourcedefinitions"): withGVRPartialMetadata(apiextensionsv1.ClusterScoped, "CustomResourceDefinition", "customresourcedefinition"), + } } func (d *DynamicDiscoverySharedInformerFactory) updateInformers() { @@ -442,7 +460,7 @@ func (d *DynamicDiscoverySharedInformerFactory) updateInformers() { // Get the unique set of Group(Version)Resources (version doesn't matter because we're expecting a wildcard // partial metadata client, but we need a version in the request, so we need it here) and add them to latest. - crdGVRs := d.crdIndexer.ListIndexFuncValues(byGroupFirstFoundVersionResourceIndex) + crdGVRs := d.crdIndexer.ListIndexFuncValues(byGroupVersionResourceIndex) for _, s := range crdGVRs { parts := strings.Split(s, "/") group := parts[0] @@ -458,7 +476,24 @@ func (d *DynamicDiscoverySharedInformerFactory) updateInformers() { continue } - latest[gvr] = struct{}{} + obj, err := indexers.ByIndex[*apiextensionsv1.CustomResourceDefinition](d.crdIndexer, byGroupVersionResourceIndex, byGroupVersionResourceKeyFunc(gvr.Group, gvr.Version, gvr.Resource)) + if err != nil { + utilruntime.HandleError(err) + continue + } + if len(obj) == 0 { + utilruntime.HandleError(fmt.Errorf("unable to retrieve CRD for GVR: %s", gvr)) + continue + } + // We assume CRDs partial metadata for the same GVR are constant + crd := obj[0] + latest[gvr] = gvrPartialMetadata{ + Scope: crd.Spec.Scope, + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Kind: crd.Spec.Names.Kind, + Singular: crd.Spec.Names.Singular, + }, + } } // Grab a read lock to compare against d.informers to see if we need to start or stop any informers @@ -517,6 +552,9 @@ func (d *DynamicDiscoverySharedInformerFactory) updateInformers() { } d.discoveryData = gvrsToDiscoveryData(latest) + d.restMapper = newRESTMapper(func() (meta.RESTMapper, error) { + return restmapper.NewDiscoveryRESTMapper(d.discoveryData.apiGroupResources), nil + }) d.subscribersLock.Lock() defer d.subscribersLock.Unlock() @@ -530,56 +568,79 @@ func (d *DynamicDiscoverySharedInformerFactory) updateInformers() { klog.V(4).InfoS("Unable to notify discovery subscriber - channel full", "id", id) } } - } -// gvrsToDiscoveryData returns "fake"/simulated discovery data for all the resources covered by the factory. It only -// includes enough data in each APIResource to support what kcp currently needs (scheduling, placement, quota). -func gvrsToDiscoveryData(gvrs map[schema.GroupVersionResource]struct{}) []*metav1.APIResourceList { - var discoveryData []*metav1.APIResourceList - gvResources := make(map[schema.GroupVersion][]metav1.APIResource) +// gvrsToDiscoveryData returns discovery data for all the resources covered by the factory. It only +// includes enough data in each APIResource to support what kcp currently needs (scheduling, placement, quota, GC). 
+func gvrsToDiscoveryData(gvrs map[schema.GroupVersionResource]gvrPartialMetadata) discoveryData { + ret := discoveryData{ + apiGroupResources: make([]*restmapper.APIGroupResources, 0), + apiResourceList: make([]*metav1.APIResourceList, 0), + } - for gvr := range gvrs { - gv := gvr.GroupVersion() + gvResources := make(map[string]map[string][]metav1.APIResource) - gvResources[gv] = append(gvResources[gv], metav1.APIResource{ - Name: gvr.Resource, - Group: gvr.Group, - Version: gvr.Version, + for gvr, metadata := range gvrs { + apiResource := metav1.APIResource{ + Name: gvr.Resource, + Group: gvr.Group, + Version: gvr.Version, + Kind: metadata.Names.Kind, + SingularName: metadata.Names.Singular, + Namespaced: metadata.Scope == apiextensionsv1.NamespaceScoped, // Everything we're informing on supports these Verbs: []string{"create", "list", "watch", "delete"}, - }) + } + if gvResources[gvr.Group] == nil { + gvResources[gvr.Group] = make(map[string][]metav1.APIResource) + } + gvResources[gvr.Group][gvr.Version] = append(gvResources[gvr.Group][gvr.Version], apiResource) } - for gv, resources := range gvResources { - sort.Slice(resources, func(i, j int) bool { - return resources[i].Name < resources[j].Name - }) + for group, resources := range gvResources { + var versions []metav1.GroupVersionForDiscovery + versionedResources := make(map[string][]metav1.APIResource) - discoveryData = append(discoveryData, &metav1.APIResourceList{ - GroupVersion: gv.String(), - APIResources: resources, - }) - } + for version, apiResource := range resources { + versions = append(versions, metav1.GroupVersionForDiscovery{GroupVersion: group, Version: version}) - sort.Slice(discoveryData, func(i, j int) bool { - return discoveryData[i].GroupVersion < discoveryData[j].GroupVersion - }) + sort.Slice(apiResource, func(i, j int) bool { + return apiResource[i].Name < apiResource[j].Name + }) - return discoveryData -} + versionedResources[version] = apiResource -// DiscoveryData implements resourcequota.NamespacedResourcesFunc and is intended to be used by the quota subsystem. -func (d *DynamicDiscoverySharedInformerFactory) DiscoveryData() ([]*metav1.APIResourceList, error) { - d.informersLock.RLock() - defer d.informersLock.RUnlock() + apiResourceList := &metav1.APIResourceList{ + GroupVersion: metav1.GroupVersion{Group: group, Version: version}.String(), + APIResources: apiResource, + } - ret := make([]*metav1.APIResourceList, len(d.discoveryData)) - for i, apiResourceList := range d.discoveryData { - ret[i] = apiResourceList.DeepCopy() + ret.apiResourceList = append(ret.apiResourceList, apiResourceList) + } + apiGroup := metav1.APIGroup{ + Name: group, + Versions: versions, + // We may want to fill the PreferredVersion based on the storage version, + // though it's not currently required by the kcp controllers that rely on + // the discovery data provided by the dynamic shared informer factory, e.g., + // the quota and garbage collector controllers. + } + + ret.apiGroupResources = append(ret.apiGroupResources, &restmapper.APIGroupResources{ + Group: apiGroup, + VersionedResources: versionedResources, + }) } - return ret, nil + sort.Slice(ret.apiGroupResources, func(i, j int) bool { + return ret.apiGroupResources[i].Group.Name < ret.apiGroupResources[j].Group.Name + }) + + sort.Slice(ret.apiResourceList, func(i, j int) bool { + return ret.apiResourceList[i].GroupVersion < ret.apiResourceList[j].GroupVersion + }) + + return ret } // Start starts any informers that have been created but not yet started. 
The passed in stop channel is ignored; @@ -602,7 +663,7 @@ func (d *DynamicDiscoverySharedInformerFactory) Start(_ <-chan struct{}) { } } -func (d *DynamicDiscoverySharedInformerFactory) calculateInformersLockHeld(latest map[schema.GroupVersionResource]struct{}) (toAdd, toRemove []schema.GroupVersionResource) { +func (d *DynamicDiscoverySharedInformerFactory) calculateInformersLockHeld(latest map[schema.GroupVersionResource]gvrPartialMetadata) (toAdd, toRemove []schema.GroupVersionResource) { for gvr := range latest { if _, found := d.informers[gvr]; !found { toAdd = append(toAdd, gvr) @@ -643,3 +704,97 @@ func (d *DynamicDiscoverySharedInformerFactory) Unsubscribe(id string) { delete(d.subscribers, id) } + +type discoveryData struct { + apiGroupResources []*restmapper.APIGroupResources + apiResourceList []*metav1.APIResourceList +} + +var _ discovery.ServerResourcesInterface = &DynamicDiscoverySharedInformerFactory{} + +func (d *DynamicDiscoverySharedInformerFactory) ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) { + d.informersLock.RLock() + defer d.informersLock.RUnlock() + + for _, apiResourceList := range d.discoveryData.apiResourceList { + if apiResourceList.GroupVersion == groupVersion { + return apiResourceList.DeepCopy(), nil + } + } + + // ignore 403 or 404 error to be compatible with a v1.0 server. + if groupVersion == "v1" { + return &metav1.APIResourceList{GroupVersion: groupVersion}, nil + } + + return nil, errors.NewNotFound(schema.GroupResource{Group: groupVersion}, "") +} + +func (d *DynamicDiscoverySharedInformerFactory) ServerGroupsAndResources() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { + d.informersLock.RLock() + defer d.informersLock.RUnlock() + + retGroups := make([]*metav1.APIGroup, len(d.discoveryData.apiGroupResources)) + for i, apiGroupResources := range d.discoveryData.apiGroupResources { + retGroups[i] = apiGroupResources.Group.DeepCopy() + } + + retResourceList := make([]*metav1.APIResourceList, len(d.discoveryData.apiResourceList)) + for i, apiResourceList := range d.discoveryData.apiResourceList { + retResourceList[i] = apiResourceList.DeepCopy() + } + + return retGroups, retResourceList, nil +} + +func (d *DynamicDiscoverySharedInformerFactory) ServerPreferredResources() ([]*metav1.APIResourceList, error) { + d.informersLock.RLock() + defer d.informersLock.RUnlock() + + ret := make([]*metav1.APIResourceList, len(d.discoveryData.apiResourceList)) + for i, apiResourceList := range d.discoveryData.apiResourceList { + ret[i] = apiResourceList.DeepCopy() + } + + return ret, nil +} + +func (d *DynamicDiscoverySharedInformerFactory) ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) { + d.informersLock.RLock() + defer d.informersLock.RUnlock() + + ret := make([]*metav1.APIResourceList, len(d.discoveryData.apiResourceList)) + for i, apiResourceList := range d.discoveryData.apiResourceList { + namespacedResources := &metav1.APIResourceList{GroupVersion: apiResourceList.GroupVersion} + for _, resource := range apiResourceList.APIResources { + if resource.Namespaced { + namespacedResources.APIResources = append(namespacedResources.APIResources, resource) + } + } + ret[i] = namespacedResources + } + + return ret, nil +} + +func (d *DynamicDiscoverySharedInformerFactory) RESTMapper() meta.ResettableRESTMapper { + return &d.restMapper +} + +func newRESTMapper(fn func() (meta.RESTMapper, error)) restMapper { + return restMapper{ + meta.NewLazyRESTMapperLoader(fn), + } +} + +type restMapper struct { 
+ meta.RESTMapper +} + +func (r *restMapper) Reset() { + // NOOP: this is called by the Kubernetes garbage collector controller, that assumes discovery + // is refreshed periodically. As this shared informer factory pushes events whenever discovery + // changes, there is no need to reset the REST mapper during the periodic re-sync of the GC monitors. +} + +var _ meta.ResettableRESTMapper = &restMapper{} diff --git a/pkg/informer/informer_test.go b/pkg/informer/informer_test.go index ad52da709bd..7245779e40c 100644 --- a/pkg/informer/informer_test.go +++ b/pkg/informer/informer_test.go @@ -24,9 +24,11 @@ import ( "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/require" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/restmapper" "k8s.io/kubernetes/pkg/api/genericcontrolplanescheme" _ "k8s.io/kubernetes/pkg/genericcontrolplane/apis/install" ) @@ -110,54 +112,179 @@ func TestBuiltInInformableTypes(t *testing.T) { builtInGVRs[gvr] = struct{}{} } - require.Empty(t, cmp.Diff(builtInGVRs, builtInInformableTypes())) + builtInTypes := map[schema.GroupVersionResource]struct{}{} + for gvr := range builtInInformableTypes() { + builtInTypes[gvr] = struct{}{} + } + + require.Empty(t, cmp.Diff(builtInGVRs, builtInTypes)) } func TestGVRsToDiscoveryData(t *testing.T) { - input := map[schema.GroupVersionResource]struct{}{ - {Group: "g1", Version: "v1", Resource: "g1-v1-r1"}: {}, - {Group: "g2", Version: "v1", Resource: "g2-v1-r1"}: {}, - {Group: "g1", Version: "v1", Resource: "g1-v1-r2"}: {}, - {Group: "g3", Version: "v3", Resource: "g3-v3-r1"}: {}, + input := map[schema.GroupVersionResource]gvrPartialMetadata{ + {Group: "g1", Version: "v1", Resource: "g1-v1-r1"}: { + Scope: apiextensionsv1.ClusterScoped, + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Kind: "G1v1r1", + Singular: "g1v1r1", + }, + }, + {Group: "g2", Version: "v1", Resource: "g2-v1-r1"}: { + Scope: apiextensionsv1.NamespaceScoped, + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Kind: "G2v1r1", + Singular: "g2v1r1", + }, + }, + {Group: "g1", Version: "v1", Resource: "g1-v1-r2"}: { + Scope: apiextensionsv1.NamespaceScoped, + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Kind: "G1v1r2", + Singular: "g1v1r2", + }, + }, + {Group: "g3", Version: "v3", Resource: "g3-v3-r1"}: { + Scope: apiextensionsv1.ClusterScoped, + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Kind: "G3v3r1", + Singular: "g3v3r1", + }, + }, } - expected := []*metav1.APIResourceList{ - { - GroupVersion: "g1/v1", - APIResources: []metav1.APIResource{ - { - Name: "g1-v1-r1", - Group: "g1", - Version: "v1", - Verbs: []string{"create", "list", "watch", "delete"}, + expected := discoveryData{ + apiGroupResources: []*restmapper.APIGroupResources{ + { + Group: metav1.APIGroup{ + Name: "g1", + Versions: []metav1.GroupVersionForDiscovery{ + { + GroupVersion: "g1", + Version: "v1", + }, + }, }, - { - Name: "g1-v1-r2", - Group: "g1", - Version: "v1", - Verbs: []string{"create", "list", "watch", "delete"}, + VersionedResources: map[string][]metav1.APIResource{ + "v1": { + { + Name: "g1-v1-r1", + SingularName: "g1v1r1", + Group: "g1", + Version: "v1", + Kind: "G1v1r1", + Verbs: []string{"create", "list", "watch", "delete"}, + }, + { + Name: "g1-v1-r2", + SingularName: "g1v1r2", + Namespaced: true, + Group: "g1", + Version: "v1", + Kind: "G1v1r2", + Verbs: []string{"create", 
"list", "watch", "delete"}, + }, + }, }, }, - }, - { - GroupVersion: "g2/v1", - APIResources: []metav1.APIResource{ - { - Name: "g2-v1-r1", - Group: "g2", - Version: "v1", - Verbs: []string{"create", "list", "watch", "delete"}, + { + Group: metav1.APIGroup{ + Name: "g2", + Versions: []metav1.GroupVersionForDiscovery{ + { + GroupVersion: "g2", + Version: "v1", + }, + }, + }, + VersionedResources: map[string][]metav1.APIResource{ + "v1": { + { + Name: "g2-v1-r1", + SingularName: "g2v1r1", + Namespaced: true, + Group: "g2", + Version: "v1", + Kind: "G2v1r1", + Verbs: []string{"create", "list", "watch", "delete"}, + }, + }, + }, + }, + { + Group: metav1.APIGroup{ + Name: "g3", + Versions: []metav1.GroupVersionForDiscovery{ + { + GroupVersion: "g3", + Version: "v3", + }, + }, + }, + VersionedResources: map[string][]metav1.APIResource{ + "v3": { + { + Name: "g3-v3-r1", + Group: "g3", + Version: "v3", + Namespaced: false, + Kind: "G3v3r1", + SingularName: "g3v3r1", + Verbs: []string{"create", "list", "watch", "delete"}, + }, + }, }, }, }, - { - GroupVersion: "g3/v3", - APIResources: []metav1.APIResource{ - { - Name: "g3-v3-r1", - Group: "g3", - Version: "v3", - Verbs: []string{"create", "list", "watch", "delete"}, + apiResourceList: []*metav1.APIResourceList{ + { + GroupVersion: "g1/v1", + APIResources: []metav1.APIResource{ + { + Name: "g1-v1-r1", + Group: "g1", + Version: "v1", + Namespaced: false, + Kind: "G1v1r1", + SingularName: "g1v1r1", + Verbs: []string{"create", "list", "watch", "delete"}, + }, + { + Name: "g1-v1-r2", + Group: "g1", + Version: "v1", + Namespaced: true, + Kind: "G1v1r2", + SingularName: "g1v1r2", + Verbs: []string{"create", "list", "watch", "delete"}, + }, + }, + }, + { + GroupVersion: "g2/v1", + APIResources: []metav1.APIResource{ + { + Name: "g2-v1-r1", + Group: "g2", + Version: "v1", + Namespaced: true, + Kind: "G2v1r1", + SingularName: "g2v1r1", + Verbs: []string{"create", "list", "watch", "delete"}, + }, + }, + }, + { + GroupVersion: "g3/v3", + APIResources: []metav1.APIResource{ + { + Name: "g3-v3-r1", + Group: "g3", + Version: "v3", + Namespaced: false, + Kind: "G3v3r1", + SingularName: "g3v3r1", + Verbs: []string{"create", "list", "watch", "delete"}, + }, }, }, }, @@ -165,5 +292,5 @@ func TestGVRsToDiscoveryData(t *testing.T) { actual := gvrsToDiscoveryData(input) - require.Empty(t, cmp.Diff(expected, actual)) + require.Empty(t, cmp.Diff(expected, actual, cmp.AllowUnexported(discoveryData{}))) } diff --git a/pkg/projection/projected_apis.go b/pkg/projection/projected_apis.go index 1a50a5ad862..3de7021325a 100644 --- a/pkg/projection/projected_apis.go +++ b/pkg/projection/projected_apis.go @@ -37,3 +37,12 @@ func Includes(gvr schema.GroupVersionResource) bool { _, exists := projectedAPIs[gvr] return exists } + +// ProjectedAPIs returns the set of GVRs for projected APIs. +func ProjectedAPIs() map[schema.GroupVersionResource]struct{} { + ret := make(map[schema.GroupVersionResource]struct{}, len(projectedAPIs)) + for gvr := range projectedAPIs { + ret[gvr] = struct{}{} + } + return ret +} diff --git a/pkg/reconciler/garbagecollector/garbagecollector_controller.go b/pkg/reconciler/garbagecollector/garbagecollector_controller.go new file mode 100644 index 00000000000..d4c631a521e --- /dev/null +++ b/pkg/reconciler/garbagecollector/garbagecollector_controller.go @@ -0,0 +1,347 @@ +/* +Copyright 2022 The KCP Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package garbagecollector + +import ( + "context" + "fmt" + "sync" + "time" + + kcpcache "github.com/kcp-dev/apimachinery/pkg/cache" + kcpmetadata "github.com/kcp-dev/client-go/metadata" + "github.com/kcp-dev/logicalcluster/v2" + + kerrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime/schema" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + kubernetesclient "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/component-base/metrics/prometheus/ratelimiter" + "k8s.io/klog/v2" + "k8s.io/kubernetes/pkg/controller/garbagecollector" + + tenancyinformers "github.com/kcp-dev/kcp/pkg/client/informers/externalversions/tenancy/v1alpha1" + "github.com/kcp-dev/kcp/pkg/client/listers/tenancy/v1alpha1" + "github.com/kcp-dev/kcp/pkg/informer" + "github.com/kcp-dev/kcp/pkg/logging" + "github.com/kcp-dev/kcp/pkg/projection" +) + +const ( + ControllerName = "kcp-garbage-collector" +) + +// Controller manages per-workspace garbage collector controllers. +type Controller struct { + queue workqueue.RateLimitingInterface + + dynamicDiscoverySharedInformerFactory *informer.DynamicDiscoverySharedInformerFactory + kubeClusterClient kubernetesclient.ClusterInterface + metadataClient kcpmetadata.ClusterInterface + clusterWorkspaceLister v1alpha1.ClusterWorkspaceLister + informersStarted <-chan struct{} + + workersPerLogicalCluster int + + // lock guards the fields in this group + lock sync.RWMutex + cancelFuncs map[logicalcluster.Name]func() + + ignoredResources map[schema.GroupResource]struct{} +} + +// NewController creates a new Controller. 
+func NewController( + clusterWorkspaceInformer tenancyinformers.ClusterWorkspaceInformer, + kubeClusterClient kubernetesclient.ClusterInterface, + metadataClient kcpmetadata.ClusterInterface, + dynamicDiscoverySharedInformerFactory *informer.DynamicDiscoverySharedInformerFactory, + workersPerLogicalCluster int, + informersStarted <-chan struct{}, +) (*Controller, error) { + c := &Controller{ + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), ControllerName), + + dynamicDiscoverySharedInformerFactory: dynamicDiscoverySharedInformerFactory, + kubeClusterClient: kubeClusterClient, + metadataClient: metadataClient, + clusterWorkspaceLister: clusterWorkspaceInformer.Lister(), + informersStarted: informersStarted, + + workersPerLogicalCluster: workersPerLogicalCluster, + + cancelFuncs: map[logicalcluster.Name]func(){}, + + ignoredResources: defaultIgnoredResources(), + } + + clusterWorkspaceInformer.Informer().AddEventHandler( + cache.ResourceEventHandlerFuncs{ + AddFunc: c.enqueue, + UpdateFunc: func(oldObj, newObj interface{}) { + c.enqueue(oldObj) + }, + DeleteFunc: c.enqueue, + }, + ) + + return c, nil +} + +func defaultIgnoredResources() (ret map[schema.GroupResource]struct{}) { + ret = make(map[schema.GroupResource]struct{}) + // Add default ignored resources + for gr := range garbagecollector.DefaultIgnoredResources() { + ret[gr] = struct{}{} + } + // Add projected API resources + for gvr := range projection.ProjectedAPIs() { + ret[gvr.GroupResource()] = struct{}{} + } + return ret +} + +// enqueue adds the key for a ClusterWorkspace to the queue. +func (c *Controller) enqueue(obj interface{}) { + key, err := kcpcache.DeletionHandlingMetaClusterNamespaceKeyFunc(obj) + if err != nil { + utilruntime.HandleError(err) + return + } + + logger := logging.WithQueueKey(logging.WithReconciler(klog.Background(), ControllerName), key) + logger.V(2).Info("queueing ClusterWorkspace") + c.queue.Add(key) +} + +// Start starts the controller. +func (c *Controller) Start(ctx context.Context, numThreads int) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + logger := logging.WithReconciler(klog.FromContext(ctx), ControllerName) + ctx = klog.NewContext(ctx, logger) + logger.Info("Starting controller") + defer logger.Info("Shutting down controller") + + for i := 0; i < numThreads; i++ { + go wait.UntilWithContext(ctx, c.startWorker, time.Second) + } + + <-ctx.Done() +} + +// startWorker runs a single worker goroutine. +func (c *Controller) startWorker(ctx context.Context) { + for c.processNextWorkItem(ctx) { + } +} + +// processNextWorkItem waits for the queue to have a key available and then processes it. +func (c *Controller) processNextWorkItem(ctx context.Context) bool { + // Wait until there is a new item in the working queue + raw, quit := c.queue.Get() + if quit { + return false + } + key := raw.(string) + + logger := logging.WithQueueKey(klog.FromContext(ctx), key) + ctx = klog.NewContext(ctx, logger) + logger.V(1).Info("processing key") + + // No matter what, tell the queue we're done with this key, to unblock + // other workers. + defer c.queue.Done(key) + + if err := c.process(ctx, key); err != nil { + utilruntime.HandleError(fmt.Errorf("%q controller failed to sync %q, err: %w", ControllerName, key, err)) + c.queue.AddRateLimited(key) + return true + } + + c.queue.Forget(key) + + return true +} + +// process processes a single key from the queue. 
+func (c *Controller) process(ctx context.Context, key string) error { + logger := klog.FromContext(ctx) + parent, _, name, err := kcpcache.SplitMetaClusterNamespaceKey(key) + if err != nil { + utilruntime.HandleError(err) + return nil + } + + // turn it into root:org:ws + clusterName := parent.Join(name) + logger = logger.WithValues("logicalCluster", clusterName.String()) + + ws, err := c.clusterWorkspaceLister.Get(key) + if err != nil { + if kerrors.IsNotFound(err) { + logger.V(2).Info("ClusterWorkspace not found - stopping garbage collector controller for it (if needed)") + + c.lock.Lock() + cancel, ok := c.cancelFuncs[clusterName] + if ok { + cancel() + delete(c.cancelFuncs, clusterName) + } + c.lock.Unlock() + + c.dynamicDiscoverySharedInformerFactory.Unsubscribe("gc-" + clusterName.String()) + + return nil + } + + return err + } + logger = logging.WithObject(logger, ws) + + c.lock.Lock() + defer c.lock.Unlock() + + _, found := c.cancelFuncs[clusterName] + if found { + logger.V(4).Info("garbage collector controller already exists") + return nil + } + + logger.V(2).Info("starting garbage collector controller") + + ctx, cancel := context.WithCancel(ctx) + ctx = klog.NewContext(ctx, logger) + c.cancelFuncs[clusterName] = cancel + + if err := c.startGarbageCollectorForClusterWorkspace(ctx, clusterName); err != nil { + cancel() + return fmt.Errorf("error starting garbage collector controller for cluster %q: %w", clusterName, err) + } + + return nil +} + +func (c *Controller) startGarbageCollectorForClusterWorkspace(ctx context.Context, clusterName logicalcluster.Name) error { + logger := klog.FromContext(ctx) + + kubeClient := c.kubeClusterClient.Cluster(clusterName) + + garbageCollector, err := garbagecollector.NewClusterAwareGarbageCollector( + kubeClient, + c.metadataClient.Cluster(clusterName), + c.dynamicDiscoverySharedInformerFactory.RESTMapper(), + c.ignoredResources, + c.dynamicDiscoverySharedInformerFactory.Cluster(clusterName), + c.informersStarted, + clusterName, + ) + if err != nil { + return fmt.Errorf("failed to create the garbage collector: %w", err) + } + + if kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil { + if err := ratelimiter.RegisterMetricAndTrackRateLimiterUsage(clusterName.String()+"-garbage_collector_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter()); err != nil { + return err + } + } + + // Here we diverge from what upstream does. Upstream starts a goroutine that retrieves discovery every 30 seconds, + // starting/stopping dynamic informers as needed based on the updated discovery data. We know that kcp contains + // the combination of built-in types plus CRDs. We use that information to drive what garbage collector evaluates. + // TODO: support scoped shared dynamic discovery to avoid emitting global discovery events. 
+ + garbageCollectorController := garbageCollectorController{ + clusterName: clusterName, + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "quota-"+clusterName.String()), + work: func(ctx context.Context) { + garbageCollector.ResyncMonitors(ctx, c.dynamicDiscoverySharedInformerFactory) + }, + } + go garbageCollectorController.Start(ctx) + + apisChanged := c.dynamicDiscoverySharedInformerFactory.Subscribe("gc-" + clusterName.String()) + + go func() { + for { + select { + case <-ctx.Done(): + return + case <-apisChanged: + logger.V(4).Info("got API change notification") + garbageCollectorController.queue.Add("resync") // this queue only ever has one key in it, as long as it's constant we are OK + } + } + }() + + // Make sure the GC monitors are synced at least once + garbageCollector.ResyncMonitors(ctx, c.dynamicDiscoverySharedInformerFactory) + + go garbageCollector.Run(ctx, c.workersPerLogicalCluster) + + return nil +} + +type garbageCollectorController struct { + clusterName logicalcluster.Name + queue workqueue.RateLimitingInterface + work func(context.Context) + previousCancel func() +} + +// Start starts the controller, which stops when ctx.Done() is closed. +func (c *garbageCollectorController) Start(ctx context.Context) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + logger := logging.WithReconciler(klog.FromContext(ctx), ControllerName+"-"+c.clusterName.String()+"-monitors") + ctx = klog.NewContext(ctx, logger) + logger.Info("Starting controller") + defer logger.Info("Shutting down controller") + + go wait.UntilWithContext(ctx, c.startWorker, time.Second) + <-ctx.Done() + if c.previousCancel != nil { + c.previousCancel() + } +} + +func (c *garbageCollectorController) startWorker(ctx context.Context) { + for c.processNextWorkItem(ctx) { + } +} + +func (c *garbageCollectorController) processNextWorkItem(ctx context.Context) bool { + key, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(key) + + if c.previousCancel != nil { + c.previousCancel() + } + + ctx, c.previousCancel = context.WithCancel(ctx) + c.work(ctx) + c.queue.Forget(key) + return true +} diff --git a/pkg/reconciler/kubequota/kubequota_controller.go b/pkg/reconciler/kubequota/kubequota_controller.go index 2fa300e8542..b42ffd7a259 100644 --- a/pkg/reconciler/kubequota/kubequota_controller.go +++ b/pkg/reconciler/kubequota/kubequota_controller.go @@ -22,16 +22,13 @@ import ( "sync" "time" - kcpapiextensionsv1informers "github.com/kcp-dev/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1" kcpcache "github.com/kcp-dev/apimachinery/pkg/cache" kcpkubernetesinformers "github.com/kcp-dev/client-go/informers" kcpcorev1informers "github.com/kcp-dev/client-go/informers/core/v1" kcpkubernetesclientset "github.com/kcp-dev/client-go/kubernetes" "github.com/kcp-dev/logicalcluster/v2" - apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/quota/v1/generic" @@ -81,7 +78,6 @@ type Controller struct { // For better testability getClusterWorkspace func(key string) (*tenancyv1alpha1.ClusterWorkspace, error) - listCRDs func() ([]*apiextensionsv1.CustomResourceDefinition, error) } // NewController creates a new Controller. 
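Note on the shared wiring: the garbage collector hunks above and the quota hunks below both subscribe to the DynamicDiscoverySharedInformerFactory for API-change notifications and funnel them through a one-key workqueue, so monitor resyncs are serialized and bursts of notifications coalesce into a single resync. The following is a minimal, self-contained sketch of that pattern, not kcp code: the package name, driver type, and newDriver helper are hypothetical; only the workqueue, wait, and runtime calls are real client-go/apimachinery APIs.

package monitorresync // hypothetical package, for illustration only

import (
	"context"
	"time"

	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/util/workqueue"
)

// driver coalesces discovery-change notifications into serialized calls to work.
type driver struct {
	queue workqueue.RateLimitingInterface
	work  func(context.Context) // e.g. a monitor-resync function
}

func newDriver(work func(context.Context)) *driver {
	return &driver{
		queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "monitor-resync"),
		work:  work,
	}
}

// Run pumps notifications into a single-key queue and drains it until ctx is done.
// Because the key is constant, a burst of notifications collapses into one resync.
func (d *driver) Run(ctx context.Context, apisChanged <-chan struct{}) {
	defer utilruntime.HandleCrash()
	defer d.queue.ShutDown()

	go func() {
		for {
			select {
			case <-ctx.Done():
				return
			case <-apisChanged:
				d.queue.Add("resync")
			}
		}
	}()

	go wait.UntilWithContext(ctx, d.worker, time.Second)

	<-ctx.Done()
}

func (d *driver) worker(ctx context.Context) {
	for {
		key, quit := d.queue.Get()
		if quit {
			return
		}
		d.work(ctx)
		d.queue.Done(key)
		d.queue.Forget(key)
	}
}

In the diff itself, work corresponds to garbageCollector.ResyncMonitors (or resourceQuotaController.UpdateMonitors for quota) and apisChanged comes from dynamicDiscoverySharedInformerFactory.Subscribe.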
@@ -90,7 +86,6 @@ func NewController( kubeClusterClient kcpkubernetesclientset.ClusterInterface, kubeInformerFactory kcpkubernetesinformers.SharedInformerFactory, dynamicDiscoverySharedInformerFactory *informer.DynamicDiscoverySharedInformerFactory, - crdInformer kcpapiextensionsv1informers.CustomResourceDefinitionClusterInformer, quotaRecalculationPeriod time.Duration, fullResyncPeriod time.Duration, workersPerLogicalCluster int, @@ -116,10 +111,6 @@ func NewController( getClusterWorkspace: func(key string) (*tenancyv1alpha1.ClusterWorkspace, error) { return clusterWorkspacesInformer.Lister().Get(key) }, - - listCRDs: func() ([]*apiextensionsv1.CustomResourceDefinition, error) { - return crdInformer.Lister().List(labels.Everything()) - }, } clusterWorkspacesInformer.Informer().AddEventHandler( @@ -275,7 +266,7 @@ func (c *Controller) startQuotaForClusterWorkspace(ctx context.Context, clusterN ReplenishmentResyncPeriod: func() time.Duration { return c.fullResyncPeriod }, - DiscoveryFunc: c.dynamicDiscoverySharedInformerFactory.DiscoveryData, + DiscoveryFunc: c.dynamicDiscoverySharedInformerFactory.ServerPreferredResources, IgnoredResourcesFunc: quotaConfiguration.IgnoredResources, InformersStarted: c.informersStarted, Registry: generic.NewRegistry(quotaConfiguration.Evaluators()), @@ -300,7 +291,7 @@ func (c *Controller) startQuotaForClusterWorkspace(ctx context.Context, clusterN clusterName: clusterName, queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "quota-"+clusterName.String()), work: func(ctx context.Context) { - resourceQuotaController.UpdateMonitors(ctx, c.dynamicDiscoverySharedInformerFactory.DiscoveryData) + resourceQuotaController.UpdateMonitors(ctx, c.dynamicDiscoverySharedInformerFactory.ServerPreferredResources) }, } go quotaController.Start(ctx) @@ -319,6 +310,9 @@ func (c *Controller) startQuotaForClusterWorkspace(ctx context.Context, clusterN } }() + // Make sure the monitors are synced at least once + resourceQuotaController.UpdateMonitors(ctx, c.dynamicDiscoverySharedInformerFactory.ServerPreferredResources) + go resourceQuotaController.Run(ctx, c.workersPerLogicalCluster) return nil diff --git a/pkg/server/controllers.go b/pkg/server/controllers.go index f7f92c7e3d3..6b9e7e56ce8 100644 --- a/pkg/server/controllers.go +++ b/pkg/server/controllers.go @@ -62,6 +62,7 @@ import ( "github.com/kcp-dev/kcp/pkg/reconciler/apis/identitycache" "github.com/kcp-dev/kcp/pkg/reconciler/apis/permissionclaimlabel" "github.com/kcp-dev/kcp/pkg/reconciler/cache/replication" + "github.com/kcp-dev/kcp/pkg/reconciler/garbagecollector" "github.com/kcp-dev/kcp/pkg/reconciler/kubequota" schedulinglocationstatus "github.com/kcp-dev/kcp/pkg/reconciler/scheduling/location" schedulingplacement "github.com/kcp-dev/kcp/pkg/reconciler/scheduling/placement" @@ -1115,7 +1116,6 @@ func (s *Server) installKubeQuotaController( kubeClusterClient, s.KubeSharedInformerFactory, s.DynamicDiscoverySharedInformerFactory, - s.ApiExtensionsSharedInformerFactory.Apiextensions().V1().CustomResourceDefinitions(), quotaResyncPeriod, replenishmentPeriod, workersPerLogicalCluster, @@ -1206,6 +1206,51 @@ func (s *Server) installReplicationController(ctx context.Context, config *rest. 
}) } +func (s *Server) installGarbageCollectorController(ctx context.Context, config *rest.Config, server *genericapiserver.GenericAPIServer) error { + config = rest.CopyConfig(config) + config = rest.AddUserAgent(config, garbagecollector.ControllerName) + + kubeClusterClient, err := kcpkubernetesclientset.NewForConfig(config) + if err != nil { + return err + } + + metadataClient, err := kcpmetadata.NewForConfig(config) + if err != nil { + return err + } + + // TODO: make it configurable + const ( + workersPerLogicalCluster = 1 + ) + + c, err := garbagecollector.NewController( + s.KcpSharedInformerFactory.Tenancy().V1alpha1().ClusterWorkspaces(), + kubeClusterClient, + metadataClient, + s.DynamicDiscoverySharedInformerFactory, + workersPerLogicalCluster, + s.syncedCh, + ) + if err != nil { + return err + } + + return server.AddPostStartHook(postStartHookName(garbagecollector.ControllerName), func(hookContext genericapiserver.PostStartHookContext) error { + logger := klog.FromContext(ctx).WithValues("postStartHook", postStartHookName(garbagecollector.ControllerName)) + + if err := s.waitForSync(hookContext.StopCh); err != nil { + logger.Error(err, "failed to finish post-start-hook") + return nil // don't klog.Fatal. This only happens when context is cancelled. + } + + go c.Start(goContext(hookContext), 2) + + return nil + }) +} + func (s *Server) waitForSync(stop <-chan struct{}) error { // Wait for shared informer factories to by synced. // factory. Otherwise, informer list calls may go into backoff (before the CRDs are ready) and diff --git a/pkg/server/server.go b/pkg/server/server.go index fe2823f9de7..4c62c59d0dc 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -408,7 +408,7 @@ func (s *Server) Run(ctx context.Context) error { } if s.Options.Controllers.EnableAll || enabled.Has("cluster") { - // bootstrap root compute worspace + // bootstrap root compute workspace computeBoostraphookName := "rootComputeBoostrap" if err := s.AddPostStartHook(computeBoostraphookName, func(hookContext genericapiserver.PostStartHookContext) error { logger := logger.WithValues("postStartHook", computeBoostraphookName) @@ -510,6 +510,12 @@ func (s *Server) Run(ctx context.Context) error { } } + if s.Options.Controllers.EnableAll || enabled.Has("garbagecollector") { + if err := s.installGarbageCollectorController(ctx, controllerConfig, delegationChainHead); err != nil { + return err + } + } + if s.Options.Virtual.Enabled { if err := s.installVirtualWorkspaces(ctx, controllerConfig, delegationChainHead, s.GenericConfig.Authentication, s.GenericConfig.ExternalAddress, s.GenericConfig.AuditPolicyRuleEvaluator, s.preHandlerChainMux); err != nil { return err diff --git a/test/e2e/fixtures/apifixtures/sheriffs.go b/test/e2e/fixtures/apifixtures/sheriffs.go index c546f083965..3f60ef7588b 100644 --- a/test/e2e/fixtures/apifixtures/sheriffs.go +++ b/test/e2e/fixtures/apifixtures/sheriffs.go @@ -77,6 +77,42 @@ func NewSheriffsCRDWithSchemaDescription(group, description string) *apiextensio return crd } +func NewSheriffsCRDWithVersions(group string, versions ...string) *apiextensionsv1.CustomResourceDefinition { + crdName := fmt.Sprintf("sheriffs.%s", group) + + crd := &apiextensionsv1.CustomResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{ + Name: crdName, + }, + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: group, + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "sheriffs", + Singular: "sheriff", + Kind: "Sheriff", + ListKind: "SheriffList", + }, + Scope: "Namespaced", 
+ }, + } + + for i, version := range versions { + crd.Spec.Versions = append(crd.Spec.Versions, apiextensionsv1.CustomResourceDefinitionVersion{ + Name: version, + Served: true, + Storage: i == len(versions)-1, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{ + Type: "object", + Description: "sheriff " + version, + }, + }, + }) + } + + return crd +} + // CreateSheriffsSchemaAndExport creates a sheriffs apisv1alpha1.APIResourceSchema and then creates a apisv1alpha1.APIExport to export it. func CreateSheriffsSchemaAndExport( ctx context.Context, diff --git a/test/e2e/garbagecollector/apiresourceschema_cowboys.yaml b/test/e2e/garbagecollector/apiresourceschema_cowboys.yaml new file mode 100644 index 00000000000..46ba7f1494a --- /dev/null +++ b/test/e2e/garbagecollector/apiresourceschema_cowboys.yaml @@ -0,0 +1,46 @@ +apiVersion: apis.kcp.dev/v1alpha1 +kind: APIResourceSchema +metadata: + name: today.cowboys.wildwest.dev +spec: + group: wildwest.dev + names: + kind: Cowboy + listKind: CowboyList + plural: cowboys + singular: cowboy + scope: Namespaced + versions: + - name: v1alpha1 + schema: + description: Cowboy is part of the wild west + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: CowboySpec holds the desired state of the Cowboy. + properties: + intent: + type: string + type: object + status: + description: CowboyStatus communicates the observed state of the Cowboy. + properties: + result: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} \ No newline at end of file diff --git a/test/e2e/garbagecollector/garbagecollector_test.go b/test/e2e/garbagecollector/garbagecollector_test.go new file mode 100644 index 00000000000..07cbbe2c8e2 --- /dev/null +++ b/test/e2e/garbagecollector/garbagecollector_test.go @@ -0,0 +1,646 @@ +/* +Copyright 2022 The KCP Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package garbagecollector + +import ( + "context" + "fmt" + "strings" + "testing" + "time" + + kcpapiextensionsclientset "github.com/kcp-dev/apiextensions-apiserver/pkg/client/clientset/versioned" + kcpapiextensionsv1client "github.com/kcp-dev/apiextensions-apiserver/pkg/client/clientset/versioned/typed/apiextensions/v1" + kcpclienthelper "github.com/kcp-dev/apimachinery/pkg/client" + kcpdynamic "github.com/kcp-dev/client-go/dynamic" + kcpkubernetesclientset "github.com/kcp-dev/client-go/kubernetes" + "github.com/kcp-dev/logicalcluster/v2" + "github.com/stretchr/testify/require" + + corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/wait" + corev1ac "k8s.io/client-go/applyconfigurations/core/v1" + metav1ac "k8s.io/client-go/applyconfigurations/meta/v1" + "k8s.io/client-go/discovery/cached/memory" + "k8s.io/client-go/rest" + "k8s.io/client-go/restmapper" + + configcrds "github.com/kcp-dev/kcp/config/crds" + "github.com/kcp-dev/kcp/config/helpers" + apisv1alpha1 "github.com/kcp-dev/kcp/pkg/apis/apis/v1alpha1" + "github.com/kcp-dev/kcp/pkg/apis/third_party/conditions/util/conditions" + kcpclient "github.com/kcp-dev/kcp/pkg/client/clientset/versioned" + "github.com/kcp-dev/kcp/test/e2e/fixtures/apifixtures" + wildwestv1alpha1 "github.com/kcp-dev/kcp/test/e2e/fixtures/wildwest/apis/wildwest/v1alpha1" + wildwestclientset "github.com/kcp-dev/kcp/test/e2e/fixtures/wildwest/client/clientset/versioned" + "github.com/kcp-dev/kcp/test/e2e/framework" +) + +func TestGarbageCollectorBuiltInCoreV1Types(t *testing.T) { + t.Parallel() + framework.Suite(t, "control-plane") + + server := framework.SharedKcpServer(t) + + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + cfg := server.BaseConfig(t) + + kubeClusterClient, err := kcpkubernetesclientset.NewForConfig(cfg) + require.NoError(t, err, "error creating kube cluster client") + + orgClusterName := framework.NewOrganizationFixture(t, server) + + ws := framework.NewWorkspaceFixture(t, server, orgClusterName, framework.WithName("gc-builtins")) + + t.Logf("Creating owner configmap") + owner, err := kubeClusterClient.Cluster(ws).CoreV1().ConfigMaps("default").Apply(ctx, + corev1ac.ConfigMap("owner", "default"), + metav1.ApplyOptions{FieldManager: "e2e-test-runner"}) + require.NoError(t, err, "Error applying owner configmap %s|default/owner", ws) + + t.Logf("Creating owned configmap") + owned, err := kubeClusterClient.Cluster(ws).CoreV1().ConfigMaps("default").Apply(ctx, + corev1ac.ConfigMap("owned", "default"). + WithOwnerReferences(metav1ac.OwnerReference(). + WithAPIVersion("v1"). + WithKind("ConfigMap"). + WithName(owner.Name). 
+ WithUID(owner.UID)), + metav1.ApplyOptions{FieldManager: "e2e-test-runner"}) + require.NoError(t, err, "Error applying owned configmap %s|default/owned", ws) + + t.Logf("Deleting owner configmap") + err = kubeClusterClient.Cluster(ws).CoreV1().ConfigMaps("default").Delete(ctx, owner.Name, metav1.DeleteOptions{}) + + t.Logf("Waiting for the owned configmap to be garbage collected") + framework.Eventually(t, func() (bool, string) { + _, err = kubeClusterClient.Cluster(ws).CoreV1().ConfigMaps("default").Get(ctx, owned.Name, metav1.GetOptions{}) + return apierrors.IsNotFound(err), fmt.Sprintf("configmap not garbage collected: %s", owned.Name) + }, wait.ForeverTestTimeout, 100*time.Millisecond, "error waiting for owned configmap to be garbage collected") +} + +func TestGarbageCollectorTypesFromBinding(t *testing.T) { + t.Parallel() + framework.Suite(t, "control-plane") + + server := framework.SharedKcpServer(t) + + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + orgClusterName := framework.NewOrganizationFixture(t, server) + + apiProviderClusterName := framework.NewWorkspaceFixture(t, server, orgClusterName, framework.WithName("gc-api-export")) + + cfg := server.BaseConfig(t) + + kcpClusterClient, err := kcpclient.NewForConfig(cfg) + require.NoError(t, err, "error creating kcp cluster client") + + clusterCfg := kcpclienthelper.SetCluster(rest.CopyConfig(cfg), apiProviderClusterName) + apiProviderClient, err := kcpclient.NewForConfig(clusterCfg) + require.NoError(t, err) + + dynamicClusterClient, err := kcpdynamic.NewForConfig(cfg) + require.NoError(t, err, "failed to construct dynamic cluster client for server") + + t.Logf("Create the cowboy APIResourceSchema") + mapper := restmapper.NewDeferredDiscoveryRESTMapper(memory.NewMemCacheClient(apiProviderClient.Discovery())) + err = helpers.CreateResourceFromFS(ctx, dynamicClusterClient.Cluster(apiProviderClusterName), mapper, nil, "apiresourceschema_cowboys.yaml", testFiles) + require.NoError(t, err) + + t.Logf("Create an APIExport for it") + cowboysAPIExport := &apisv1alpha1.APIExport{ + ObjectMeta: metav1.ObjectMeta{ + Name: "today-cowboys", + }, + Spec: apisv1alpha1.APIExportSpec{ + LatestResourceSchemas: []string{"today.cowboys.wildwest.dev"}, + }, + } + _, err = kcpClusterClient.ApisV1alpha1().APIExports().Create(logicalcluster.WithCluster(ctx, apiProviderClusterName), cowboysAPIExport, metav1.CreateOptions{}) + require.NoError(t, err) + + // Test multiple workspaces in parallel + for i := 0; i < 3; i++ { + i := i + t.Run(fmt.Sprintf("tc%d", i), func(t *testing.T) { + t.Parallel() + + c, cancelFunc := context.WithCancel(ctx) + t.Cleanup(cancelFunc) + + userClusterName := framework.NewWorkspaceFixture(t, server, orgClusterName, framework.WithName("gc-api-binding-%d", i)) + + t.Logf("Create a binding in the user workspace") + binding := &apisv1alpha1.APIBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cowboys", + }, + Spec: apisv1alpha1.APIBindingSpec{ + Reference: apisv1alpha1.ExportReference{ + Workspace: &apisv1alpha1.WorkspaceExportReference{ + Path: apiProviderClusterName.String(), + ExportName: cowboysAPIExport.Name, + }, + }, + }, + } + + kubeClusterClient, err := kcpkubernetesclientset.NewForConfig(cfg) + require.NoError(t, err, "error creating kube cluster client") + + kcpClusterClient, err := kcpclient.NewForConfig(cfg) + require.NoError(t, err, "error creating kcp cluster client") + + _, err = kcpClusterClient.ApisV1alpha1().APIBindings().Create(logicalcluster.WithCluster(c, userClusterName), 
binding, metav1.CreateOptions{}) + require.NoError(t, err) + + t.Logf("Wait for the binding to be ready") + framework.Eventually(t, func() (bool, string) { + binding, err := kcpClusterClient.ApisV1alpha1().APIBindings().Get(logicalcluster.WithCluster(c, userClusterName), binding.Name, metav1.GetOptions{}) + require.NoError(t, err, "error getting binding %s", binding.Name) + condition := conditions.Get(binding, apisv1alpha1.InitialBindingCompleted) + if condition == nil { + return false, fmt.Sprintf("no %s condition exists", apisv1alpha1.InitialBindingCompleted) + } + if condition.Status == corev1.ConditionTrue { + return true, "" + } + return false, fmt.Sprintf("not done waiting for the binding to be initially bound, reason: %v - message: %v", condition.Reason, condition.Message) + }, wait.ForeverTestTimeout, time.Millisecond*100) + + wildwestClusterClient, err := wildwestclientset.NewForConfig(server.BaseConfig(t)) + require.NoError(t, err, "failed to construct wildwest cluster client for server") + + t.Logf("Wait for being able to list cowboys in the user workspace") + framework.Eventually(t, func() (bool, string) { + _, err := wildwestClusterClient.WildwestV1alpha1().Cowboys(""). + List(logicalcluster.WithCluster(c, userClusterName), metav1.ListOptions{}) + if err != nil { + return false, fmt.Sprintf("Failed to list cowboys: %v", err) + } + return true, "" + }, wait.ForeverTestTimeout, time.Millisecond*100) + + t.Logf("Creating owner cowboy") + owner, err := wildwestClusterClient.WildwestV1alpha1().Cowboys("default"). + Create(logicalcluster.WithCluster(ctx, userClusterName), + &wildwestv1alpha1.Cowboy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "owner", + }, + }, + metav1.CreateOptions{}) + require.NoError(t, err, "Error creating owner cowboy %s|default/owner", userClusterName) + + t.Logf("Creating owned configmap") + ownedConfigMap, err := kubeClusterClient.Cluster(userClusterName).CoreV1().ConfigMaps("default").Apply(ctx, + corev1ac.ConfigMap("owned", "default"). + WithOwnerReferences(metav1ac.OwnerReference(). + WithAPIVersion(wildwestv1alpha1.SchemeGroupVersion.String()). + WithKind("Cowboy"). + WithName(owner.Name). + WithUID(owner.UID)), + metav1.ApplyOptions{FieldManager: "e2e-test-runner"}) + require.NoError(t, err, "Error applying owned configmap %s|default/owned", userClusterName) + + t.Logf("Creating owned cowboy") + ownedCowboy, err := wildwestClusterClient.WildwestV1alpha1().Cowboys("default"). + Create(logicalcluster.WithCluster(ctx, userClusterName), + &wildwestv1alpha1.Cowboy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "owned", + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: wildwestv1alpha1.SchemeGroupVersion.String(), + Kind: "Cowboy", + Name: owner.Name, + UID: owner.UID, + }, + }, + }, + }, + metav1.CreateOptions{}) + require.NoError(t, err, "Error creating owned cowboy %s|default/owner", userClusterName) + + t.Logf("Deleting owner cowboy") + err = wildwestClusterClient.WildwestV1alpha1().Cowboys("default"). + Delete(logicalcluster.WithCluster(ctx, userClusterName), owner.Name, metav1.DeleteOptions{}) + + t.Logf("Waiting for the owned configmap to be garbage collected") + framework.Eventually(t, func() (bool, string) { + _, err = kubeClusterClient.Cluster(userClusterName).CoreV1().ConfigMaps("default"). 
+ Get(ctx, ownedConfigMap.Name, metav1.GetOptions{}) + return apierrors.IsNotFound(err), fmt.Sprintf("configmap not garbage collected: %s", ownedConfigMap.Name) + }, wait.ForeverTestTimeout, 100*time.Millisecond, "error waiting for owned configmap to be garbage collected") + + t.Logf("Waiting for the owned cowboy to be garbage collected") + framework.Eventually(t, func() (bool, string) { + _, err = wildwestClusterClient.WildwestV1alpha1().Cowboys("default"). + Get(logicalcluster.WithCluster(ctx, userClusterName), ownedCowboy.Name, metav1.GetOptions{}) + return apierrors.IsNotFound(err), fmt.Sprintf("cowboy not garbage collected: %s", ownedCowboy.Name) + }, wait.ForeverTestTimeout, 100*time.Millisecond, "error waiting for owned cowboy to be garbage collected") + }) + } +} + +func TestGarbageCollectorNormalCRDs(t *testing.T) { + t.Parallel() + framework.Suite(t, "control-plane") + + server := framework.SharedKcpServer(t) + + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + cfg := server.BaseConfig(t) + + kubeClusterClient, err := kcpkubernetesclientset.NewForConfig(cfg) + require.NoError(t, err, "error creating kube cluster client") + + crdClusterClient, err := kcpapiextensionsclientset.NewForConfig(cfg) + require.NoError(t, err, "failed to construct apiextensions client for server") + + dynamicClusterClient, err := kcpdynamic.NewForConfig(cfg) + require.NoError(t, err, "failed to construct dynamic client for server") + + orgClusterName := framework.NewOrganizationFixture(t, server) + + group := framework.UniqueGroup(".io") + + sheriffCRD1 := apifixtures.NewSheriffsCRDWithSchemaDescription(group, "one") + sheriffCRD2 := apifixtures.NewSheriffsCRDWithSchemaDescription(group, "two") + + ws1 := framework.NewWorkspaceFixture(t, server, orgClusterName, framework.WithName("gc-crd-1")) + ws2 := framework.NewWorkspaceFixture(t, server, orgClusterName, framework.WithName("gc-crd-2")) + + t.Logf("Install a normal sheriffs CRD into workspace 1 %q", ws1) + bootstrapCRD(t, ws1, crdClusterClient.ApiextensionsV1().CustomResourceDefinitions(), sheriffCRD1) + + t.Logf("Install another normal sheriffs CRD with a different schema into workspace 2 %q", ws2) + bootstrapCRD(t, ws2, crdClusterClient.ApiextensionsV1().CustomResourceDefinitions(), sheriffCRD2) + + sheriffsGVR := schema.GroupVersionResource{Group: group, Resource: "sheriffs", Version: "v1"} + + // Test with 2 workspaces to make sure GC works for both + workspaces := []logicalcluster.Name{ws1, ws2} + for _, ws := range workspaces { + t.Logf("Creating owner sheriff") + owner, err := dynamicClusterClient.Cluster(ws).Resource(sheriffsGVR).Namespace("default"). + Create(ctx, &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": sheriffsGVR.GroupVersion().String(), + "kind": "Sheriff", + "metadata": map[string]interface{}{ + "name": "owner", + }, + }, + }, metav1.CreateOptions{}) + require.NoError(t, err, "Error creating owner sheriff %s|default/owner", ws) + + t.Logf("Creating owned configmap") + _, err = kubeClusterClient.Cluster(ws).CoreV1().ConfigMaps("default"). + Apply(ctx, corev1ac.ConfigMap("owned", "default"). + WithOwnerReferences(metav1ac.OwnerReference(). + WithAPIVersion(sheriffsGVR.GroupVersion().String()). + WithKind(owner.GetKind()). + WithName(owner.GetName()).
+ WithUID(owner.GetUID())), + metav1.ApplyOptions{FieldManager: "e2e-test-runner"}) + require.NoError(t, err, "Error applying owned configmap %s|default/owned", ws) + } + + t.Logf("Deleting all sheriffs") + for _, ws := range workspaces { + err = dynamicClusterClient.Cluster(ws).Resource(sheriffsGVR).Namespace("default"). + DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{}) + require.NoError(t, err, "Error deleting all sheriffs in %s", ws) + } + + t.Logf("Waiting for the owned configmaps to be garbage collected") + framework.Eventually(t, func() (bool, string) { + _, err1 := kubeClusterClient.Cluster(ws1).CoreV1().ConfigMaps("default").Get(ctx, "owned", metav1.GetOptions{}) + _, err2 := kubeClusterClient.Cluster(ws2).CoreV1().ConfigMaps("default").Get(ctx, "owned", metav1.GetOptions{}) + return apierrors.IsNotFound(err1) && apierrors.IsNotFound(err2), "configmaps not garbage collected" + }, wait.ForeverTestTimeout, 100*time.Millisecond, "error waiting for owned configmaps to be garbage collected") +} + +func TestGarbageCollectorVersionedCRDs(t *testing.T) { + t.Parallel() + framework.Suite(t, "control-plane") + + server := framework.SharedKcpServer(t) + + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + cfg := server.BaseConfig(t) + + crdClusterClient, err := kcpapiextensionsclientset.NewForConfig(cfg) + require.NoError(t, err, "failed to construct apiextensions client for server") + + dynamicClusterClient, err := kcpdynamic.NewForConfig(cfg) + require.NoError(t, err, "failed to construct dynamic client for server") + + orgClusterName := framework.NewOrganizationFixture(t, server) + + group := framework.UniqueGroup(".io") + + sheriffCRD := apifixtures.NewSheriffsCRDWithVersions(group, "v1", "v2") + + ws := framework.NewWorkspaceFixture(t, server, orgClusterName, framework.WithName("gc-crd-versions")) + + t.Logf("Install a versioned sheriffs CRD into workspace %q", ws) + bootstrapCRD(t, ws, crdClusterClient.ApiextensionsV1().CustomResourceDefinitions(), sheriffCRD) + + sheriffsGVRv1 := schema.GroupVersionResource{Group: group, Resource: "sheriffs", Version: "v1"} + sheriffsGVRv2 := schema.GroupVersionResource{Group: group, Resource: "sheriffs", Version: "v2"} + + t.Logf("Creating owner v1 sheriff") + ownerv1, err := dynamicClusterClient.Cluster(ws).Resource(sheriffsGVRv1).Namespace("default"). + Create(ctx, &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": sheriffsGVRv1.GroupVersion().String(), + "kind": "Sheriff", + "metadata": map[string]interface{}{ + "name": "owner-v1", + }, + }, + }, metav1.CreateOptions{}) + require.NoError(t, err, "Error creating owner sheriff %s|default/owner-v1", ws) + + t.Logf("Creating owned v1 sheriff") + _, err = dynamicClusterClient.Cluster(ws).Resource(sheriffsGVRv1).Namespace("default"). + Create(ctx, &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": sheriffsGVRv1.GroupVersion().String(), + "kind": "Sheriff", + "metadata": map[string]interface{}{ + "name": "owned-v1", + "ownerReferences": []map[string]interface{}{ + { + "apiVersion": ownerv1.GetAPIVersion(), + "kind": ownerv1.GetKind(), + "name": ownerv1.GetName(), + "uid": ownerv1.GetUID(), + }, + }, + }, + }, + }, metav1.CreateOptions{}) + require.NoError(t, err, "Error creating owned sheriff %s|default/owned-v1", ws) + + t.Logf("Creating owned v2 sheriff") + _, err = dynamicClusterClient.Cluster(ws).Resource(sheriffsGVRv2).Namespace("default"). 
+ Create(ctx, &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": sheriffsGVRv2.GroupVersion().String(), + "kind": "Sheriff", + "metadata": map[string]interface{}{ + "name": "owned-v2", + "ownerReferences": []map[string]interface{}{ + { + "apiVersion": ownerv1.GetAPIVersion(), + "kind": ownerv1.GetKind(), + "name": ownerv1.GetName(), + "uid": ownerv1.GetUID(), + }, + }, + }, + }, + }, metav1.CreateOptions{}) + require.NoError(t, err, "Error creating owned sheriff %s|default/owned-v2", ws) + + t.Logf("Deleting owner v1 sheriff") + err = dynamicClusterClient.Cluster(ws).Resource(sheriffsGVRv1).Namespace("default"). + Delete(ctx, ownerv1.GetName(), metav1.DeleteOptions{}) + require.NoError(t, err, "Error deleting sheriff %s in %s", ownerv1.GetName(), ws) + + t.Logf("Waiting for the owned sheriffs to be garbage collected") + framework.Eventually(t, func() (bool, string) { + _, err1 := dynamicClusterClient.Cluster(ws).Resource(sheriffsGVRv1).Namespace("default").Get(ctx, "owned-v1", metav1.GetOptions{}) + _, err2 := dynamicClusterClient.Cluster(ws).Resource(sheriffsGVRv2).Namespace("default").Get(ctx, "owned-v2", metav1.GetOptions{}) + return apierrors.IsNotFound(err1) && apierrors.IsNotFound(err2), "sheriffs not garbage collected" + }, wait.ForeverTestTimeout, 100*time.Millisecond, "error waiting for owned sheriffs to be garbage collected") + + t.Logf("Creating owner v2 sheriff") + ownerv2, err := dynamicClusterClient.Cluster(ws).Resource(sheriffsGVRv2).Namespace("default"). + Create(ctx, &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": sheriffsGVRv2.GroupVersion().String(), + "kind": "Sheriff", + "metadata": map[string]interface{}{ + "name": "owner-v2", + }, + }, + }, metav1.CreateOptions{}) + require.NoError(t, err, "Error creating owner sheriff %s|default/owner-v2", ws) + + t.Logf("Creating owned v1 sheriff") + _, err = dynamicClusterClient.Cluster(ws).Resource(sheriffsGVRv1).Namespace("default"). + Create(ctx, &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": sheriffsGVRv1.GroupVersion().String(), + "kind": "Sheriff", + "metadata": map[string]interface{}{ + "name": "owned-v1", + "ownerReferences": []map[string]interface{}{ + { + "apiVersion": ownerv2.GetAPIVersion(), + "kind": ownerv2.GetKind(), + "name": ownerv2.GetName(), + "uid": ownerv2.GetUID(), + }, + }, + }, + }, + }, metav1.CreateOptions{}) + require.NoError(t, err, "Error creating owned sheriff %s|default/owned-v1", ws) + + t.Logf("Creating owned v2 sheriff") + _, err = dynamicClusterClient.Cluster(ws).Resource(sheriffsGVRv2).Namespace("default"). + Create(ctx, &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": sheriffsGVRv2.GroupVersion().String(), + "kind": "Sheriff", + "metadata": map[string]interface{}{ + "name": "owned-v2", + "ownerReferences": []map[string]interface{}{ + { + "apiVersion": ownerv2.GetAPIVersion(), + "kind": ownerv2.GetKind(), + "name": ownerv2.GetName(), + "uid": ownerv2.GetUID(), + }, + }, + }, + }, + }, metav1.CreateOptions{}) + require.NoError(t, err, "Error creating owned sheriff %s|default/owned-v2", ws) + + t.Logf("Deleting owner v2 sheriff") + err = dynamicClusterClient.Cluster(ws).Resource(sheriffsGVRv2).Namespace("default"). 
+ Delete(ctx, ownerv2.GetName(), metav1.DeleteOptions{}) + require.NoError(t, err, "Error deleting sheriff %s in %s", ownerv2.GetName(), ws) + + t.Logf("Waiting for the owned sheriffs to be garbage collected") + framework.Eventually(t, func() (bool, string) { + _, err1 := dynamicClusterClient.Cluster(ws).Resource(sheriffsGVRv1).Namespace("default").Get(ctx, "owned-v1", metav1.GetOptions{}) + _, err2 := dynamicClusterClient.Cluster(ws).Resource(sheriffsGVRv2).Namespace("default").Get(ctx, "owned-v2", metav1.GetOptions{}) + return apierrors.IsNotFound(err1) && apierrors.IsNotFound(err2), "sheriffs not garbage collected" + }, wait.ForeverTestTimeout, 100*time.Millisecond, "error waiting for owned sheriffs to be garbage collected") +} + +func TestGarbageCollectorClusterScopedCRD(t *testing.T) { + t.Parallel() + framework.Suite(t, "control-plane") + + server := framework.SharedKcpServer(t) + + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + cfg := server.BaseConfig(t) + + crdClusterClient, err := kcpapiextensionsclientset.NewForConfig(cfg) + require.NoError(t, err, "failed to construct apiextensions client for server") + + dynamicClusterClient, err := kcpdynamic.NewForConfig(cfg) + require.NoError(t, err, "failed to construct dynamic client for server") + + orgClusterName := framework.NewOrganizationFixture(t, server) + + group := framework.UniqueGroup(".io") + + crd := NewClusterScopedCRD(group, "clustered") + + ws := framework.NewWorkspaceFixture(t, server, orgClusterName, framework.WithName("gc-crd-cluster-scope")) + + t.Logf("Install cluster-scoped CRD into workspace %q", ws) + bootstrapCRD(t, ws, crdClusterClient.ApiextensionsV1().CustomResourceDefinitions(), crd) + + gvr := schema.GroupVersionResource{Group: group, Resource: crd.Spec.Names.Plural, Version: "v1"} + + t.Logf("Creating owner clustered") + owner, err := dynamicClusterClient.Cluster(ws).Resource(gvr). + Create(ctx, &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": gvr.GroupVersion().String(), + "kind": crd.Spec.Names.Kind, + "metadata": map[string]interface{}{ + "name": "owner", + }, + }, + }, metav1.CreateOptions{}) + require.NoError(t, err, "Error creating owner clustered %s|default/owner", ws) + + t.Logf("Creating owned clustered") + owned := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": gvr.GroupVersion().String(), + "kind": crd.Spec.Names.Kind, + "metadata": map[string]interface{}{ + "name": "owned", + }, + }, + } + owned.SetOwnerReferences([]metav1.OwnerReference{ + { + APIVersion: gvr.GroupVersion().String(), + Kind: owner.GetKind(), + Name: owner.GetName(), + UID: owner.GetUID(), + }, + }) + _, err = dynamicClusterClient.Cluster(ws).Resource(gvr). + Create(ctx, owned, metav1.CreateOptions{}) + require.NoError(t, err, "Error creating owned clustered %s|default/owned", ws) + + t.Logf("Deleting owner clustered") + err = dynamicClusterClient.Cluster(ws).Resource(gvr). + Delete(ctx, "owner", metav1.DeleteOptions{}) + require.NoError(t, err, "Error deleting owner clustered in %s", ws) + + t.Logf("Waiting for the owned clustered to be garbage collected") + framework.Eventually(t, func() (bool, string) { + _, err := dynamicClusterClient.Cluster(ws).Resource(gvr). 
+ Get(ctx, "owner", metav1.GetOptions{}) + return apierrors.IsNotFound(err), "owned clustered not garbage collected" + }, wait.ForeverTestTimeout, 100*time.Millisecond, "error waiting for owned clustered to be garbage collected") +} + +func NewClusterScopedCRD(group, name string) *apiextensionsv1.CustomResourceDefinition { + return &apiextensionsv1.CustomResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s.%s", pluralize(name), group), + }, + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: group, + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Singular: name, + Plural: pluralize(name), + Kind: strings.ToTitle(name), + ListKind: strings.ToTitle(name) + "List", + }, + Scope: apiextensionsv1.ClusterScoped, + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1", + Served: true, + Storage: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{ + Type: "object", + }, + }, + }, + }, + }, + } +} + +func pluralize(name string) string { + switch string(name[len(name)-1]) { + case "s": + return name + "es" + case "y": + return strings.TrimSuffix(name, "y") + "ies" + } + + return name + "s" +} + +func bootstrapCRD( + t *testing.T, + clusterName logicalcluster.Name, + client kcpapiextensionsv1client.CustomResourceDefinitionClusterInterface, + crd *apiextensionsv1.CustomResourceDefinition, +) { + ctx, cancelFunc := context.WithTimeout(context.Background(), wait.ForeverTestTimeout) + t.Cleanup(cancelFunc) + + err := configcrds.CreateSingle(ctx, client.Cluster(clusterName), crd) + require.NoError(t, err, "error bootstrapping CRD %s in cluster %s", crd.Name, clusterName) +} diff --git a/test/e2e/garbagecollector/support.go b/test/e2e/garbagecollector/support.go new file mode 100644 index 00000000000..1c1fdd4a1f6 --- /dev/null +++ b/test/e2e/garbagecollector/support.go @@ -0,0 +1,24 @@ +/* +Copyright 2022 The KCP Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package garbagecollector + +import ( + "embed" +) + +//go:embed *.yaml +var testFiles embed.FS diff --git a/test/e2e/quota/quota_test.go b/test/e2e/quota/quota_test.go index 6bf35268b65..9a323f68149 100644 --- a/test/e2e/quota/quota_test.go +++ b/test/e2e/quota/quota_test.go @@ -107,16 +107,19 @@ func TestKubeQuotaCoreV1TypesFromBinding(t *testing.T) { t.Parallel() framework.Suite(t, "control-plane") + source := framework.SharedKcpServer(t) + + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + // Test multiple workspaces in parallel for i := 0; i < 5; i++ { t.Run(fmt.Sprintf("tc%d", i), func(t *testing.T) { t.Parallel() - ctx, cancelFunc := context.WithCancel(context.Background()) + ctx, cancelFunc := context.WithCancel(ctx) t.Cleanup(cancelFunc) - source := framework.SharedKcpServer(t) - orgClusterName := framework.NewOrganizationFixture(t, source) apiProviderClustername := framework.NewWorkspaceFixture(t, source, orgClusterName) userClusterName := framework.NewWorkspaceFixture(t, source, orgClusterName)