From 57f9aacb806d13d1e2ee4036a1823a610bda0c23 Mon Sep 17 00:00:00 2001
From: Christoph Mewes
Date: Tue, 16 May 2023 19:22:40 +0200
Subject: [PATCH 01/15] remove most of tmc and syncer related code

---
 Makefile | 4 +-
 cmd/kubectl-kcp/cmd/kubectlKcp.go | 8 -
 cmd/syncer/cmd/dns.go | 57 -
 cmd/syncer/cmd/syncer.go | 136 --
 cmd/syncer/main.go | 32 -
 cmd/syncer/options/dnsoptions.go | 47 -
 cmd/syncer/options/options.go | 104 --
 cmd/virtual-workspaces/command/cmd.go | 12 +-
 cmd/virtual-workspaces/options/options.go | 5 -
 hack/update-codegen-clients.sh | 31 +-
 hack/update-codegen-crds.sh | 12 -
 .../pathannotation_admission.go | 2 -
 .../pathannotation_admission_test.go | 7 +-
 pkg/admission/reservedmetadata/admission.go | 4 -
 pkg/cliplugins/bind/cmd/cmd.go | 33 -
 pkg/cliplugins/bind/plugin/bind_compute.go | 383 ----
 pkg/cliplugins/workload/cmd/cmd.go | 180 --
 pkg/cliplugins/workload/plugin/sync.go | 946 ----------
 pkg/cliplugins/workload/plugin/sync_test.go | 586 ------
 pkg/cliplugins/workload/plugin/syncer.yaml | 227 ---
 pkg/cliplugins/workload/plugin/workload.go | 196 --
 pkg/dns/plugin/nsmap/README.md | 15 -
 pkg/dns/plugin/nsmap/config.go | 83 -
 pkg/dns/plugin/nsmap/namespace.go | 78 -
 pkg/dns/plugin/nsmap/nsmap.go | 54 -
 pkg/dns/plugin/nsmap/setup.go | 61 -
 pkg/features/kcp_features.go | 25 +-
 pkg/indexers/indexers.go | 40 -
 pkg/indexers/workload.go | 38 -
 pkg/openapi/zz_generated.openapi.go | 1510 ++++-----------
 .../replication/replication_controller.go | 12 -
 .../deployment/deployment_controller.go | 383 ----
 .../deployment/deployment_controller_test.go | 435 -----
 .../location/location_controller.go | 258 ---
 .../scheduling/location/location_reconcile.go | 131 --
 .../location/location_reconcile_test.go | 227 ---
 .../location/workloadcluster_helpers.go | 70 -
 .../placement/placement_controller.go | 350 ----
 .../scheduling/placement/placement_indexes.go | 38 -
 .../placement/placement_reconcile.go | 82 -
 .../placement_reconcile_namespace.go | 87 -
 .../placement_reconcile_namespace_test.go | 121 --
 .../placement_reconcile_scheduling.go | 157 --
 .../placement_reconcile_scheduling_test.go | 233 ---
 .../workload/apiexport/apiresourceschema.go | 86 -
 .../workload_apiexport_controller.go | 254 ---
 .../apiexport/workload_apiexport_reconcile.go | 250 ---
 .../workload_apiexport_reconcile_test.go | 528 ------
 .../defaultlocation_controller.go | 221 ---
 .../heartbeat/heartbeat_controller.go | 176 --
 .../heartbeat/heartbeat_reconciler.go | 69 -
 .../heartbeat/heartbeat_reconciler_test.go | 97 -
 .../workload/heartbeat/options/options.go | 54 -
 .../namespace/namespace_controller.go | 252 ---
 .../workload/namespace/namespace_reconcile.go | 56 -
 .../namespace_reconcile_placementbind.go | 101 --
 .../namespace_reconcile_placementbind_test.go | 145 --
 .../namespace_reconcile_scheduling.go | 161 --
 .../namespace_reconcile_scheduling_test.go | 365 ----
 .../namespace/namespace_reconcile_status.go | 105 --
 .../namespace_reconcile_status_test.go | 89 -
 .../placement/placement_controller.go | 398 ----
 .../workload/placement/placement_indexes.go | 36 -
 .../workload/placement/placement_reconcile.go | 95 -
 .../placement_reconcile_scheduling.go | 201 --
 .../placement_reconcile_scheduling_test.go | 343 ----
 .../replicateclusterrole_controller.go | 65 -
 .../replicateclusterrolebinding_controller.go | 50 -
 .../replicatelogicalcluster_controller.go | 105 --
 .../workload/resource/resource_controller.go | 567 ------
 .../workload/resource/resource_reconcile.go | 318 ----
 .../resource/resource_reconcile_test.go | 436 -----
 .../synctarget/synctarget_controller.go | 226 ---
 .../synctarget/synctarget_reconcile.go | 125 --
 .../synctarget/synctarget_reconcile_test.go | 481 -----
 .../synctargetexports/synctarget_indexes.go | 61 -
 .../synctargetcompatible_reconcile.go | 136 --
 .../synctargetcompatible_reconcile_test.go | 240 ---
 .../synctargetexports_controller.go | 349 ----
 .../synctargetexports_reconcile.go | 117 --
 .../synctargetexports_reconcile_test.go | 200 --
 pkg/server/config.go | 30 +-
 pkg/server/controllers.go | 3 +-
 pkg/server/server.go | 8 +-
 pkg/syncer/OWNERS | 6 -
 pkg/syncer/apiimporter.go | 361 ----
 .../controllermanager/controllermanager.go | 198 --
 .../endpoint_downstream_controller.go | 252 ---
 .../endpoints/endpoint_downstream_process.go | 109 --
 .../endpoint_downstream_process_test.go | 394 ----
 pkg/syncer/indexers/indexes.go | 49 -
 .../namespace_downstream_controller.go | 338 ----
 .../namespace/namespace_downstream_process.go | 172 --
 .../namespace_downstream_process_test.go | 179 --
 pkg/syncer/shared/finalizer.go | 101 --
 pkg/syncer/shared/helpers.go | 15 -
 pkg/syncer/spec/dns/deployment_dns.yaml | 35 -
 pkg/syncer/spec/dns/dns_process.go | 354 ----
 pkg/syncer/spec/dns/dns_process_test.go | 288 ---
 pkg/syncer/spec/dns/networkpolicy_dns.yaml | 45 -
 pkg/syncer/spec/dns/resources.go | 161 --
 pkg/syncer/spec/dns/role_dns.yaml | 15 -
 pkg/syncer/spec/dns/rolebinding_dns.yaml | 12 -
 pkg/syncer/spec/dns/service_dns.yaml | 20 -
 pkg/syncer/spec/dns/serviceaccount_dns.yaml | 5 -
 pkg/syncer/spec/mutators/podspecable.go | 404 -----
 pkg/syncer/spec/mutators/podspecable_test.go | 1005 ----------
 pkg/syncer/spec/mutators/secrets.go | 59 -
 pkg/syncer/spec/mutators/secrets_test.go | 114 --
 pkg/syncer/spec/spec_controller.go | 432 -----
 pkg/syncer/spec/spec_process.go | 571 ------
 pkg/syncer/spec/spec_process_test.go | 1612 -----------------
 pkg/syncer/status/status_controller.go | 252 ---
 pkg/syncer/status/status_process.go | 256 ---
 pkg/syncer/status/status_process_test.go | 821 ---------
 pkg/syncer/syncer.go | 555 ------
 pkg/syncer/synctarget/gvr_source.go | 343 ----
 pkg/syncer/synctarget/shard_manager.go | 256 ---
 .../synctarget/synctarget_controller.go | 217 ---
 .../synctarget/synctarget_reconciler.go | 56 -
 pkg/syncer/synctarget/tunneler_reconciler.go | 76 -
 pkg/syncer/tunneler.go | 264 ---
 pkg/syncer/tunneler_test.go | 207 ---
 .../upsync/upsync_cleanup_controller.go | 191 --
 pkg/syncer/upsync/upsync_cleanup_reconcile.go | 152 --
 pkg/syncer/upsync/upsync_controller.go | 446 -----
 pkg/syncer/upsync/upsync_process_test.go | 984 ----------
 pkg/syncer/upsync/upsync_reconcile.go | 208 ---
 pkg/tunneler/dialer.go | 172 --
 pkg/tunneler/integration_test.go | 260 ---
 pkg/tunneler/listener.go | 269 ---
 pkg/tunneler/listener_test.go | 80 -
 pkg/tunneler/podsubresourceproxy_handler.go | 274 ---
 .../podsubresourceproxy_handler_test.go | 181 --
 pkg/tunneler/syncertunnel_handler.go | 111 --
 pkg/tunneler/tunnel.go | 126 --
 pkg/tunneler/tunnel_test.go | 99 -
 .../internalapis/fixtures/synctargets.yaml | 205 ---
 .../internalapis/fixtures/workspaces.yaml | 178 ++
 .../framework/internalapis/import_test.go | 18 +-
 sdk/apis/scheduling/OWNERS | 2 -
 sdk/apis/scheduling/register.go | 21 -
 sdk/apis/scheduling/v1alpha1/doc.go | 20 -
 sdk/apis/scheduling/v1alpha1/register.go | 55 -
 .../scheduling/v1alpha1/types_location.go | 169 --
 .../scheduling/v1alpha1/types_placement.go | 175 --
 .../v1alpha1/zz_generated.deepcopy.go | 316 ----
 sdk/apis/workload/helpers/syncintent.go | 77 -
 sdk/apis/workload/register.go | 21 -
 sdk/apis/workload/v1alpha1/doc.go | 20 -
 sdk/apis/workload/v1alpha1/helpers.go | 38 -
 sdk/apis/workload/v1alpha1/register.go | 53 -
 sdk/apis/workload/v1alpha1/register_test.go | 42 -
 .../workload/v1alpha1/synctarget_types.go | 218 ---
 sdk/apis/workload/v1alpha1/types.go | 202 ---
 .../v1alpha1/zz_generated.deepcopy.go | 244 ---
 .../v1alpha1/availableselectorlabel.go | 63 -
 .../v1alpha1/groupversionresource.go | 57 -
 .../scheduling/v1alpha1/location.go | 219 ---
 .../scheduling/v1alpha1/locationreference.go | 48 -
 .../scheduling/v1alpha1/locationspec.go | 75 -
 .../scheduling/v1alpha1/locationstatus.go | 48 -
 .../scheduling/v1alpha1/placement.go | 219 ---
 .../scheduling/v1alpha1/placementspec.go | 75 -
 .../scheduling/v1alpha1/placementstatus.go | 62 -
 sdk/client/applyconfiguration/utils.go | 38 -
 .../workload/v1alpha1/resourcetosync.go | 89 -
 .../workload/v1alpha1/synctarget.go | 219 ---
 .../workload/v1alpha1/synctargetspec.go | 83 -
 .../workload/v1alpha1/synctargetstatus.go | 115 --
 .../workload/v1alpha1/tunnelworkspace.go | 39 -
 .../workload/v1alpha1/virtualworkspace.go | 48 -
 sdk/client/clientset/versioned/clientset.go | 26 -
 .../clientset/versioned/cluster/clientset.go | 24 -
 .../versioned/cluster/fake/clientset.go | 26 -
 .../versioned/cluster/scheme/register.go | 4 -
 .../scheduling/v1alpha1/fake/location.go | 202 ---
 .../scheduling/v1alpha1/fake/placement.go | 202 ---
 .../v1alpha1/fake/scheduling_client.go | 73 -
 .../typed/scheduling/v1alpha1/location.go | 72 -
 .../typed/scheduling/v1alpha1/placement.go | 72 -
 .../scheduling/v1alpha1/scheduling_client.go | 95 -
 .../workload/v1alpha1/fake/synctarget.go | 202 ---
 .../workload/v1alpha1/fake/workload_client.go | 65 -
 .../typed/workload/v1alpha1/synctarget.go | 72 -
 .../workload/v1alpha1/workload_client.go | 90 -
 .../versioned/fake/clientset_generated.go | 14 -
 .../clientset/versioned/fake/register.go | 4 -
 .../clientset/versioned/scheme/register.go | 4 -
 .../typed/scheduling/v1alpha1/doc.go | 20 -
 .../typed/scheduling/v1alpha1/fake/doc.go | 20 -
 .../scheduling/v1alpha1/fake/fake_location.go | 180 --
 .../v1alpha1/fake/fake_placement.go | 180 --
 .../v1alpha1/fake/fake_scheduling_client.go | 45 -
 .../v1alpha1/generated_expansion.go | 23 -
 .../typed/scheduling/v1alpha1/location.go | 244 ---
 .../typed/scheduling/v1alpha1/placement.go | 244 ---
 .../scheduling/v1alpha1/scheduling_client.go | 113 --
 .../versioned/typed/workload/v1alpha1/doc.go | 20 -
 .../typed/workload/v1alpha1/fake/doc.go | 20 -
 .../workload/v1alpha1/fake/fake_synctarget.go | 180 --
 .../v1alpha1/fake/fake_workload_client.go | 41 -
 .../workload/v1alpha1/generated_expansion.go | 21 -
 .../typed/workload/v1alpha1/synctarget.go | 244 ---
 .../workload/v1alpha1/workload_client.go | 108 --
 .../informers/externalversions/factory.go | 22 -
 .../informers/externalversions/generic.go | 21 -
 .../externalversions/scheduling/interface.go | 68 -
 .../scheduling/v1alpha1/interface.go | 81 -
 .../scheduling/v1alpha1/location.go | 179 --
 .../scheduling/v1alpha1/placement.go | 179 --
 .../externalversions/workload/interface.go | 68 -
 .../workload/v1alpha1/interface.go | 67 -
 .../workload/v1alpha1/synctarget.go | 179 --
 .../listers/scheduling/v1alpha1/location.go | 143 --
 .../scheduling/v1alpha1/location_expansion.go | 28 -
 .../listers/scheduling/v1alpha1/placement.go | 143 --
 .../v1alpha1/placement_expansion.go | 28 -
 .../listers/workload/v1alpha1/synctarget.go | 143 --
 .../workload/v1alpha1/synctarget_expansion.go | 28 -
 test/e2e/framework/kcp.go | 3 -
 test/e2e/framework/syncer.go | 842 ---------
 test/e2e/framework/util.go | 3 -
 .../e2e/reconciler/cluster/controller_test.go | 227 ---
 .../deployment/deployment_coordinator_test.go | 336 ----
 .../reconciler/deployment/locations/east.yaml | 14 -
 .../reconciler/deployment/locations/embed.go | 24 -
 .../reconciler/deployment/locations/west.yaml | 14 -
 .../deployment/workloads/deployment.yaml | 24 -
 .../reconciler/deployment/workloads/embed.go | 24 -
 test/e2e/syncer/configmap-admin-role.yaml | 11 -
 .../syncer/configmap-admin-rolebinding.yaml | 11 -
 test/e2e/syncer/deployment.yaml | 23 -
 test/e2e/syncer/dns/dns_test.go | 193 --
 .../syncer/dns/workspace1/0-namespace1.yaml | 4 -
 .../syncer/dns/workspace1/0-namespace2.yaml | 4 -
 test/e2e/syncer/dns/workspace1/embed.go | 24 -
 .../dns/workspace1/ping-across-namespace.yaml | 18 -
 .../dns/workspace1/ping-fully-qualified.yaml | 18 -
 .../dns/workspace1/ping-not-qualified.yaml | 18 -
 test/e2e/syncer/dns/workspace1/service.yaml | 10 -
 .../syncer/dns/workspace2/0-namespace.yaml | 4 -
 test/e2e/syncer/dns/workspace2/embed.go | 24 -
 .../workspace2/ping-fully-qualified-fail.yaml | 18 -
 .../endpoints/deployment-with-upsync.yaml | 23 -
 .../endpoints/deployment-without-upsync.yaml | 23 -
 test/e2e/syncer/endpoints/embed.go | 24 -
 test/e2e/syncer/endpoints/endpoints_test.go | 119 --
 .../syncer/endpoints/service-with-upsync.yaml | 14 -
 .../endpoints/service-without-upsync.yaml | 12 -
 .../in-cluster-config-test-deployment.yaml | 21 -
 test/e2e/syncer/multishard/multishard_test.go | 181 --
 .../multishard/workspace1/deployment.yaml | 24 -
 .../e2e/syncer/multishard/workspace1/embed.go | 24 -
 .../multishard/workspace2/deployment.yaml | 24 -
 .../e2e/syncer/multishard/workspace2/embed.go | 24 -
 test/e2e/syncer/persistentvolume.yaml | 14 -
 test/e2e/syncer/syncer_test.go | 707 --------
 test/e2e/syncer/tunnels_test.go | 472 -----
 third_party/coredns/coremain/run.go | 75 -
 third_party/coredns/name.go | 93 -
 .../cmd/deployment_coordinator.go | 116 --
 tmc/cmd/deployment-coordinator/main.go | 32 -
 .../deployment-coordinator/options/options.go | 56 -
 tmc/pkg/coordination/helpers.go | 217 ---
 tmc/pkg/server/config.go | 19 -
 tmc/pkg/server/controllers.go | 419 -----
 tmc/pkg/server/options/controllers.go | 12 +-
 tmc/pkg/server/options/options.go | 24 +-
 tmc/pkg/server/server.go | 51 -
 tmc/pkg/virtual/options/options.go | 69 -
 tmc/pkg/virtual/syncer/builder/build.go | 120 --
 tmc/pkg/virtual/syncer/builder/forwarding.go | 234 ---
 tmc/pkg/virtual/syncer/builder/template.go | 290 ---
 tmc/pkg/virtual/syncer/context/keys.go | 44 -
 .../syncer_apireconciler_controller.go | 286 ---
 .../syncer_apireconciler_indexes.go | 53 -
 .../syncer_apireconciler_reconcile.go | 232 ---
 tmc/pkg/virtual/syncer/doc.go | 35 -
 tmc/pkg/virtual/syncer/options/options.go | 69 -
 .../virtual/syncer/schemas/builtin/builtin.go | 100 -
 .../syncer/schemas/builtin/builtin_test.go | 29 -
 .../transformations/defaultsummarizing.go | 136 --
 .../virtual/syncer/transformations/helpers.go | 74 -
 .../syncer/transformations/specdiff.go | 77 -
 .../syncer/transformations/transformer.go | 535 ------
 .../transformations/transformer_test.go | 1009 ----------
 .../virtual/syncer/transformations/types.go | 99 -
 .../syncer/upsyncer/storage_wrapper.go | 77 -
 .../virtual/syncer/upsyncer/transformer.go | 96 -
 290 files changed, 552 insertions(+), 44982 deletions(-)
 delete mode 100644 cmd/syncer/cmd/dns.go
 delete mode 100644 cmd/syncer/cmd/syncer.go
 delete mode 100644 cmd/syncer/main.go
 delete mode 100644 cmd/syncer/options/dnsoptions.go
 delete mode 100644 cmd/syncer/options/options.go
 delete mode 100644 pkg/cliplugins/bind/plugin/bind_compute.go
 delete mode 100644 pkg/cliplugins/workload/cmd/cmd.go
 delete mode 100644 pkg/cliplugins/workload/plugin/sync.go
 delete mode 100644 pkg/cliplugins/workload/plugin/sync_test.go
 delete mode 100644 pkg/cliplugins/workload/plugin/syncer.yaml
 delete mode 100644 pkg/cliplugins/workload/plugin/workload.go
 delete mode 100644 pkg/dns/plugin/nsmap/README.md
 delete mode 100644 pkg/dns/plugin/nsmap/config.go
 delete mode 100644 pkg/dns/plugin/nsmap/namespace.go
 delete mode 100644 pkg/dns/plugin/nsmap/nsmap.go
 delete mode 100644 pkg/dns/plugin/nsmap/setup.go
 delete mode 100644 pkg/indexers/workload.go
 delete mode 100644 pkg/reconciler/coordination/deployment/deployment_controller.go
 delete mode 100644 pkg/reconciler/coordination/deployment/deployment_controller_test.go
 delete mode 100644 pkg/reconciler/scheduling/location/location_controller.go
 delete mode 100644 pkg/reconciler/scheduling/location/location_reconcile.go
 delete mode 100644 pkg/reconciler/scheduling/location/location_reconcile_test.go
 delete mode 100644 pkg/reconciler/scheduling/location/workloadcluster_helpers.go
 delete mode 100644 pkg/reconciler/scheduling/placement/placement_controller.go
 delete mode 100644 pkg/reconciler/scheduling/placement/placement_indexes.go
 delete mode 100644 pkg/reconciler/scheduling/placement/placement_reconcile.go
 delete mode 100644 pkg/reconciler/scheduling/placement/placement_reconcile_namespace.go
 delete mode 100644 pkg/reconciler/scheduling/placement/placement_reconcile_namespace_test.go
 delete mode 100644 pkg/reconciler/scheduling/placement/placement_reconcile_scheduling.go
 delete mode 100644 pkg/reconciler/scheduling/placement/placement_reconcile_scheduling_test.go
 delete mode 100644 pkg/reconciler/workload/apiexport/apiresourceschema.go
 delete mode 100644 pkg/reconciler/workload/apiexport/workload_apiexport_controller.go
 delete mode 100644 pkg/reconciler/workload/apiexport/workload_apiexport_reconcile.go
 delete mode 100644 pkg/reconciler/workload/apiexport/workload_apiexport_reconcile_test.go
 delete mode 100644 pkg/reconciler/workload/defaultlocation/defaultlocation_controller.go
 delete mode 100644 pkg/reconciler/workload/heartbeat/heartbeat_controller.go
 delete mode 100644 pkg/reconciler/workload/heartbeat/heartbeat_reconciler.go
 delete mode 100644 pkg/reconciler/workload/heartbeat/heartbeat_reconciler_test.go
 delete mode 100644 pkg/reconciler/workload/heartbeat/options/options.go
 delete mode 100644 pkg/reconciler/workload/namespace/namespace_controller.go
 delete mode 100644 pkg/reconciler/workload/namespace/namespace_reconcile.go
 delete mode 100644 pkg/reconciler/workload/namespace/namespace_reconcile_placementbind.go
 delete mode 100644 pkg/reconciler/workload/namespace/namespace_reconcile_placementbind_test.go
 delete mode 100644 pkg/reconciler/workload/namespace/namespace_reconcile_scheduling.go
 delete mode 100644 pkg/reconciler/workload/namespace/namespace_reconcile_scheduling_test.go
 delete mode 100644 pkg/reconciler/workload/namespace/namespace_reconcile_status.go
 delete mode 100644 pkg/reconciler/workload/namespace/namespace_reconcile_status_test.go
 delete mode 100644 pkg/reconciler/workload/placement/placement_controller.go
 delete mode 100644 pkg/reconciler/workload/placement/placement_indexes.go
 delete mode 100644 pkg/reconciler/workload/placement/placement_reconcile.go
 delete mode 100644 pkg/reconciler/workload/placement/placement_reconcile_scheduling.go
 delete mode 100644 pkg/reconciler/workload/placement/placement_reconcile_scheduling_test.go
 delete mode 100644 pkg/reconciler/workload/replicateclusterrole/replicateclusterrole_controller.go
 delete mode 100644 pkg/reconciler/workload/replicateclusterrolebinding/replicateclusterrolebinding_controller.go
 delete mode 100644 pkg/reconciler/workload/replicatelogicalcluster/replicatelogicalcluster_controller.go
 delete mode 100644 pkg/reconciler/workload/resource/resource_controller.go
 delete mode 100644 pkg/reconciler/workload/resource/resource_reconcile.go
 delete mode 100644 pkg/reconciler/workload/resource/resource_reconcile_test.go
 delete mode 100644 pkg/reconciler/workload/synctarget/synctarget_controller.go
 delete mode 100644 pkg/reconciler/workload/synctarget/synctarget_reconcile.go
 delete mode 100644 pkg/reconciler/workload/synctarget/synctarget_reconcile_test.go
 delete mode 100644 pkg/reconciler/workload/synctargetexports/synctarget_indexes.go
 delete mode 100644 pkg/reconciler/workload/synctargetexports/synctargetcompatible_reconcile.go
 delete mode 100644 pkg/reconciler/workload/synctargetexports/synctargetcompatible_reconcile_test.go
 delete mode 100644 pkg/reconciler/workload/synctargetexports/synctargetexports_controller.go
 delete mode 100644 pkg/reconciler/workload/synctargetexports/synctargetexports_reconcile.go
 delete mode 100644 pkg/reconciler/workload/synctargetexports/synctargetexports_reconcile_test.go
 delete mode 100644 pkg/syncer/OWNERS
 delete mode 100644 pkg/syncer/apiimporter.go
 delete mode 100644 pkg/syncer/controllermanager/controllermanager.go
 delete mode 100644 pkg/syncer/endpoints/endpoint_downstream_controller.go
 delete mode 100644 pkg/syncer/endpoints/endpoint_downstream_process.go
 delete mode 100644 pkg/syncer/endpoints/endpoint_downstream_process_test.go
 delete mode 100644 pkg/syncer/indexers/indexes.go
 delete mode 100644 pkg/syncer/namespace/namespace_downstream_controller.go
 delete mode 100644 pkg/syncer/namespace/namespace_downstream_process.go
 delete mode 100644 pkg/syncer/namespace/namespace_downstream_process_test.go
 delete mode 100644 pkg/syncer/shared/finalizer.go
 delete mode 100644 pkg/syncer/spec/dns/deployment_dns.yaml
 delete mode 100644 pkg/syncer/spec/dns/dns_process.go
 delete mode 100644 pkg/syncer/spec/dns/dns_process_test.go
 delete mode 100644 pkg/syncer/spec/dns/networkpolicy_dns.yaml
 delete mode 100644 pkg/syncer/spec/dns/resources.go
 delete mode 100644 pkg/syncer/spec/dns/role_dns.yaml
 delete mode 100644 pkg/syncer/spec/dns/rolebinding_dns.yaml
 delete mode 100644 pkg/syncer/spec/dns/service_dns.yaml
 delete mode 100644 pkg/syncer/spec/dns/serviceaccount_dns.yaml
 delete mode 100644 pkg/syncer/spec/mutators/podspecable.go
 delete mode 100644 pkg/syncer/spec/mutators/podspecable_test.go
 delete mode 100644 pkg/syncer/spec/mutators/secrets.go
 delete mode 100644 pkg/syncer/spec/mutators/secrets_test.go
 delete mode 100644 pkg/syncer/spec/spec_controller.go
 delete mode 100644 pkg/syncer/spec/spec_process.go
 delete mode 100644 pkg/syncer/spec/spec_process_test.go
 delete mode 100644 pkg/syncer/status/status_controller.go
 delete mode 100644 pkg/syncer/status/status_process.go
 delete mode 100644 pkg/syncer/status/status_process_test.go
 delete mode 100644 pkg/syncer/syncer.go
 delete mode 100644 pkg/syncer/synctarget/gvr_source.go
 delete mode 100644 pkg/syncer/synctarget/shard_manager.go
 delete mode 100644 pkg/syncer/synctarget/synctarget_controller.go
 delete mode 100644 pkg/syncer/synctarget/synctarget_reconciler.go
 delete mode 100644 pkg/syncer/synctarget/tunneler_reconciler.go
 delete mode 100644 pkg/syncer/tunneler.go
 delete mode 100644 pkg/syncer/tunneler_test.go
 delete mode 100644 pkg/syncer/upsync/upsync_cleanup_controller.go
 delete mode 100644 pkg/syncer/upsync/upsync_cleanup_reconcile.go
 delete mode 100644 pkg/syncer/upsync/upsync_controller.go
 delete mode 100644 pkg/syncer/upsync/upsync_process_test.go
 delete mode 100644 pkg/syncer/upsync/upsync_reconcile.go
 delete mode 100644 pkg/tunneler/dialer.go
 delete mode 100644 pkg/tunneler/integration_test.go
 delete mode 100644 pkg/tunneler/listener.go
 delete mode 100644 pkg/tunneler/listener_test.go
 delete mode 100644 pkg/tunneler/podsubresourceproxy_handler.go
 delete mode 100644 pkg/tunneler/podsubresourceproxy_handler_test.go
 delete mode 100644 pkg/tunneler/syncertunnel_handler.go
 delete mode 100644 pkg/tunneler/tunnel.go
 delete mode 100644 pkg/tunneler/tunnel_test.go
 delete mode 100644 pkg/virtual/framework/internalapis/fixtures/synctargets.yaml
 create mode 100644 pkg/virtual/framework/internalapis/fixtures/workspaces.yaml
 delete mode 100644 sdk/apis/scheduling/OWNERS
 delete mode 100644 sdk/apis/scheduling/register.go
 delete mode 100644 sdk/apis/scheduling/v1alpha1/doc.go
 delete mode 100644 sdk/apis/scheduling/v1alpha1/register.go
 delete mode 100644 sdk/apis/scheduling/v1alpha1/types_location.go
 delete mode 100644 sdk/apis/scheduling/v1alpha1/types_placement.go
 delete mode 100644 sdk/apis/scheduling/v1alpha1/zz_generated.deepcopy.go
 delete mode 100644 sdk/apis/workload/helpers/syncintent.go
 delete mode 100644 sdk/apis/workload/register.go
 delete mode 100644 sdk/apis/workload/v1alpha1/doc.go
 delete mode 100644 sdk/apis/workload/v1alpha1/helpers.go
 delete mode 100644 sdk/apis/workload/v1alpha1/register.go
 delete mode 100644 sdk/apis/workload/v1alpha1/register_test.go
 delete mode 100644 sdk/apis/workload/v1alpha1/synctarget_types.go
 delete mode 100644 sdk/apis/workload/v1alpha1/types.go
 delete mode 100644 sdk/apis/workload/v1alpha1/zz_generated.deepcopy.go
 delete mode 100644 sdk/client/applyconfiguration/scheduling/v1alpha1/availableselectorlabel.go
 delete mode 100644 sdk/client/applyconfiguration/scheduling/v1alpha1/groupversionresource.go
 delete mode 100644 sdk/client/applyconfiguration/scheduling/v1alpha1/location.go
 delete mode 100644 sdk/client/applyconfiguration/scheduling/v1alpha1/locationreference.go
 delete mode 100644 sdk/client/applyconfiguration/scheduling/v1alpha1/locationspec.go
 delete mode 100644 sdk/client/applyconfiguration/scheduling/v1alpha1/locationstatus.go
 delete mode 100644 sdk/client/applyconfiguration/scheduling/v1alpha1/placement.go
 delete mode 100644 sdk/client/applyconfiguration/scheduling/v1alpha1/placementspec.go
 delete mode 100644 sdk/client/applyconfiguration/scheduling/v1alpha1/placementstatus.go
 delete mode 100644 sdk/client/applyconfiguration/workload/v1alpha1/resourcetosync.go
 delete mode 100644 sdk/client/applyconfiguration/workload/v1alpha1/synctarget.go
 delete mode 100644 sdk/client/applyconfiguration/workload/v1alpha1/synctargetspec.go
 delete mode 100644 sdk/client/applyconfiguration/workload/v1alpha1/synctargetstatus.go
 delete mode 100644 sdk/client/applyconfiguration/workload/v1alpha1/tunnelworkspace.go
 delete mode 100644 sdk/client/applyconfiguration/workload/v1alpha1/virtualworkspace.go
 delete mode 100644 sdk/client/clientset/versioned/cluster/typed/scheduling/v1alpha1/fake/location.go
 delete mode 100644 sdk/client/clientset/versioned/cluster/typed/scheduling/v1alpha1/fake/placement.go
 delete mode 100644 sdk/client/clientset/versioned/cluster/typed/scheduling/v1alpha1/fake/scheduling_client.go
 delete mode 100644 sdk/client/clientset/versioned/cluster/typed/scheduling/v1alpha1/location.go
 delete mode 100644 sdk/client/clientset/versioned/cluster/typed/scheduling/v1alpha1/placement.go
 delete mode 100644 sdk/client/clientset/versioned/cluster/typed/scheduling/v1alpha1/scheduling_client.go
 delete mode 100644 sdk/client/clientset/versioned/cluster/typed/workload/v1alpha1/fake/synctarget.go
 delete mode 100644 sdk/client/clientset/versioned/cluster/typed/workload/v1alpha1/fake/workload_client.go
 delete mode 100644 sdk/client/clientset/versioned/cluster/typed/workload/v1alpha1/synctarget.go
 delete mode 100644 sdk/client/clientset/versioned/cluster/typed/workload/v1alpha1/workload_client.go
 delete mode 100644 sdk/client/clientset/versioned/typed/scheduling/v1alpha1/doc.go
 delete mode 100644 sdk/client/clientset/versioned/typed/scheduling/v1alpha1/fake/doc.go
 delete mode 100644 sdk/client/clientset/versioned/typed/scheduling/v1alpha1/fake/fake_location.go
 delete mode 100644 sdk/client/clientset/versioned/typed/scheduling/v1alpha1/fake/fake_placement.go
 delete mode 100644 sdk/client/clientset/versioned/typed/scheduling/v1alpha1/fake/fake_scheduling_client.go
 delete mode 100644 sdk/client/clientset/versioned/typed/scheduling/v1alpha1/generated_expansion.go
 delete mode 100644 sdk/client/clientset/versioned/typed/scheduling/v1alpha1/location.go
 delete mode 100644 sdk/client/clientset/versioned/typed/scheduling/v1alpha1/placement.go
 delete mode 100644 sdk/client/clientset/versioned/typed/scheduling/v1alpha1/scheduling_client.go
 delete mode 100644 sdk/client/clientset/versioned/typed/workload/v1alpha1/doc.go
 delete mode 100644 sdk/client/clientset/versioned/typed/workload/v1alpha1/fake/doc.go
 delete mode 100644 sdk/client/clientset/versioned/typed/workload/v1alpha1/fake/fake_synctarget.go
 delete mode 100644 sdk/client/clientset/versioned/typed/workload/v1alpha1/fake/fake_workload_client.go
 delete mode 100644 sdk/client/clientset/versioned/typed/workload/v1alpha1/generated_expansion.go
 delete mode 100644 sdk/client/clientset/versioned/typed/workload/v1alpha1/synctarget.go
 delete mode 100644 sdk/client/clientset/versioned/typed/workload/v1alpha1/workload_client.go
 delete mode 100644 sdk/client/informers/externalversions/scheduling/interface.go
 delete mode 100644 sdk/client/informers/externalversions/scheduling/v1alpha1/interface.go
 delete mode 100644 sdk/client/informers/externalversions/scheduling/v1alpha1/location.go
 delete mode 100644 sdk/client/informers/externalversions/scheduling/v1alpha1/placement.go
 delete mode 100644 sdk/client/informers/externalversions/workload/interface.go
 delete mode 100644 sdk/client/informers/externalversions/workload/v1alpha1/interface.go
 delete mode 100644 sdk/client/informers/externalversions/workload/v1alpha1/synctarget.go
 delete mode 100644 sdk/client/listers/scheduling/v1alpha1/location.go
 delete mode 100644 sdk/client/listers/scheduling/v1alpha1/location_expansion.go
 delete mode 100644 sdk/client/listers/scheduling/v1alpha1/placement.go
 delete mode 100644 sdk/client/listers/scheduling/v1alpha1/placement_expansion.go
 delete mode 100644 sdk/client/listers/workload/v1alpha1/synctarget.go
 delete mode 100644 sdk/client/listers/workload/v1alpha1/synctarget_expansion.go
 delete mode 100644 test/e2e/framework/syncer.go
 delete mode 100644 test/e2e/reconciler/cluster/controller_test.go
 delete mode 100644 test/e2e/reconciler/deployment/deployment_coordinator_test.go
 delete mode 100644 test/e2e/reconciler/deployment/locations/east.yaml
 delete mode 100644 test/e2e/reconciler/deployment/locations/embed.go
 delete mode 100644 test/e2e/reconciler/deployment/locations/west.yaml
 delete mode 100644 test/e2e/reconciler/deployment/workloads/deployment.yaml
 delete mode 100644 test/e2e/reconciler/deployment/workloads/embed.go
 delete mode 100644 test/e2e/syncer/configmap-admin-role.yaml
 delete mode 100644 test/e2e/syncer/configmap-admin-rolebinding.yaml
 delete mode 100644 test/e2e/syncer/deployment.yaml
 delete mode 100644 test/e2e/syncer/dns/dns_test.go
 delete mode 100644 test/e2e/syncer/dns/workspace1/0-namespace1.yaml
 delete mode 100644 test/e2e/syncer/dns/workspace1/0-namespace2.yaml
 delete mode 100644 test/e2e/syncer/dns/workspace1/embed.go
 delete mode 100644 test/e2e/syncer/dns/workspace1/ping-across-namespace.yaml
 delete mode 100644 test/e2e/syncer/dns/workspace1/ping-fully-qualified.yaml
 delete mode 100644 test/e2e/syncer/dns/workspace1/ping-not-qualified.yaml
 delete mode 100644 test/e2e/syncer/dns/workspace1/service.yaml
 delete mode 100644 test/e2e/syncer/dns/workspace2/0-namespace.yaml
 delete mode 100644 test/e2e/syncer/dns/workspace2/embed.go
 delete mode 100644 test/e2e/syncer/dns/workspace2/ping-fully-qualified-fail.yaml
 delete mode 100644 test/e2e/syncer/endpoints/deployment-with-upsync.yaml
 delete mode 100644 test/e2e/syncer/endpoints/deployment-without-upsync.yaml
 delete mode 100644 test/e2e/syncer/endpoints/embed.go
 delete mode 100644 test/e2e/syncer/endpoints/endpoints_test.go
 delete mode 100644 test/e2e/syncer/endpoints/service-with-upsync.yaml
 delete mode 100644 test/e2e/syncer/endpoints/service-without-upsync.yaml
 delete mode 100644 test/e2e/syncer/in-cluster-config-test-deployment.yaml
 delete mode 100644 test/e2e/syncer/multishard/multishard_test.go
 delete mode 100644 test/e2e/syncer/multishard/workspace1/deployment.yaml
 delete mode 100644 test/e2e/syncer/multishard/workspace1/embed.go
 delete mode 100644 test/e2e/syncer/multishard/workspace2/deployment.yaml
 delete mode 100644 test/e2e/syncer/multishard/workspace2/embed.go
 delete mode 100644 test/e2e/syncer/persistentvolume.yaml
 delete mode 100644 test/e2e/syncer/syncer_test.go
 delete mode 100644 test/e2e/syncer/tunnels_test.go
 delete mode 100644 third_party/coredns/coremain/run.go
 delete mode 100644 third_party/coredns/name.go
 delete mode 100644 tmc/cmd/deployment-coordinator/cmd/deployment_coordinator.go
 delete mode 100644 tmc/cmd/deployment-coordinator/main.go
 delete mode 100644 tmc/cmd/deployment-coordinator/options/options.go
 delete mode 100644 tmc/pkg/coordination/helpers.go
 delete mode 100644 tmc/pkg/virtual/options/options.go
 delete mode 100644 tmc/pkg/virtual/syncer/builder/build.go
 delete mode 100644 tmc/pkg/virtual/syncer/builder/forwarding.go
 delete mode 100644 tmc/pkg/virtual/syncer/builder/template.go
 delete mode 100644 tmc/pkg/virtual/syncer/context/keys.go
 delete mode 100644 tmc/pkg/virtual/syncer/controllers/apireconciler/syncer_apireconciler_controller.go
 delete mode 100644 tmc/pkg/virtual/syncer/controllers/apireconciler/syncer_apireconciler_indexes.go
 delete mode 100644 tmc/pkg/virtual/syncer/controllers/apireconciler/syncer_apireconciler_reconcile.go
 delete mode 100644 tmc/pkg/virtual/syncer/doc.go
 delete mode 100644 tmc/pkg/virtual/syncer/options/options.go
 delete mode 100644 tmc/pkg/virtual/syncer/schemas/builtin/builtin.go
 delete mode 100644 tmc/pkg/virtual/syncer/schemas/builtin/builtin_test.go
 delete mode 100644 tmc/pkg/virtual/syncer/transformations/defaultsummarizing.go
 delete mode 100644 tmc/pkg/virtual/syncer/transformations/helpers.go
 delete mode 100644 tmc/pkg/virtual/syncer/transformations/specdiff.go
 delete mode 100644 tmc/pkg/virtual/syncer/transformations/transformer.go
 delete mode 100644 tmc/pkg/virtual/syncer/transformations/transformer_test.go
 delete mode 100644 tmc/pkg/virtual/syncer/transformations/types.go
 delete mode 100644 tmc/pkg/virtual/syncer/upsyncer/storage_wrapper.go
 delete mode 100644 tmc/pkg/virtual/syncer/upsyncer/transformer.go

diff --git a/Makefile b/Makefile
index 17ee7d91ef1..6ae9ac006fe 100644
--- a/Makefile
+++ b/Makefile
@@ -110,7 +110,7 @@ ldflags:
 require-%:
 	@if ! command -v $* 1> /dev/null 2>&1; then echo "$* not found in ${PATH}"; exit 1; fi
 
-build: WHAT ?= ./cmd/... ./tmc/cmd/...
+build: WHAT ?= ./cmd/...
 build: require-jq require-go require-git verify-go-versions ## Build the project
 	GOOS=$(OS) GOARCH=$(ARCH) CGO_ENABLED=0 go build $(BUILDFLAGS) -ldflags="$(LDFLAGS)" -o bin $(WHAT)
 	ln -sf kubectl-workspace bin/kubectl-workspaces
@@ -119,7 +119,7 @@ build: require-jq require-go require-git verify-go-versions ## Build the project
 
 .PHONY: build-all
 build-all:
-	GOOS=$(OS) GOARCH=$(ARCH) $(MAKE) build WHAT='./cmd/... ./tmc/cmd/...'
+	GOOS=$(OS) GOARCH=$(ARCH) $(MAKE) build WHAT='./cmd/...'
 
 .PHONY: build-kind-images
 build-kind-images-ko: require-ko
diff --git a/cmd/kubectl-kcp/cmd/kubectlKcp.go b/cmd/kubectl-kcp/cmd/kubectlKcp.go
index d76b7370323..a2a4d7d6646 100644
--- a/cmd/kubectl-kcp/cmd/kubectlKcp.go
+++ b/cmd/kubectl-kcp/cmd/kubectlKcp.go
@@ -30,7 +30,6 @@ import (
 	bindcmd "github.com/kcp-dev/kcp/pkg/cliplugins/bind/cmd"
 	claimscmd "github.com/kcp-dev/kcp/pkg/cliplugins/claims/cmd"
 	crdcmd "github.com/kcp-dev/kcp/pkg/cliplugins/crd/cmd"
-	workloadcmd "github.com/kcp-dev/kcp/pkg/cliplugins/workload/cmd"
 	workspacecmd "github.com/kcp-dev/kcp/pkg/cliplugins/workspace/cmd"
 	"github.com/kcp-dev/kcp/pkg/cmd/help"
 )
@@ -72,13 +71,6 @@ func KubectlKcpCommand() *cobra.Command {
 	}
 	root.AddCommand(workspaceCmd)
 
-	workloadCmd, err := workloadcmd.New(genericclioptions.IOStreams{In: os.Stdin, Out: os.Stdout, ErrOut: os.Stderr})
-	if err != nil {
-		fmt.Fprintf(os.Stderr, "error: %v\n", err)
-		os.Exit(1)
-	}
-	root.AddCommand(workloadCmd)
-
 	crdCmd := crdcmd.New(genericclioptions.IOStreams{In: os.Stdin, Out: os.Stdout, ErrOut: os.Stderr})
 	root.AddCommand(crdCmd)
 
diff --git a/cmd/syncer/cmd/dns.go b/cmd/syncer/cmd/dns.go
deleted file mode 100644
index 589e549b4f3..00000000000
--- a/cmd/syncer/cmd/dns.go
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
-Copyright 2022 The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package cmd
-
-import (
-	"github.com/spf13/cobra"
-
-	synceroptions "github.com/kcp-dev/kcp/cmd/syncer/options"
-	"github.com/kcp-dev/kcp/pkg/dns/plugin/nsmap"
-	"github.com/kcp-dev/kcp/third_party/coredns/coremain"
-)
-
-func NewDNSCommand() *cobra.Command {
-	options := synceroptions.NewDNSOptions()
-	dnsCommand := &cobra.Command{
-		Use:   "dns",
-		Short: "Manage kcp dns server",
-	}
-
-	startCmd := &cobra.Command{
-		Use:   "start",
-		Short: "Start the kcp dns server",
-
-		RunE: func(cmd *cobra.Command, args []string) error {
-			if err := options.Complete(); err != nil {
-				return err
-			}
-			if err := options.Validate(); err != nil {
-				return err
-			}
-
-			nsmap.ConfigMapName = options.ConfigMapName
-
-			coremain.Start()
-			return nil
-		},
-	}
-	options.AddFlags(startCmd.Flags())
-
-	dnsCommand.AddCommand(startCmd)
-
-	return dnsCommand
-}
diff --git a/cmd/syncer/cmd/syncer.go b/cmd/syncer/cmd/syncer.go
deleted file mode 100644
index 74a6394cecc..00000000000
--- a/cmd/syncer/cmd/syncer.go
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
-Copyright 2022 The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package cmd
-
-import (
-	"context"
-	"errors"
-	"math/rand"
-	"os"
-	"time"
-
-	"github.com/kcp-dev/logicalcluster/v3"
-	"github.com/spf13/cobra"
-
-	"k8s.io/apimachinery/pkg/util/sets"
-	genericapiserver "k8s.io/apiserver/pkg/server"
-	"k8s.io/client-go/tools/clientcmd"
-	logsapiv1 "k8s.io/component-base/logs/api/v1"
-	"k8s.io/component-base/version"
-	"k8s.io/klog/v2"
-
-	synceroptions "github.com/kcp-dev/kcp/cmd/syncer/options"
-	kcpfeatures "github.com/kcp-dev/kcp/pkg/features"
-	"github.com/kcp-dev/kcp/pkg/syncer"
-)
-
-const numThreads = 2
-
-func NewSyncerCommand() *cobra.Command {
-	rand.Seed(time.Now().UTC().UnixNano())
-
-	options := synceroptions.NewOptions()
-	syncerCommand := &cobra.Command{
-		Use:   "syncer",
-		Short: "Synchronizes resources in `kcp` assigned to the clusters",
-		RunE: func(cmd *cobra.Command, args []string) error {
-			if err := logsapiv1.ValidateAndApply(options.Logs, kcpfeatures.DefaultFeatureGate); err != nil {
-				return err
-			}
-			if err := options.Complete(); err != nil {
-				return err
-			}
-
-			if err := options.Validate(); err != nil {
-				return err
-			}
-
-			ctx := genericapiserver.SetupSignalContext()
-			if err := Run(ctx, options); err != nil {
-				return err
-			}
-
-			<-ctx.Done()
-
-			return nil
-		},
-	}
-
-	options.AddFlags(syncerCommand.Flags())
-
-	if v := version.Get().String(); len(v) == 0 {
-		syncerCommand.Version = "<unknown>"
-	} else {
-		syncerCommand.Version = v
-	}
-
-	syncerCommand.AddCommand(NewDNSCommand())
-
-	return syncerCommand
-}
-
-func Run(ctx context.Context, options *synceroptions.Options) error {
-	logger := klog.FromContext(ctx)
-	logger.Info("syncing", "resource-types", options.SyncedResourceTypes)
-
-	kcpConfigOverrides := &clientcmd.ConfigOverrides{
-		CurrentContext: options.FromContext,
-	}
-	upstreamConfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
-		&clientcmd.ClientConfigLoadingRules{ExplicitPath: options.FromKubeconfig},
-		kcpConfigOverrides).ClientConfig()
-	if err != nil {
-		return err
-	}
-
-	upstreamConfig.QPS = options.QPS
-	upstreamConfig.Burst = options.Burst
-
-	downstreamConfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
-		&clientcmd.ClientConfigLoadingRules{ExplicitPath: options.ToKubeconfig},
-		&clientcmd.ConfigOverrides{
-			CurrentContext: options.ToContext,
-		}).ClientConfig()
-	if err != nil {
-		return err
-	}
-
-	downstreamConfig.QPS = options.QPS
-	downstreamConfig.Burst = options.Burst
-
-	namespace := os.Getenv("NAMESPACE")
-	if namespace == "" {
-		return errors.New("missing environment variable: NAMESPACE")
-	}
-
-	return syncer.StartSyncer(
-		ctx,
-		&syncer.SyncerConfig{
-			UpstreamConfig:                upstreamConfig,
-			DownstreamConfig:              downstreamConfig,
-			ResourcesToSync:               sets.New[string](options.SyncedResourceTypes...),
-			SyncTargetPath:                logicalcluster.NewPath(options.FromClusterPath),
-			SyncTargetName:                options.SyncTargetName,
-			SyncTargetUID:                 options.SyncTargetUID,
-			DNSImage:                      options.DNSImage,
-			DownstreamNamespaceCleanDelay: options.DownstreamNamespaceCleanDelay,
-		},
-		numThreads,
-		options.APIImportPollInterval,
-		namespace,
-	)
-}
diff --git a/cmd/syncer/main.go b/cmd/syncer/main.go
deleted file mode 100644
index 2284bcdc402..00000000000
--- a/cmd/syncer/main.go
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
-Copyright 2021 The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package main
-
-import (
-	"os"
-
-	"k8s.io/component-base/cli"
-	_ "k8s.io/component-base/logs/json/register"
-
-	"github.com/kcp-dev/kcp/cmd/syncer/cmd"
-)
-
-func main() {
-	syncerCommand := cmd.NewSyncerCommand()
-	code := cli.Run(syncerCommand)
-	os.Exit(code)
-}
diff --git a/cmd/syncer/options/dnsoptions.go b/cmd/syncer/options/dnsoptions.go
deleted file mode 100644
index 87b74a44c44..00000000000
--- a/cmd/syncer/options/dnsoptions.go
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
-Copyright 2022 The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package options
-
-import (
-	"errors"
-
-	"github.com/spf13/pflag"
-)
-
-type DNSOptions struct {
-	ConfigMapName string
-}
-
-func NewDNSOptions() *DNSOptions {
-	return &DNSOptions{}
-}
-
-func (options *DNSOptions) AddFlags(fs *pflag.FlagSet) {
-	fs.StringVar(&options.ConfigMapName, "configmap-name", options.ConfigMapName, "name of the ConfigMap containing namespace mappings")
-}
-
-func (options *DNSOptions) Complete() error {
-	return nil
-}
-
-func (options *DNSOptions) Validate() error {
-	if options.ConfigMapName == "" {
-		return errors.New("--configmap-name is required")
-	}
-
-	return nil
-}
diff --git a/cmd/syncer/options/options.go b/cmd/syncer/options/options.go
deleted file mode 100644
index 870cb982bf0..00000000000
--- a/cmd/syncer/options/options.go
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
-Copyright 2022 The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package options
-
-import (
-	"errors"
-	"fmt"
-	"strings"
-	"time"
-
-	"github.com/spf13/pflag"
-
-	"k8s.io/component-base/logs"
-	logsapiv1 "k8s.io/component-base/logs/api/v1"
-
-	kcpfeatures "github.com/kcp-dev/kcp/pkg/features"
-	workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1"
-)
-
-type Options struct {
-	QPS                           float32
-	Burst                         int
-	FromKubeconfig                string
-	FromContext                   string
-	FromClusterPath               string
-	ToKubeconfig                  string
-	ToContext                     string
-	SyncTargetName                string
-	SyncTargetUID                 string
-	Logs                          *logs.Options
-	SyncedResourceTypes           []string
-	DNSImage                      string
-	DownstreamNamespaceCleanDelay time.Duration
-
-	APIImportPollInterval time.Duration
-}
-
-func NewOptions() *Options {
-	// Default to -v=2
-	logsOptions := logs.NewOptions()
-	logsOptions.Verbosity = logsapiv1.VerbosityLevel(2)
-
-	return &Options{
-		QPS:                           30,
-		Burst:                         20,
-		SyncedResourceTypes:           []string{},
-		Logs:                          logsOptions,
-		APIImportPollInterval:         1 * time.Minute,
-		DownstreamNamespaceCleanDelay: 30 * time.Second,
-	}
-}
-
-func (options *Options) AddFlags(fs *pflag.FlagSet) {
-	fs.Float32Var(&options.QPS, "qps", options.QPS, "QPS to use when talking to API servers.")
-	fs.IntVar(&options.Burst, "burst", options.Burst, "Burst to use when talking to API servers.")
-	fs.StringVar(&options.FromKubeconfig, "from-kubeconfig", options.FromKubeconfig, "Kubeconfig file for -from cluster.")
-	fs.StringVar(&options.FromContext, "from-context", options.FromContext, "Context to use in the Kubeconfig file for -from cluster, instead of the current context.")
-	fs.StringVar(&options.FromClusterPath, "from-cluster", options.FromClusterPath, "Path of the -from logical cluster.")
-	fs.StringVar(&options.ToKubeconfig, "to-kubeconfig", options.ToKubeconfig, "Kubeconfig file for -to cluster. If not set, the InCluster configuration will be used.")
-	fs.StringVar(&options.ToContext, "to-context", options.ToContext, "Context to use in the Kubeconfig file for -to cluster, instead of the current context.")
-	fs.StringVar(&options.SyncTargetName, "sync-target-name", options.SyncTargetName,
-		fmt.Sprintf("ID of the -to cluster. Resources with this ID set in the '%s' label will be synced.", workloadv1alpha1.ClusterResourceStateLabelPrefix+""))
-	fs.StringVar(&options.SyncTargetUID, "sync-target-uid", options.SyncTargetUID, "The UID from the SyncTarget resource in KCP.")
-	fs.StringArrayVarP(&options.SyncedResourceTypes, "resources", "r", options.SyncedResourceTypes, "Resources to be synchronized in kcp.")
-	fs.DurationVar(&options.APIImportPollInterval, "api-import-poll-interval", options.APIImportPollInterval, "Polling interval for API import.")
-	fs.Var(kcpfeatures.NewFlagValue(), "feature-gates", ""+
-		"A set of key=value pairs that describe feature gates for alpha/experimental features. "+
-		"Options are:\n"+strings.Join(kcpfeatures.KnownFeatures(), "\n")) // hide kube-only gates
-	fs.StringVar(&options.DNSImage, "dns-image", options.DNSImage, "kcp DNS server image.")
-	fs.DurationVar(&options.DownstreamNamespaceCleanDelay, "downstream-namespace-clean-delay", options.DownstreamNamespaceCleanDelay, "Time to wait before deleting a downstream namespace, defaults to 30s.")
-
-	logsapiv1.AddFlags(options.Logs, fs)
-}
-
-func (options *Options) Complete() error {
-	return nil
-}
-
-func (options *Options) Validate() error {
-	if options.FromClusterPath == "" {
-		return errors.New("--from-cluster is required")
-	}
-	if options.FromKubeconfig == "" {
-		return errors.New("--from-kubeconfig is required")
-	}
-	if options.SyncTargetUID == "" {
-		return errors.New("--sync-target-uid is required")
-	}
-	return nil
-}
diff --git a/cmd/virtual-workspaces/command/cmd.go b/cmd/virtual-workspaces/command/cmd.go
index 40c4e042878..bc741dba729 100644
--- a/cmd/virtual-workspaces/command/cmd.go
+++ b/cmd/virtual-workspaces/command/cmd.go
@@ -43,7 +43,6 @@ import (
 	kcpfeatures "github.com/kcp-dev/kcp/pkg/features"
 	"github.com/kcp-dev/kcp/pkg/server/bootstrap"
 	virtualrootapiserver "github.com/kcp-dev/kcp/pkg/virtual/framework/rootapiserver"
-	corevwoptions "github.com/kcp-dev/kcp/pkg/virtual/options"
 	kcpclientset "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster"
 	kcpinformers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions"
 )
@@ -179,16 +178,7 @@ func Run(ctx context.Context, o *options.Options) error {
 		return o.ShardExternalURL
 	}
 
-	coreVWs, err := o.CoreVirtualWorkspaces.NewVirtualWorkspaces(identityConfig, o.RootPathPrefix, sharedExternalURLGetter, wildcardKubeInformers, wildcardKcpInformers, cacheKcpInformers)
-	if err != nil {
-		return err
-	}
-
-	tmcVWs, err := o.TmcVirtualWorkspaces.NewVirtualWorkspaces(identityConfig, o.RootPathPrefix, sharedExternalURLGetter, cacheKcpInformers)
-	if err != nil {
-		return err
-	}
-	rootAPIServerConfig.Extra.VirtualWorkspaces, err = corevwoptions.Merge(coreVWs, tmcVWs)
+	rootAPIServerConfig.Extra.VirtualWorkspaces, err = o.CoreVirtualWorkspaces.NewVirtualWorkspaces(identityConfig, o.RootPathPrefix, sharedExternalURLGetter, wildcardKubeInformers, wildcardKcpInformers, cacheKcpInformers)
 	if err != nil {
 		return err
 	}
diff --git a/cmd/virtual-workspaces/options/options.go b/cmd/virtual-workspaces/options/options.go
index bbf5fe2ca10..418fc94b8d8 100644
--- a/cmd/virtual-workspaces/options/options.go
+++ b/cmd/virtual-workspaces/options/options.go
@@ -32,7 +32,6 @@ import (
 
 	cacheoptions "github.com/kcp-dev/kcp/pkg/cache/client/options"
 	corevwoptions "github.com/kcp-dev/kcp/pkg/virtual/options"
-	tmcvwoptions "github.com/kcp-dev/kcp/tmc/pkg/virtual/options"
 )
 
 // DefaultRootPathPrefix is basically constant forever, or we risk a breaking change. The
@@ -57,7 +56,6 @@ type Options struct {
 	Logs  *logs.Options
 
 	CoreVirtualWorkspaces corevwoptions.Options
-	TmcVirtualWorkspaces  tmcvwoptions.Options
 
 	ProfilerAddress string
 }
@@ -77,7 +75,6 @@ func NewOptions() *Options {
 		Logs:  logs.NewOptions(),
 
 		CoreVirtualWorkspaces: *corevwoptions.NewOptions(),
-		TmcVirtualWorkspaces:  *tmcvwoptions.NewOptions(),
 
 		ProfilerAddress: "",
 	}
@@ -95,7 +92,6 @@ func (o *Options) AddFlags(flags *pflag.FlagSet) {
 	o.Audit.AddFlags(flags)
 	logsapiv1.AddFlags(o.Logs, flags)
 	o.CoreVirtualWorkspaces.AddFlags(flags)
-	o.TmcVirtualWorkspaces.AddFlags(flags)
 
 	flags.StringVar(&o.ShardExternalURL, "shard-external-url", o.ShardExternalURL,
 		"URL used by outside clients to talk to the kcp shard this virtual workspace is related to")
@@ -113,7 +109,6 @@ func (o *Options) Validate() error {
 	errs = append(errs, o.SecureServing.Validate()...)
 	errs = append(errs, o.Authentication.Validate()...)
 	errs = append(errs, o.CoreVirtualWorkspaces.Validate()...)
-	errs = append(errs, o.TmcVirtualWorkspaces.Validate()...)
 
 	if len(o.ShardExternalURL) == 0 {
 		errs = append(errs, fmt.Errorf(("--shard-external-url is required")))
diff --git a/hack/update-codegen-clients.sh b/hack/update-codegen-clients.sh
index a0ad9c706ac..9e21e21da25 100755
--- a/hack/update-codegen-clients.sh
+++ b/hack/update-codegen-clients.sh
@@ -36,9 +36,7 @@ go install "${CODEGEN_PKG}"/cmd/client-gen
   --input-dirs github.com/kcp-dev/kcp/sdk/apis/apiresource/v1alpha1 \
   --input-dirs github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1 \
   --input-dirs github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1 \
-  --input-dirs github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1 \
   --input-dirs github.com/kcp-dev/kcp/sdk/apis/topology/v1alpha1 \
-  --input-dirs github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1 \
   --input-dirs github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/apis/conditions/v1alpha1 \
   --input-dirs k8s.io/apimachinery/pkg/apis/meta/v1,k8s.io/apimachinery/pkg/runtime,k8s.io/apimachinery/pkg/version,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 \
   --output-package github.com/kcp-dev/kcp/sdk/client/applyconfiguration \
@@ -51,9 +49,7 @@ go install "${CODEGEN_PKG}"/cmd/client-gen
   --input github.com/kcp-dev/kcp/sdk/apis/apiresource/v1alpha1 \
   --input github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1 \
   --input github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1 \
-  --input github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1 \
   --input github.com/kcp-dev/kcp/sdk/apis/topology/v1alpha1 \
-  --input github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1 \
   --input-base="" \
   --apply-configuration-package=github.com/kcp-dev/kcp/sdk/client/applyconfiguration \
   --clientset-name "versioned" \
@@ -64,7 +60,7 @@ go install "${CODEGEN_PKG}"/cmd/client-gen
 
 bash "${CODEGEN_PKG}"/generate-groups.sh "deepcopy" \
   github.com/kcp-dev/kcp/sdk/client github.com/kcp-dev/kcp/sdk/apis \
-  "core:v1alpha1 workload:v1alpha1 apiresource:v1alpha1 tenancy:v1alpha1 apis:v1alpha1 scheduling:v1alpha1 topology:v1alpha1" \
+  "core:v1alpha1 apiresource:v1alpha1 tenancy:v1alpha1 apis:v1alpha1 topology:v1alpha1" \
   --go-header-file "${SCRIPT_ROOT}"/hack/boilerplate/boilerplate.generatego.txt \
   --output-base "${SCRIPT_ROOT}" \
   --trim-path-prefix github.com/kcp-dev/kcp
@@ -121,16 +117,15 @@ popd
 
 go install "${CODEGEN_PKG}"/cmd/openapi-gen
 
-"$GOPATH"/bin/openapi-gen --input-dirs github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1 \
---input-dirs github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1 \
---input-dirs github.com/kcp-dev/kcp/sdk/apis/apiresource/v1alpha1 \
---input-dirs github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1 \
---input-dirs github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1 \
---input-dirs github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1 \
---input-dirs github.com/kcp-dev/kcp/sdk/apis/topology/v1alpha1 \
---input-dirs github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/apis/conditions/v1alpha1 \
---input-dirs k8s.io/apimachinery/pkg/apis/meta/v1,k8s.io/apimachinery/pkg/runtime,k8s.io/apimachinery/pkg/version \
---output-package github.com/kcp-dev/kcp/pkg/openapi -O zz_generated.openapi \
---go-header-file ./hack/../hack/boilerplate/boilerplate.generatego.txt \
---output-base "${SCRIPT_ROOT}" \
---trim-path-prefix github.com/kcp-dev/kcp
+"$GOPATH"/bin/openapi-gen \
+  --input-dirs github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1 \
+  --input-dirs github.com/kcp-dev/kcp/sdk/apis/apiresource/v1alpha1 \
+  --input-dirs github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1 \
+  --input-dirs github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1 \
+  --input-dirs github.com/kcp-dev/kcp/sdk/apis/topology/v1alpha1 \
+  --input-dirs github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/apis/conditions/v1alpha1 \
+  --input-dirs k8s.io/apimachinery/pkg/apis/meta/v1,k8s.io/apimachinery/pkg/runtime,k8s.io/apimachinery/pkg/version \
+  --output-package github.com/kcp-dev/kcp/pkg/openapi -O zz_generated.openapi \
+  --go-header-file ./hack/../hack/boilerplate/boilerplate.generatego.txt \
+  --output-base "${SCRIPT_ROOT}" \
+  --trim-path-prefix github.com/kcp-dev/kcp
diff --git a/hack/update-codegen-crds.sh b/hack/update-codegen-crds.sh
index c1f8a9924f6..4d424c0c3b9 100755
--- a/hack/update-codegen-crds.sh
+++ b/hack/update-codegen-crds.sh
@@ -44,15 +44,3 @@ for CRD in "${REPO_ROOT}"/config/crds/*.yaml; do
     mv "${CRD}.patched" "${CRD}"
   fi
 done
-
-${CONTROLLER_GEN} \
-  crd \
-  rbac:roleName=manager-role \
-  webhook \
-  paths="${REPO_ROOT}/test/e2e/reconciler/cluster/..." \
\ - output:crd:artifacts:config="${REPO_ROOT}"/test/e2e/reconciler/cluster/ - -( - cd "${REPO_ROOT}"/sdk/cmd/apigen - go run main.go --input-dir "${REPO_ROOT}"/config/crds --output-dir "${REPO_ROOT}"/config/root-phase0 -) diff --git a/pkg/admission/pathannotation/pathannotation_admission.go b/pkg/admission/pathannotation/pathannotation_admission.go index cf492399fa9..23caa374c10 100644 --- a/pkg/admission/pathannotation/pathannotation_admission.go +++ b/pkg/admission/pathannotation/pathannotation_admission.go @@ -33,7 +33,6 @@ import ( apisv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1" "github.com/kcp-dev/kcp/sdk/apis/core" corev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1" - schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1" tenancyv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1" kcpinformers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions" corev1alpha1listers "github.com/kcp-dev/kcp/sdk/client/listers/core/v1alpha1" @@ -70,7 +69,6 @@ type pathAnnotationPlugin struct { var pathAnnotationResources = sets.New[string]( apisv1alpha1.Resource("apiexports").String(), - schedulingv1alpha1.Resource("locations").String(), tenancyv1alpha1.Resource("workspacetypes").String(), ) diff --git a/pkg/admission/pathannotation/pathannotation_admission_test.go b/pkg/admission/pathannotation/pathannotation_admission_test.go index e4c2877d47d..2364ccc1afb 100644 --- a/pkg/admission/pathannotation/pathannotation_admission_test.go +++ b/pkg/admission/pathannotation/pathannotation_admission_test.go @@ -33,7 +33,6 @@ import ( apisv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1" "github.com/kcp-dev/kcp/sdk/apis/core" corev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1" - schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1" tenancyv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1" ) @@ -126,10 +125,10 @@ func TestPathAnnotationAdmit(t *testing.T) { }, { - name: "happy path: a Location is annotated with a path", + name: "happy path: a WorkspaceType is annotated with a path", admissionVerb: admission.Create, - admissionResource: schedulingv1alpha1.SchemeGroupVersion.WithResource("locations"), - admissionObject: &schedulingv1alpha1.Location{}, + admissionResource: tenancyv1alpha1.SchemeGroupVersion.WithResource("workspacetypes"), + admissionObject: &tenancyv1alpha1.WorkspaceType{}, admissionContext: admissionContextFor("foo"), getLogicalCluster: getCluster("foo"), validateAdmissionObject: objectHasPathAnnotation("root:foo"), diff --git a/pkg/admission/reservedmetadata/admission.go b/pkg/admission/reservedmetadata/admission.go index c45a64133df..e4fc969da96 100644 --- a/pkg/admission/reservedmetadata/admission.go +++ b/pkg/admission/reservedmetadata/admission.go @@ -29,11 +29,9 @@ import ( "k8s.io/utils/strings/slices" "github.com/kcp-dev/kcp/pkg/authorization" - "github.com/kcp-dev/kcp/pkg/syncer" apisv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1" "github.com/kcp-dev/kcp/sdk/apis/core" tenancyv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" ) const ( @@ -42,8 +40,6 @@ const ( var ( annotationAllowList = []string{ - workloadv1alpha1.AnnotationSkipDefaultObjectCreation, - syncer.AdvancedSchedulingFeatureAnnotation, tenancyv1alpha1.ExperimentalWorkspaceOwnerAnnotationKey, // protected by workspace admission from non-system:admins authorization.RequiredGroupsAnnotationKey, // protected by workspace admission from 
non-system:admins core.LogicalClusterPathAnnotationKey, // protected by pathannotation admission from non-system:admins diff --git a/pkg/cliplugins/bind/cmd/cmd.go b/pkg/cliplugins/bind/cmd/cmd.go index 7e06825961b..14f9b7164d7 100644 --- a/pkg/cliplugins/bind/cmd/cmd.go +++ b/pkg/cliplugins/bind/cmd/cmd.go @@ -31,17 +31,6 @@ var ( # Create an APIBinding named "my-binding" that binds to the APIExport "my-export" in the "root:my-service" workspace. %[1]s bind apiexport root:my-service:my-export --name my-binding ` - - bindComputeExampleUses = ` - # Create a placement to deploy standard kubernetes workloads to synctargets in the "root:mylocations" location workspace. - %[1]s bind compute root:mylocations - - # Create a placement to deploy custom workloads to synctargets in the "root:mylocations" location workspace. - %[1]s bind compute root:mylocations --apiexports=root:myapis:customapiexport - - # Create a placement to deploy standard kubernetes workloads to synctargets in the "root:mylocations" location workspace, and select only locations in the us-east region. - %[1]s bind compute root:mylocations --location-selectors=region=us-east1 - ` ) func New(streams genericclioptions.IOStreams) *cobra.Command { @@ -76,27 +65,5 @@ func New(streams genericclioptions.IOStreams) *cobra.Command { bindOpts.BindFlags(bindCmd) cmd.AddCommand(bindCmd) - - bindComputeOpts := plugin.NewBindComputeOptions(streams) - bindComputeCmd := &cobra.Command{ - Use: "compute ", - Short: "Bind to a location workspace", - Example: fmt.Sprintf(bindComputeExampleUses, "kubectl kcp"), - SilenceUsage: true, - RunE: func(cmd *cobra.Command, args []string) error { - if err := bindComputeOpts.Complete(args); err != nil { - return err - } - - if err := bindComputeOpts.Validate(); err != nil { - return err - } - - return bindComputeOpts.Run(cmd.Context()) - }, - } - bindComputeOpts.BindFlags(bindComputeCmd) - - cmd.AddCommand(bindComputeCmd) return cmd } diff --git a/pkg/cliplugins/bind/plugin/bind_compute.go b/pkg/cliplugins/bind/plugin/bind_compute.go deleted file mode 100644 index 6d27e48ff58..00000000000 --- a/pkg/cliplugins/bind/plugin/bind_compute.go +++ /dev/null @@ -1,383 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License.
-*/ - -package plugin - -import ( - "context" - "crypto/sha256" - "fmt" - "strings" - "time" - - "github.com/kcp-dev/logicalcluster/v3" - "github.com/martinlindhe/base36" - "github.com/spf13/cobra" - - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - utilerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/validation" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/cli-runtime/pkg/genericclioptions" - - "github.com/kcp-dev/kcp/pkg/cliplugins/base" - apisv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1" - "github.com/kcp-dev/kcp/sdk/apis/core" - corev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1" - schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1" - "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/util/conditions" - kcpclient "github.com/kcp-dev/kcp/sdk/client/clientset/versioned" -) - -type BindComputeOptions struct { - *base.Options - - // PlacementName is the name of the placement - PlacementName string - - // APIExports is a list of APIExport to use in the workspace. - APIExports []string - - // Namespace selector is a label selector to select namespace for the workload. - namespaceSelector *metav1.LabelSelector - NamespaceSelectorString string - - // LocationSelectors is a list of label selectors to select locations in the location workspace. - locationSelectors []metav1.LabelSelector - LocationSelectorsStrings []string - - // LocationWorkspace is the workspace for synctarget - LocationWorkspace logicalcluster.Path - - // BindWaitTimeout is how long to wait for the placement to be created and successful. - BindWaitTimeout time.Duration -} - -func NewBindComputeOptions(streams genericclioptions.IOStreams) *BindComputeOptions { - return &BindComputeOptions{ - Options: base.NewOptions(streams), - NamespaceSelectorString: labels.Everything().String(), - LocationSelectorsStrings: []string{ - labels.Everything().String(), - }, - APIExports: []string{ - "root:compute:kubernetes", - }, - } -} - -// BindFlags binds fields SyncOptions as command line flags to cmd's flagset. -func (o *BindComputeOptions) BindFlags(cmd *cobra.Command) { - o.Options.BindFlags(cmd) - - cmd.Flags().StringSliceVar(&o.APIExports, "apiexports", o.APIExports, - "APIExport to bind to this workspace for workload, each APIExport should be in the format of :") - cmd.Flags().StringVar(&o.NamespaceSelectorString, "namespace-selector", o.NamespaceSelectorString, "Label select to select namespaces to create workload.") - cmd.Flags().StringSliceVar(&o.LocationSelectorsStrings, "location-selectors", o.LocationSelectorsStrings, - "A list of label selectors to select locations in the location workspace to sync workload.") - cmd.Flags().StringVar(&o.PlacementName, "name", o.PlacementName, "Name of the placement to be created.") - cmd.Flags().DurationVar(&o.BindWaitTimeout, "timeout", time.Second*30, "Duration to wait for Placement to be created and bound successfully.") -} - -// Complete ensures all dynamically populated fields are initialized. 
-func (o *BindComputeOptions) Complete(args []string) error { - if err := o.Options.Complete(); err != nil { - return err - } - - if len(args) != 1 { - return fmt.Errorf("a location workspace should be specified") - } - clusterName, validated := logicalcluster.NewValidatedPath(args[0]) - if !validated { - return fmt.Errorf("location workspace type is incorrect") - } - o.LocationWorkspace = clusterName - - var err error - if o.namespaceSelector, err = metav1.ParseToLabelSelector(o.NamespaceSelectorString); err != nil { - return fmt.Errorf("namespace selector format not correct: %w", err) - } - - for _, locSelector := range o.LocationSelectorsStrings { - selector, err := metav1.ParseToLabelSelector(locSelector) - if err != nil { - return fmt.Errorf("location selector %s format not correct: %w", locSelector, err) - } - o.locationSelectors = append(o.locationSelectors, *selector) - } - - if len(o.PlacementName) == 0 { - // placement name is a hash of location selectors and ns selector, with location workspace name as the prefix - hash := sha256.Sum224([]byte(o.NamespaceSelectorString + strings.Join(o.LocationSelectorsStrings, ",") + o.LocationWorkspace.String())) - base36hash := strings.ToLower(base36.EncodeBytes(hash[:])) - o.PlacementName = fmt.Sprintf("placement-%s", base36hash[:8]) - } - - return nil -} - -// Validate validates the BindOptions are complete and usable. -func (o *BindComputeOptions) Validate() error { - return nil -} - -// Run creates a placement in the workspace, linking to the location workspace. -func (o *BindComputeOptions) Run(ctx context.Context) error { - config, err := o.ClientConfig.ClientConfig() - if err != nil { - return err - } - userWorkspaceKcpClient, err := kcpclient.NewForConfig(config) - if err != nil { - return fmt.Errorf("failed to create kcp client: %w", err) - } - - // apply APIBindings - bindings, err := o.applyAPIBinding(ctx, userWorkspaceKcpClient, sets.New[string](o.APIExports...)) - if err != nil { - return err - } - - // and wait for them to be ready - var message string - if err := wait.PollImmediate(time.Millisecond*500, o.BindWaitTimeout, func() (done bool, err error) { - var ready bool - if ready, message = bindingsReady(bindings); ready { - return true, nil - } - - var updated []*apisv1alpha1.APIBinding - for _, binding := range bindings { - b, err := userWorkspaceKcpClient.ApisV1alpha1().APIBindings().Get(ctx, binding.Name, metav1.GetOptions{}) - if err != nil { - return false, err - } - updated = append(updated, b) - } - bindings = updated - return false, nil - }); err != nil && err.Error() == wait.ErrWaitTimeout.Error() { - return fmt.Errorf("APIBindings not ready: %s", message) - } else if err != nil { - return fmt.Errorf("APIBindings not ready: %w", err) - } - - // apply placement - if err := o.applyPlacement(ctx, userWorkspaceKcpClient); err != nil { - return err - } - - // and wait for it to be ready - if err := wait.PollImmediate(time.Millisecond*500, o.BindWaitTimeout, func() (done bool, err error) { - placement, err := userWorkspaceKcpClient.SchedulingV1alpha1().Placements().Get(ctx, o.PlacementName, metav1.GetOptions{}) - if err != nil { - return false, err - } - - done, message = placementReadyAndScheduled(placement) - return done, nil - }); err != nil && err.Error() == wait.ErrWaitTimeout.Error() { - return fmt.Errorf("placement %q not ready: %s", o.PlacementName, message) - } else if err != nil { - return fmt.Errorf("placement %q not ready: %w", o.PlacementName, err) - } - - _, err = fmt.Fprintf(o.IOStreams.ErrOut, "Placement %q 
is ready.\n", o.PlacementName) - return err -} - -func placementReadyAndScheduled(placement *schedulingv1alpha1.Placement) (bool, string) { - if !conditions.IsTrue(placement, schedulingv1alpha1.PlacementScheduled) { - if msg := conditions.GetMessage(placement, schedulingv1alpha1.PlacementScheduled); len(msg) > 0 { - return false, fmt.Sprintf("placement is not scheduled: %s", msg) - } - return false, "placement is not scheduled" - } - - if !conditions.IsTrue(placement, schedulingv1alpha1.PlacementReady) { - if msg := conditions.GetMessage(placement, schedulingv1alpha1.PlacementReady); msg != "" { - return false, fmt.Sprintf("placement is not ready: %s", msg) - } - return false, "placement is not ready" - } - - return true, "" -} - -func bindingsReady(bindings []*apisv1alpha1.APIBinding) (bool, string) { - for _, binding := range bindings { - if binding.Status.Phase == apisv1alpha1.APIBindingPhaseBound { - continue - } - - conditionMessage := "unknown reason" - if conditions.IsFalse(binding, apisv1alpha1.InitialBindingCompleted) { - conditionMessage = conditions.GetMessage(binding, apisv1alpha1.InitialBindingCompleted) - } else if conditions.IsFalse(binding, apisv1alpha1.APIExportValid) { - conditionMessage = conditions.GetMessage(binding, apisv1alpha1.APIExportValid) - } - path := logicalcluster.NewPath(binding.Spec.Reference.Export.Path) - var bindTo string - if path.Empty() { - bindTo = fmt.Sprintf("local APIExport %q", binding.Spec.Reference.Export.Name) - } else { - bindTo = fmt.Sprintf("APIExport %s", path.Join(binding.Spec.Reference.Export.Name)) - } - return false, fmt.Sprintf("APIBinding %s is not bound to APIExport %q yet: %s", binding.Name, bindTo, conditionMessage) - } - - return true, "" -} - -const maxBindingNamePrefixLength = validation.DNS1123SubdomainMaxLength - 1 - 8 - -func apiBindingName(clusterName logicalcluster.Path, apiExportName string) string { - maxLen := len(apiExportName) - if maxLen > maxBindingNamePrefixLength { - maxLen = maxBindingNamePrefixLength - } - bindingNamePrefix := apiExportName[:maxLen] - - hash := sha256.Sum224([]byte(clusterName.RequestPath())) - base36hash := strings.ToLower(base36.EncodeBytes(hash[:])) - return fmt.Sprintf("%s-%s", bindingNamePrefix, base36hash[:8]) -} - -func (o *BindComputeOptions) applyAPIBinding(ctx context.Context, client kcpclient.Interface, desiredAPIExports sets.Set[string]) ([]*apisv1alpha1.APIBinding, error) { - apiBindings, err := client.ApisV1alpha1().APIBindings().List(ctx, metav1.ListOptions{}) - if err != nil { - return nil, err - } - - // the cluster name we use for local bindings. If there is a binding already, - // we use it to get the local cluster name. Otherwise, we use the empty string. - // This is important to not get confused about local bindings with empty - // path and those with the cluster name as path. - var localClusterName logicalcluster.Name - var localPath logicalcluster.Path - - existingAPIExports := sets.New[string]() - for i := range apiBindings.Items { - binding := apiBindings.Items[i] - if binding.Spec.Reference.Export == nil { - continue - } - // TODO(sttts): binding.Spec.Reference.Export.Path is not unique for one export. This whole method does not work reliably. 
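// Aside: the naming scheme used by apiBindingName above (and by Complete for
// the default placement name) boils down to "hash the identifying inputs,
// base36-encode, keep the first eight characters". A minimal, self-contained
// sketch of that technique follows; the helper name shortSuffix is
// illustrative only, not a kcp function, and it assumes just the
// crypto/sha256 and martinlindhe/base36 packages already imported above.
package main

import (
	"crypto/sha256"
	"fmt"
	"strings"

	"github.com/martinlindhe/base36"
)

// shortSuffix derives a stable, DNS-1123-safe suffix: equal inputs always
// produce equal output, so name generation stays idempotent across reruns.
func shortSuffix(inputs ...string) string {
	hash := sha256.Sum224([]byte(strings.Join(inputs, "")))
	return strings.ToLower(base36.EncodeBytes(hash[:]))[:8]
}

func main() {
	// Prints something like "placement-0abc12de"; the 8-character suffix is
	// what the maxBindingNamePrefixLength budget above leaves room for.
	fmt.Println("placement-" + shortSuffix("region=us-east1", "root:mylocations"))
}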
- path := logicalcluster.NewPath(binding.Spec.Reference.Export.Path) - if path.Empty() { - path = logicalcluster.From(&binding).Path() - } - existingAPIExports.Insert(path.Join(binding.Spec.Reference.Export.Name).String()) - localClusterName = logicalcluster.From(&binding) - - // try to get the local path too, to be able to identify empty path, local cluster name and local path. - if localPath.Empty() { - cluster, err := client.CoreV1alpha1().LogicalClusters().Get(ctx, corev1alpha1.LogicalClusterName, metav1.GetOptions{}) - if err != nil { - return nil, err - } - localPath = logicalcluster.NewPath(cluster.Annotations[core.LogicalClusterPathAnnotationKey]) - } - if !localPath.Empty() { - existingAPIExports.Insert(localPath.Join(binding.Spec.Reference.Export.Name).String()) - } - } - - if localClusterName != "" { - // add clusterName when missing such that our set logic works - old := desiredAPIExports - desiredAPIExports = sets.New[string]() - for _, export := range sets.List[string](old) { - path, name := logicalcluster.NewPath(export).Split() - if path.Empty() { - path = localClusterName.Path() - } - desiredAPIExports.Insert(path.Join(name).String()) - } - } - - var errs []error - diff := desiredAPIExports.Difference(existingAPIExports) - bindings := make([]*apisv1alpha1.APIBinding, 0, len(diff)) - for export := range diff { - path, name := logicalcluster.NewPath(export).Split() - if path == localClusterName.Path() { - // empty path for local bindings - path = logicalcluster.Path{} - } - apiBinding := &apisv1alpha1.APIBinding{ - ObjectMeta: metav1.ObjectMeta{ - Name: apiBindingName(path, name), - }, - Spec: apisv1alpha1.APIBindingSpec{ - Reference: apisv1alpha1.BindingReference{ - Export: &apisv1alpha1.ExportBindingReference{ - Path: path.String(), - Name: name, - }, - }, - }, - } - binding, err := client.ApisV1alpha1().APIBindings().Create(ctx, apiBinding, metav1.CreateOptions{}) - if err != nil && !errors.IsAlreadyExists(err) { - errs = append(errs, fmt.Errorf("failed binding APIExport %q: %w", path.Join(name), err)) - continue - } - - bindings = append(bindings, binding) - - if _, err = fmt.Fprintf(o.IOStreams.ErrOut, "Binding APIExport %q.\n", export); err != nil { - errs = append(errs, err) - } - } - - return bindings, utilerrors.NewAggregate(errs) -} - -func (o *BindComputeOptions) applyPlacement(ctx context.Context, client kcpclient.Interface) error { - placement := &schedulingv1alpha1.Placement{ - ObjectMeta: metav1.ObjectMeta{ - Name: o.PlacementName, - }, - Spec: schedulingv1alpha1.PlacementSpec{ - NamespaceSelector: o.namespaceSelector, - LocationSelectors: o.locationSelectors, - LocationWorkspace: o.LocationWorkspace.String(), - LocationResource: schedulingv1alpha1.GroupVersionResource{ - Group: "workload.kcp.io", - Version: "v1alpha1", - Resource: "synctargets", - }, - }, - } - - _, err := client.SchedulingV1alpha1().Placements().Create(ctx, placement, metav1.CreateOptions{}) - if err != nil { - if errors.IsAlreadyExists(err) { - _, err = fmt.Fprintf(o.Out, "placement %s already exists.\n", o.PlacementName) - return err - } - - return err - } - - _, err = fmt.Fprintf(o.Out, "placement %s created.\n", o.PlacementName) - return err -} diff --git a/pkg/cliplugins/workload/cmd/cmd.go b/pkg/cliplugins/workload/cmd/cmd.go deleted file mode 100644 index a2b12259ba2..00000000000 --- a/pkg/cliplugins/workload/cmd/cmd.go +++ /dev/null @@ -1,180 +0,0 @@ -/* -Copyright 2022 The KCP Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cmd - -import ( - "fmt" - - "github.com/spf13/cobra" - - "k8s.io/cli-runtime/pkg/genericclioptions" - - "github.com/kcp-dev/kcp/pkg/cliplugins/workload/plugin" -) - -var ( - syncExample = ` - # Ensure a syncer is running on the specified sync target. - %[1]s workload sync --syncer-image -o syncer.yaml - KUBECONFIG= kubectl apply -f syncer.yaml - - # Directly apply the manifest - %[1]s workload sync --syncer-image -o - | KUBECONFIG= kubectl apply -f - -` - cordonExample = ` - # Mark a sync target as unschedulable. - %[1]s workload cordon -` - uncordonExample = ` - # Mark a sync target as schedulable. - %[1]s workload uncordon -` - drainExample = ` - # Start draining a sync target in preparation for maintenance. - %[1]s workload drain -` -) - -// New provides a cobra command for workload operations. -func New(streams genericclioptions.IOStreams) (*cobra.Command, error) { - cmd := &cobra.Command{ - Aliases: []string{"workloads"}, - Use: "workload", - Short: "Manages KCP sync targets", - SilenceUsage: true, - TraverseChildren: true, - RunE: func(cmd *cobra.Command, args []string) error { - return cmd.Help() - }, - } - - // Sync command - syncOptions := plugin.NewSyncOptions(streams) - - enableSyncerCmd := &cobra.Command{ - Use: "sync --syncer-image [--resources=,..] -o ", - Short: "Create a synctarget in kcp with service account and RBAC permissions. 
Output a manifest to deploy a syncer for the given sync target in a physical cluster.", - Example: fmt.Sprintf(syncExample, "kubectl kcp"), - SilenceUsage: true, - RunE: func(c *cobra.Command, args []string) error { - if len(args) != 1 { - return c.Help() - } - - if err := syncOptions.Complete(args); err != nil { - return err - } - - if err := syncOptions.Validate(); err != nil { - return err - } - - return syncOptions.Run(c.Context()) - }, - } - - syncOptions.BindFlags(enableSyncerCmd) - cmd.AddCommand(enableSyncerCmd) - - // Cordon command - cordonOpts := plugin.NewCordonOptions(streams) - cordonOpts.Cordon = true - - cordonCmd := &cobra.Command{ - Use: "cordon ", - Short: "Mark sync target as unschedulable", - Example: fmt.Sprintf(cordonExample, "kubectl kcp"), - SilenceUsage: true, - RunE: func(c *cobra.Command, args []string) error { - if len(args) != 1 { - return c.Help() - } - - if err := cordonOpts.Complete(args); err != nil { - return err - } - - if err := cordonOpts.Validate(); err != nil { - return err - } - - return cordonOpts.Run(c.Context()) - }, - } - - cordonOpts.BindFlags(cordonCmd) - cmd.AddCommand(cordonCmd) - - // Uncordon command - uncordonOpts := plugin.NewCordonOptions(streams) - uncordonOpts.Cordon = false - - uncordonCmd := &cobra.Command{ - Use: "uncordon ", - Short: "Mark sync target as schedulable", - Example: fmt.Sprintf(uncordonExample, "kubectl kcp"), - SilenceUsage: true, - RunE: func(c *cobra.Command, args []string) error { - if len(args) != 1 { - return c.Help() - } - - if err := uncordonOpts.Complete(args); err != nil { - return err - } - - if err := uncordonOpts.Validate(); err != nil { - return err - } - - return uncordonOpts.Run(c.Context()) - }, - } - - uncordonOpts.BindFlags(uncordonCmd) - cmd.AddCommand(uncordonCmd) - - // Drain command - drainOpts := plugin.NewDrainOptions(streams) - - drainCmd := &cobra.Command{ - Use: "drain ", - Short: "Start draining sync target in preparation for maintenance", - Example: fmt.Sprintf(drainExample, "kubectl kcp"), - SilenceUsage: true, - RunE: func(c *cobra.Command, args []string) error { - if len(args) != 1 { - return c.Help() - } - - if err := drainOpts.Complete(args); err != nil { - return err - } - - if err := drainOpts.Validate(); err != nil { - return err - } - - return drainOpts.Run(c.Context()) - }, - } - - drainOpts.BindFlags(drainCmd) - cmd.AddCommand(drainCmd) - - return cmd, nil -} diff --git a/pkg/cliplugins/workload/plugin/sync.go b/pkg/cliplugins/workload/plugin/sync.go deleted file mode 100644 index ad815ee144e..00000000000 --- a/pkg/cliplugins/workload/plugin/sync.go +++ /dev/null @@ -1,946 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package plugin - -import ( - "bytes" - "context" - "crypto/sha256" - "embed" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io" - "net" - "os" - "sort" - "strings" - "text/template" - "time" - - jsonpatch "github.com/evanphx/json-patch" - "github.com/kcp-dev/logicalcluster/v3" - "github.com/martinlindhe/base36" - "github.com/spf13/cobra" - - corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" - "k8s.io/apimachinery/pkg/api/equality" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - utilerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/validation" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/cli-runtime/pkg/genericclioptions" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - "k8s.io/klog/v2" - - "github.com/kcp-dev/kcp/pkg/cliplugins/base" - "github.com/kcp-dev/kcp/pkg/cliplugins/helpers" - kcpfeatures "github.com/kcp-dev/kcp/pkg/features" - "github.com/kcp-dev/kcp/pkg/reconciler/workload/apiexport" - apiresourcev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apiresource/v1alpha1" - apisv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1" - tenancyv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" - kcpclient "github.com/kcp-dev/kcp/sdk/client/clientset/versioned" -) - -//go:embed *.yaml -var embeddedResources embed.FS - -const ( - SyncerSecretConfigKey = "kubeconfig" - SyncerIDPrefix = "kcp-syncer-" - DNSIDPrefix = "kcp-dns-" - MaxSyncTargetNameLength = validation.DNS1123SubdomainMaxLength - (9 + len(SyncerIDPrefix)) -) - -// SyncOptions contains options for configuring a SyncTarget and its corresponding syncer. -type SyncOptions struct { - *base.Options - - // ResourcesToSync is a list of fully-qualified resource names that should be synced by the syncer. - ResourcesToSync []string - // APIExports is a list of APIExport to be supported by the synctarget. - APIExports []string - // SyncerImage is the container image that should be used for the syncer. - SyncerImage string - // Replicas is the number of replicas to configure in the syncer's deployment. - Replicas int - // OutputFile is the path to a file where the YAML for the syncer should be written. - OutputFile string - // DownstreamNamespace is the name of the namespace in the physical cluster where the syncer deployment is created. - DownstreamNamespace string - // KCPNamespace is the name of the namespace in the kcp workspace where the service account is created for the - // syncer. - KCPNamespace string - // QPS is the refill rate for the syncer client's rate limiter bucket (steady state requests per second). - QPS float32 - // Burst is the maximum size for the syncer client's rate limiter bucket when idle. - Burst int - // SyncTargetName is the name of the SyncTarget in the kcp workspace. - SyncTargetName string - // SyncTargetLabels are the labels to be applied to the SyncTarget in the kcp workspace. - SyncTargetLabels []string - // APIImportPollInterval is the time interval to push apiimport. - APIImportPollInterval time.Duration - // FeatureGates is used to configure which feature gates are enabled. - FeatureGates string - // DownstreamNamespaceCleanDelay is the time to wait before deleting of a downstream namespace. - DownstreamNamespaceCleanDelay time.Duration -} - -// NewSyncOptions returns a new SyncOptions. 
-func NewSyncOptions(streams genericclioptions.IOStreams) *SyncOptions { - return &SyncOptions{ - Options: base.NewOptions(streams), - - Replicas: 1, - KCPNamespace: "default", - QPS: 20, - Burst: 30, - APIImportPollInterval: 1 * time.Minute, - APIExports: []string{"root:compute:kubernetes"}, - DownstreamNamespaceCleanDelay: 30 * time.Second, - } -} - -// BindFlags binds fields SyncOptions as command line flags to cmd's flagset. -func (o *SyncOptions) BindFlags(cmd *cobra.Command) { - o.Options.BindFlags(cmd) - - cmd.Flags().StringSliceVar(&o.ResourcesToSync, "resources", o.ResourcesToSync, "Resources to synchronize with kcp, each resource should be in the format of resourcename.,"+ - "e.g. to sync routes to physical cluster the resource name should be given as --resource routes.route.openshift.io") - cmd.Flags().StringSliceVar(&o.APIExports, "apiexports", o.APIExports, - "APIExport to be supported by the syncer, each APIExport should be in the format of :, "+ - "e.g. root:compute:kubernetes is the kubernetes APIExport in root:compute workspace") - cmd.Flags().StringVar(&o.SyncerImage, "syncer-image", o.SyncerImage, "The syncer image to use in the syncer's deployment YAML. Images are published at https://github.com/kcp-dev/kcp/pkgs/container/kcp%2Fsyncer.") - cmd.Flags().IntVar(&o.Replicas, "replicas", o.Replicas, "Number of replicas of the syncer deployment.") - cmd.Flags().StringVar(&o.KCPNamespace, "kcp-namespace", o.KCPNamespace, "The name of the kcp namespace to create a service account in.") - cmd.Flags().StringVarP(&o.OutputFile, "output-file", "o", o.OutputFile, "The manifest file to be created and applied to the physical cluster. Use - for stdout.") - cmd.Flags().StringVarP(&o.DownstreamNamespace, "namespace", "n", o.DownstreamNamespace, "The namespace to create the syncer in the physical cluster. By default this is \"kcp-syncer--\".") - cmd.Flags().Float32Var(&o.QPS, "qps", o.QPS, "QPS to use when talking to API servers.") - cmd.Flags().IntVar(&o.Burst, "burst", o.Burst, "Burst to use when talking to API servers.") - cmd.Flags().StringVar(&o.FeatureGates, "feature-gates", o.FeatureGates, - "A set of key=value pairs that describe feature gates for alpha/experimental features. "+ - "Options are:\n"+strings.Join(kcpfeatures.KnownFeatures(), "\n")) // hide kube-only gates - cmd.Flags().DurationVar(&o.APIImportPollInterval, "api-import-poll-interval", o.APIImportPollInterval, "Polling interval for API import.") - cmd.Flags().DurationVar(&o.DownstreamNamespaceCleanDelay, "downstream-namespace-clean-delay", o.DownstreamNamespaceCleanDelay, "Time to wait before deleting a downstream namespaces.") - cmd.Flags().StringSliceVar(&o.SyncTargetLabels, "labels", o.SyncTargetLabels, "Labels to apply on the SyncTarget created in kcp, each label should be in the format of key=value.") -} - -// Complete ensures all dynamically populated fields are initialized. -func (o *SyncOptions) Complete(args []string) error { - if err := o.Options.Complete(); err != nil { - return err - } - - o.SyncTargetName = args[0] - - return nil -} - -// Validate validates the SyncOptions are complete and usable. 
-func (o *SyncOptions) Validate() error { - var errs []error - - if err := o.Options.Validate(); err != nil { - errs = append(errs, err) - } - - if o.SyncerImage == "" { - errs = append(errs, errors.New("--syncer-image is required")) - } - - if o.KCPNamespace == "" { - errs = append(errs, errors.New("--kcp-namespace is required")) - } - - if o.Replicas < 0 { - errs = append(errs, errors.New("--replicas cannot be negative")) - } - if o.Replicas > 1 { - // TODO: relax when we have leader-election in the syncer - errs = append(errs, errors.New("only 0 and 1 are valid values for --replicas")) - } - - if o.OutputFile == "" { - errs = append(errs, errors.New("--output-file is required")) - } - - // see pkg/syncer/shared/GetDNSID - if len(o.SyncTargetName)+len(DNSIDPrefix)+8+8+2 > 254 { - errs = append(errs, fmt.Errorf("the maximum length of the sync-target-name is %d", MaxSyncTargetNameLength)) - } - - for _, l := range o.SyncTargetLabels { - if len(strings.Split(l, "=")) != 2 { - errs = append(errs, fmt.Errorf("label '%s' is not in the format of key=value", l)) - } - } - - for _, apiExport := range o.APIExports { - _, name := logicalcluster.NewPath(apiExport).Split() - if name == workloadv1alpha1.ImportedAPISExportName { - errs = append(errs, fmt.Errorf("%s is a reserved APIExport name and should not be set", workloadv1alpha1.ImportedAPISExportName)) - } - } - - return utilerrors.NewAggregate(errs) -} - -// Run prepares a kcp workspace for use with a syncer and outputs the -// configuration required to deploy a syncer to the pcluster to stdout. -func (o *SyncOptions) Run(ctx context.Context) error { - config, err := o.ClientConfig.ClientConfig() - if err != nil { - return err - } - - var output io.Writer - if o.OutputFile == "-" { - output = o.IOStreams.Out - } else { - outputFile, err := os.Create(o.OutputFile) - if err != nil { - return err - } - defer outputFile.Close() - output = outputFile - } - - labels := map[string]string{} - for _, l := range o.SyncTargetLabels { - parts := strings.Split(l, "=") - if len(parts) != 2 { - continue - } - labels[parts[0]] = parts[1] - } - - token, syncerID, syncTarget, err := o.enableSyncerForWorkspace(ctx, config, o.SyncTargetName, o.KCPNamespace, labels) - if err != nil { - return err - } - - expectedResourcesForPermission, err := o.getResourcesForPermission(ctx, config, o.SyncTargetName) - if err != nil { - return err - } - - configURL, _, err := helpers.ParseClusterURL(config.Host) - if err != nil { - return fmt.Errorf("current URL %q does not point to workspace", config.Host) - } - - // Make sure the generated URL has the port specified correctly. - if _, _, err = net.SplitHostPort(configURL.Host); err != nil { - var addrErr *net.AddrError - const missingPort = "missing port in address" - if errors.As(err, &addrErr) && addrErr.Err == missingPort { - if configURL.Scheme == "https" { - configURL.Host = net.JoinHostPort(configURL.Host, "443") - } else { - configURL.Host = net.JoinHostPort(configURL.Host, "80") - } - } else { - return fmt.Errorf("failed to parse host %q: %w", configURL.Host, err) - } - } - - if o.DownstreamNamespace == "" { - o.DownstreamNamespace = syncerID - } - - // Compose the syncer's upstream configuration server URL without any path. This is - // required so long as the API importer and syncer expect to require cluster clients. - // - // TODO(marun) It's probably preferable that the syncer and importer are provided a - // cluster configuration since they only operate against a single workspace. 
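// Aside: the host:port normalization a few lines above relies on
// net.SplitHostPort reporting a *net.AddrError whose Err field is
// "missing port in address". A minimal, self-contained sketch of the same
// technique; ensurePort is an illustrative name, not a kcp function.
package main

import (
	"errors"
	"fmt"
	"net"
	"net/url"
)

// ensurePort defaults the port from the scheme when the host has none, so
// later code can always rely on an explicit host:port pair.
func ensurePort(u *url.URL) error {
	if _, _, err := net.SplitHostPort(u.Host); err != nil {
		var addrErr *net.AddrError
		if !errors.As(err, &addrErr) || addrErr.Err != "missing port in address" {
			return fmt.Errorf("failed to parse host %q: %w", u.Host, err)
		}
		if u.Scheme == "https" {
			u.Host = net.JoinHostPort(u.Host, "443")
		} else {
			u.Host = net.JoinHostPort(u.Host, "80")
		}
	}
	return nil
}

func main() {
	u, _ := url.Parse("https://kcp.example.com/clusters/root")
	if err := ensurePort(u); err != nil {
		panic(err)
	}
	fmt.Println(u.Host) // prints "kcp.example.com:443"
}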
- serverURL := configURL.Scheme + "://" + configURL.Host - input := templateInput{ - ServerURL: serverURL, - CAData: base64.StdEncoding.EncodeToString(config.CAData), - Token: token, - KCPNamespace: o.KCPNamespace, - Namespace: o.DownstreamNamespace, - - SyncTargetPath: logicalcluster.From(syncTarget).Path().String(), - SyncTarget: o.SyncTargetName, - SyncTargetUID: string(syncTarget.UID), - - Image: o.SyncerImage, - Replicas: o.Replicas, - ResourcesToSync: o.ResourcesToSync, - QPS: o.QPS, - Burst: o.Burst, - FeatureGatesString: o.FeatureGates, - APIImportPollIntervalString: o.APIImportPollInterval.String(), - DownstreamNamespaceCleanDelayString: o.DownstreamNamespaceCleanDelay.String(), - } - - resources, err := renderSyncerResources(input, syncerID, sets.List[string](expectedResourcesForPermission)) - if err != nil { - return err - } - - _, err = output.Write(resources) - if o.OutputFile != "-" { - fmt.Fprintf(o.ErrOut, "\nWrote physical cluster manifest to %s for namespace %q. Use\n\n KUBECONFIG= kubectl apply -f %q\n\nto apply it. "+ - "Use\n\n KUBECONFIG= kubectl get deployment -n %q %s\n\nto verify the syncer pod is running.\n", o.OutputFile, o.DownstreamNamespace, o.OutputFile, o.DownstreamNamespace, syncerID) - } - return err -} - -// getSyncerID returns a unique ID for a syncer derived from the name and its UID. It's -// a valid DNS segment and can be used as namespace or object names. -func getSyncerID(syncTarget *workloadv1alpha1.SyncTarget) string { - syncerHash := sha256.Sum224([]byte(syncTarget.UID)) - base36hash := strings.ToLower(base36.EncodeBytes(syncerHash[:])) - return fmt.Sprintf("kcp-syncer-%s-%s", syncTarget.Name, base36hash[:8]) -} - -func (o *SyncOptions) applySyncTarget(ctx context.Context, kcpClient kcpclient.Interface, syncTargetName string, labels map[string]string) (*workloadv1alpha1.SyncTarget, error) { - supportedAPIExports := make([]tenancyv1alpha1.APIExportReference, 0, len(o.APIExports)) - for _, export := range o.APIExports { - lclusterName, name := logicalcluster.NewPath(export).Split() - supportedAPIExports = append(supportedAPIExports, tenancyv1alpha1.APIExportReference{ - Export: name, - Path: lclusterName.String(), - }) - } - - // create local apiexport if resources flag is set - if len(o.ResourcesToSync) > 0 { - apiExport, err := kcpClient.ApisV1alpha1().APIExports().Get(ctx, workloadv1alpha1.ImportedAPISExportName, metav1.GetOptions{}) - switch { - case apierrors.IsNotFound(err): - fmt.Fprintf(o.ErrOut, "Creating APIExport %q\n", workloadv1alpha1.ImportedAPISExportName) - apiExport = &apisv1alpha1.APIExport{ - ObjectMeta: metav1.ObjectMeta{ - Name: workloadv1alpha1.ImportedAPISExportName, - Annotations: map[string]string{ - workloadv1alpha1.ComputeAPIExportAnnotationKey: "true", - }, - }, - Spec: apisv1alpha1.APIExportSpec{ - LatestResourceSchemas: []string{}, - }, - } - apiExport, _ = mergeLatestResourceSchema(apiExport, o.ResourcesToSync) - _, err = kcpClient.ApisV1alpha1().APIExports().Create(ctx, apiExport, metav1.CreateOptions{}) - if err != nil && !apierrors.IsAlreadyExists(err) { - return nil, err - } - case err != nil: - return nil, err - default: - if apiExport, modified := mergeLatestResourceSchema(apiExport, o.ResourcesToSync); modified { - _, err = kcpClient.ApisV1alpha1().APIExports().Update(ctx, apiExport, metav1.UpdateOptions{}) - if err != nil { - return nil, err - } - } - } - - // if ResourcesToSync is not empty, add export in synctarget workspace. 
- if !sets.New[string](o.APIExports...).Has(workloadv1alpha1.ImportedAPISExportName) { - supportedAPIExports = append(supportedAPIExports, tenancyv1alpha1.APIExportReference{ - Export: workloadv1alpha1.ImportedAPISExportName, - }) - } - } - - syncTarget, err := kcpClient.WorkloadV1alpha1().SyncTargets().Get(ctx, syncTargetName, metav1.GetOptions{}) - - switch { - case apierrors.IsNotFound(err): - // Create the sync target that will serve as a point of coordination between - // kcp and the syncer (e.g. heartbeating from the syncer and virtual cluster urls - // to the syncer). - fmt.Fprintf(o.ErrOut, "Creating synctarget %q\n", syncTargetName) - syncTarget, err = kcpClient.WorkloadV1alpha1().SyncTargets().Create(ctx, - &workloadv1alpha1.SyncTarget{ - ObjectMeta: metav1.ObjectMeta{ - Name: syncTargetName, - Labels: labels, - }, - Spec: workloadv1alpha1.SyncTargetSpec{ - SupportedAPIExports: supportedAPIExports, - }, - }, - metav1.CreateOptions{}, - ) - if err != nil && !apierrors.IsAlreadyExists(err) { - return nil, fmt.Errorf("failed to create synctarget %q: %w", syncTargetName, err) - } - if err == nil { - return syncTarget, nil - } - case err != nil: - return nil, err - } - - if equality.Semantic.DeepEqual(labels, syncTarget.ObjectMeta.Labels) && equality.Semantic.DeepEqual(supportedAPIExports, syncTarget.Spec.SupportedAPIExports) { - return syncTarget, nil - } - - // Patch synctarget with updated exports - oldData, err := json.Marshal(workloadv1alpha1.SyncTarget{ - ObjectMeta: metav1.ObjectMeta{ - Labels: syncTarget.ObjectMeta.Labels, - }, - Spec: workloadv1alpha1.SyncTargetSpec{ - SupportedAPIExports: syncTarget.Spec.SupportedAPIExports, - }, - }) - if err != nil { - return nil, fmt.Errorf("failed to Marshal old data for syncTarget %s: %w", syncTargetName, err) - } - - newData, err := json.Marshal(workloadv1alpha1.SyncTarget{ - ObjectMeta: metav1.ObjectMeta{ - UID: syncTarget.UID, - ResourceVersion: syncTarget.ResourceVersion, - Labels: labels, - }, // to ensure they appear in the patch as preconditions - Spec: workloadv1alpha1.SyncTargetSpec{ - SupportedAPIExports: supportedAPIExports, - }, - }) - if err != nil { - return nil, fmt.Errorf("failed to Marshal new data for syncTarget %s: %w", syncTargetName, err) - } - - patchBytes, err := jsonpatch.CreateMergePatch(oldData, newData) - if err != nil { - return nil, fmt.Errorf("failed to create merge patch for syncTarget %q because: %w", syncTargetName, err) - } - - if syncTarget, err = kcpClient.WorkloadV1alpha1().SyncTargets().Patch(ctx, syncTargetName, types.MergePatchType, patchBytes, metav1.PatchOptions{}); err != nil { - return nil, fmt.Errorf("failed to patch syncTarget %s: %w", syncTargetName, err) - } - return syncTarget, nil -} - -// getResourcesForPermission get all resources to sync from syncTarget status and resources flags. It is used to generate the rbac on -// physical cluster for syncer. -func (o *SyncOptions) getResourcesForPermission(ctx context.Context, config *rest.Config, syncTargetName string) (sets.Set[string], error) { - kcpClient, err := kcpclient.NewForConfig(config) - if err != nil { - return nil, fmt.Errorf("failed to create kcp client: %w", err) - } - - // Poll synctarget to get all resources to sync, the ResourcesToSync set from the flag should be also added, since - // its related APIResourceSchemas will not be added until the syncer is started. - expectedResourcesForPermission := sets.New[string](o.ResourcesToSync...) - // secrets and configmaps are always needed. 
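// Aside: the marshal-old / marshal-new / CreateMergePatch pattern used by
// applySyncTarget above (and repeated below for the ServiceAccount and
// ClusterRole) is easiest to see in isolation. A minimal sketch: the label
// values are made up, and only the evanphx/json-patch dependency already
// imported by this file is assumed.
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	// Serialize only the fields being managed, in their old and new states.
	oldData := []byte(`{"metadata":{"labels":{"env":"dev"}}}`)
	newData := []byte(`{"metadata":{"labels":{"env":"prod","region":"us-east"}}}`)

	// CreateMergePatch emits just the difference; sent with
	// types.MergePatchType, it leaves unrelated fields untouched.
	patch, err := jsonpatch.CreateMergePatch(oldData, newData)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(patch)) // {"metadata":{"labels":{"env":"prod","region":"us-east"}}}
}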
- expectedResourcesForPermission.Insert("secrets", "configmaps") - err = wait.PollImmediateWithContext(ctx, 100*time.Millisecond, 30*time.Second, func(ctx context.Context) (bool, error) { - syncTarget, err := kcpClient.WorkloadV1alpha1().SyncTargets().Get(ctx, syncTargetName, metav1.GetOptions{}) - if err != nil { - return false, nil //nolint:nilerr - } - - if len(syncTarget.Spec.SupportedAPIExports) == 0 { - return true, nil - } - - // skip if there is only the local imported-apis APIExport in the synctarget workspace, since we may not get syncedResources yet. - if len(syncTarget.Spec.SupportedAPIExports) == 1 && - syncTarget.Spec.SupportedAPIExports[0].Export == workloadv1alpha1.ImportedAPISExportName { - return true, nil - } - - if len(syncTarget.Status.SyncedResources) == 0 { - return false, nil - } - for _, rs := range syncTarget.Status.SyncedResources { - expectedResourcesForPermission.Insert(fmt.Sprintf("%s.%s", rs.Resource, rs.Group)) - } - return true, nil - }) - if err != nil { - return nil, fmt.Errorf("error waiting for getting resources to sync in syncTarget %s, %w", syncTargetName, err) - } - - return expectedResourcesForPermission, nil -} - -// enableSyncerForWorkspace creates a sync target with the given name and creates a service -// account for the syncer in the given namespace. The expectation is that the provided config is -// for a logical cluster (workspace). Returns the token the syncer will use to connect to kcp. -func (o *SyncOptions) enableSyncerForWorkspace(ctx context.Context, config *rest.Config, syncTargetName, namespace string, labels map[string]string) (saToken string, syncerID string, syncTarget *workloadv1alpha1.SyncTarget, err error) { - kcpClient, err := kcpclient.NewForConfig(config) - if err != nil { - return "", "", nil, fmt.Errorf("failed to create kcp client: %w", err) - } - - syncTarget, err = o.applySyncTarget(ctx, kcpClient, syncTargetName, labels) - if err != nil { - return "", "", nil, fmt.Errorf("failed to apply synctarget %q: %w", syncTargetName, err) - } - - kubeClient, err := kubernetes.NewForConfig(config) - if err != nil { - return "", "", nil, fmt.Errorf("failed to create kubernetes client: %w", err) - } - - syncerID = getSyncerID(syncTarget) - - syncTargetOwnerReferences := []metav1.OwnerReference{{ - APIVersion: workloadv1alpha1.SchemeGroupVersion.String(), - Kind: "SyncTarget", - Name: syncTarget.Name, - UID: syncTarget.UID, - }} - sa, err := kubeClient.CoreV1().ServiceAccounts(namespace).Get(ctx, syncerID, metav1.GetOptions{}) - - switch { - case apierrors.IsNotFound(err): - fmt.Fprintf(o.ErrOut, "Creating service account %q\n", syncerID) - if sa, err = kubeClient.CoreV1().ServiceAccounts(namespace).Create(ctx, &corev1.ServiceAccount{ - ObjectMeta: metav1.ObjectMeta{ - Name: syncerID, - OwnerReferences: syncTargetOwnerReferences, - }, - }, metav1.CreateOptions{}); err != nil && !apierrors.IsAlreadyExists(err) { - return "", "", nil, fmt.Errorf("failed to create ServiceAccount %s|%s/%s: %w", syncTargetName, namespace, syncerID, err) - } - case err == nil: - oldData, err := json.Marshal(corev1.ServiceAccount{ - ObjectMeta: metav1.ObjectMeta{ - OwnerReferences: sa.OwnerReferences, - }, - }) - if err != nil { - return "", "", nil, fmt.Errorf("failed to marshal old data for ServiceAccount %s|%s/%s: %w", syncTargetName, namespace, syncerID, err) - } - - newData, err := json.Marshal(corev1.ServiceAccount{ - ObjectMeta: metav1.ObjectMeta{ - UID: sa.UID, - ResourceVersion: sa.ResourceVersion, - OwnerReferences: 
mergeOwnerReference(sa.ObjectMeta.OwnerReferences, syncTargetOwnerReferences), - }, - }) - if err != nil { - return "", "", nil, fmt.Errorf("failed to marshal new data for ServiceAccount %s|%s/%s: %w", syncTargetName, namespace, syncerID, err) - } - - patchBytes, err := jsonpatch.CreateMergePatch(oldData, newData) - if err != nil { - return "", "", nil, fmt.Errorf("failed to create patch for ServiceAccount %s|%s/%s: %w", syncTargetName, namespace, syncerID, err) - } - - fmt.Fprintf(o.ErrOut, "Updating service account %q.\n", syncerID) - if sa, err = kubeClient.CoreV1().ServiceAccounts(namespace).Patch(ctx, sa.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}); err != nil { - return "", "", nil, fmt.Errorf("failed to patch ServiceAccount %s|%s/%s: %w", syncTargetName, syncerID, namespace, err) - } - default: - return "", "", nil, fmt.Errorf("failed to get the ServiceAccount %s|%s/%s: %w", syncTargetName, syncerID, namespace, err) - } - - // Create a cluster role that provides the syncer the minimal permissions - // required by KCP to manage the sync target, and by the syncer virtual - // workspace to sync. - rules := []rbacv1.PolicyRule{ - { - Verbs: []string{"sync"}, - APIGroups: []string{workloadv1alpha1.SchemeGroupVersion.Group}, - ResourceNames: []string{syncTargetName}, - Resources: []string{"synctargets"}, - }, - { - Verbs: []string{"get"}, - APIGroups: []string{workloadv1alpha1.SchemeGroupVersion.Group}, - ResourceNames: []string{syncTargetName}, - Resources: []string{"synctargets/tunnel"}, - }, - { - Verbs: []string{"get", "list", "watch"}, - APIGroups: []string{workloadv1alpha1.SchemeGroupVersion.Group}, - Resources: []string{"synctargets"}, - ResourceNames: []string{syncTargetName}, - }, - { - Verbs: []string{"update", "patch"}, - APIGroups: []string{workloadv1alpha1.SchemeGroupVersion.Group}, - ResourceNames: []string{syncTargetName}, - Resources: []string{"synctargets/status"}, - }, - { - Verbs: []string{"get", "create", "update", "delete", "list", "watch"}, - APIGroups: []string{apiresourcev1alpha1.SchemeGroupVersion.Group}, - Resources: []string{"apiresourceimports"}, - }, - { - Verbs: []string{"access"}, - NonResourceURLs: []string{"/"}, - }, - } - - cr, err := kubeClient.RbacV1().ClusterRoles().Get(ctx, - syncerID, - metav1.GetOptions{}) - switch { - case apierrors.IsNotFound(err): - fmt.Fprintf(o.ErrOut, "Creating cluster role %q to give service account %q\n\n 1. write and sync access to the synctarget %q\n 2. 
write access to apiresourceimports.\n\n", syncerID, syncerID, syncerID) - if _, err = kubeClient.RbacV1().ClusterRoles().Create(ctx, &rbacv1.ClusterRole{ - ObjectMeta: metav1.ObjectMeta{ - Name: syncerID, - OwnerReferences: syncTargetOwnerReferences, - }, - Rules: rules, - }, metav1.CreateOptions{}); err != nil && !apierrors.IsAlreadyExists(err) { - return "", "", nil, err - } - case err == nil: - oldData, err := json.Marshal(rbacv1.ClusterRole{ - ObjectMeta: metav1.ObjectMeta{ - OwnerReferences: cr.OwnerReferences, - }, - Rules: cr.Rules, - }) - if err != nil { - return "", "", nil, fmt.Errorf("failed to marshal old data for ClusterRole %s|%s: %w", syncTargetName, syncerID, err) - } - - newData, err := json.Marshal(rbacv1.ClusterRole{ - ObjectMeta: metav1.ObjectMeta{ - UID: cr.UID, - ResourceVersion: cr.ResourceVersion, - OwnerReferences: mergeOwnerReference(cr.OwnerReferences, syncTargetOwnerReferences), - }, - Rules: rules, - }) - if err != nil { - return "", "", nil, fmt.Errorf("failed to marshal new data for ClusterRole %s|%s: %w", syncTargetName, syncerID, err) - } - - patchBytes, err := jsonpatch.CreateMergePatch(oldData, newData) - if err != nil { - return "", "", nil, fmt.Errorf("failed to create patch for ClusterRole %s|%s: %w", syncTargetName, syncerID, err) - } - - fmt.Fprintf(o.ErrOut, "Updating cluster role %q with\n\n 1. write and sync access to the synctarget %q\n 2. write access to apiresourceimports.\n\n", syncerID, syncerID) - if _, err = kubeClient.RbacV1().ClusterRoles().Patch(ctx, cr.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}); err != nil { - return "", "", nil, fmt.Errorf("failed to patch ClusterRole %s|%s/%s: %w", syncTargetName, syncerID, namespace, err) - } - default: - return "", "", nil, err - } - - // Grant the service account the role created just above in the workspace - subjects := []rbacv1.Subject{{ - Kind: "ServiceAccount", - Name: syncerID, - Namespace: namespace, - }} - roleRef := rbacv1.RoleRef{ - Kind: "ClusterRole", - Name: syncerID, - APIGroup: "rbac.authorization.k8s.io", - } - - _, err = kubeClient.RbacV1().ClusterRoleBindings().Get(ctx, - syncerID, - metav1.GetOptions{}) - if err != nil && !apierrors.IsNotFound(err) { - return "", "", nil, err - } - if err == nil { - if err := kubeClient.RbacV1().ClusterRoleBindings().Delete(ctx, syncerID, metav1.DeleteOptions{}); err != nil { - return "", "", nil, err - } - } - - fmt.Fprintf(o.ErrOut, "Creating or updating cluster role binding %q to bind service account %q to cluster role %q.\n", syncerID, syncerID, syncerID) - if _, err = kubeClient.RbacV1().ClusterRoleBindings().Create(ctx, &rbacv1.ClusterRoleBinding{ - ObjectMeta: metav1.ObjectMeta{ - Name: syncerID, - OwnerReferences: syncTargetOwnerReferences, - }, - Subjects: subjects, - RoleRef: roleRef, - }, metav1.CreateOptions{}); err != nil && !apierrors.IsAlreadyExists(err) { - return "", "", nil, err - } - - // Wait for the service account to be updated with the name of the token secret - tokenSecretName := "" - err = wait.PollImmediateWithContext(ctx, 100*time.Millisecond, 20*time.Second, func(ctx context.Context) (bool, error) { - serviceAccount, err := kubeClient.CoreV1().ServiceAccounts(namespace).Get(ctx, sa.Name, metav1.GetOptions{}) - if err != nil { - klog.FromContext(ctx).V(5).WithValues("err", err).Info("failed to retrieve ServiceAccount") - return false, nil - } - if len(serviceAccount.Secrets) == 0 { - return false, nil - } - tokenSecretName = serviceAccount.Secrets[0].Name - return true, nil - }) - if err != nil { - 
return "", "", nil, fmt.Errorf("timed out waiting for token secret name to be set on ServiceAccount %s/%s", namespace, sa.Name) - } - - // Retrieve the token that the syncer will use to authenticate to kcp - tokenSecret, err := kubeClient.CoreV1().Secrets(namespace).Get(ctx, tokenSecretName, metav1.GetOptions{}) - if err != nil { - return "", "", nil, fmt.Errorf("failed to retrieve Secret: %w", err) - } - saTokenBytes := tokenSecret.Data["token"] - if len(saTokenBytes) == 0 { - return "", "", nil, fmt.Errorf("token secret %s/%s is missing a value for `token`", namespace, tokenSecretName) - } - - return string(saTokenBytes), syncerID, syncTarget, nil -} - -func mergeLatestResourceSchema(apiExport *apisv1alpha1.APIExport, resourceToSync []string) (*apisv1alpha1.APIExport, bool) { - desiredResourceGroup := sets.New[string]() - var modified bool - for _, schema := range apiExport.Spec.LatestResourceSchemas { - gr, valid := apiexport.ParseAPIResourceSchemaName(schema) - if !valid { - continue - } - desiredResourceGroup.Insert(gr.String()) - } - for _, resource := range resourceToSync { - gr := schema.ParseGroupResource(resource) - if len(gr.Group) == 0 { - gr.Group = "core" - } - if !desiredResourceGroup.Has(gr.String()) { - // the rev-0 here is a placeholder and will be replaced by rv of negotiated APIResourceSchema finally. - schemaName := fmt.Sprintf("rev-0.%s", gr.String()) - apiExport.Spec.LatestResourceSchemas = append(apiExport.Spec.LatestResourceSchemas, schemaName) - modified = true - } - } - - return apiExport, modified -} - -// mergeOwnerReference: merge a slice of ownerReference with a given ownerReferences. -func mergeOwnerReference(ownerReferences, newOwnerReferences []metav1.OwnerReference) []metav1.OwnerReference { - var merged []metav1.OwnerReference - - merged = append(merged, ownerReferences...) - - for _, ownerReference := range newOwnerReferences { - found := false - for _, mergedOwnerReference := range merged { - if mergedOwnerReference.UID == ownerReference.UID { - found = true - break - } - } - if !found { - merged = append(merged, ownerReference) - } - } - - return merged -} - -// templateInput represents the external input required to render the resources to -// deploy the syncer to a pcluster. -type templateInput struct { - // ServerURL is the logical cluster url the syncer configuration will use - ServerURL string - // CAData holds the PEM-encoded bytes of the ca certificate(s) a syncer will use to validate - // kcp's serving certificate - CAData string - // Token is the service account token used to authenticate a syncer for access to a workspace - Token string - // KCPNamespace is the name of the kcp namespace of the syncer's service account - KCPNamespace string - // Namespace is the name of the syncer namespace on the pcluster - Namespace string - // SyncTargetPath is the qualified kcp logical cluster name the syncer will sync from - SyncTargetPath string - // SyncTarget is the name of the sync target the syncer will use to - // communicate its status and read configuration from - SyncTarget string - // SyncTargetUID is the UID of the sync target the syncer will use to - // communicate its status and read configuration from. This information is used by the - // Syncer in order to avoid a conflict when a synctarget gets deleted and another one is - // created with the same name. - SyncTargetUID string - // ResourcesToSync is the set of qualified resource names (eg. 
["services", - // "deployments.apps.k8s.io") that the syncer will synchronize between the kcp - // workspace and the pcluster. - ResourcesToSync []string - // Image is the name of the container image that the syncer deployment will use - Image string - // Replicas is the number of syncer pods to run (should be 0 or 1). - Replicas int - // QPS is the qps the syncer uses when talking to an apiserver. - QPS float32 - // Burst is the burst the syncer uses when talking to an apiserver. - Burst int - // FeatureGatesString is the set of features gates. - FeatureGatesString string - // APIImportPollIntervalString is the string of interval to poll APIImport. - APIImportPollIntervalString string - // DownstreamNamespaceCleanDelay is the time to delay before cleaning the downstream namespace as a string. - DownstreamNamespaceCleanDelayString string -} - -// templateArgs represents the full set of arguments required to render the resources -// required to deploy the syncer. -type templateArgs struct { - templateInput - // ServiceAccount is the name of the service account to create in the syncer - // namespace on the pcluster. - ServiceAccount string - // ClusterRole is the name of the cluster role to create for the syncer on the - // pcluster. - ClusterRole string - // ClusterRoleBinding is the name of the cluster role binding to create for the - // syncer on the pcluster. - ClusterRoleBinding string - // DnsRole is the name of the DNS role to create for the syncer on the pcluster. - DNSRole string - // DNSRoleBinding is the name of the DNS role binding to create for the - // syncer on the pcluster. - DNSRoleBinding string - // GroupMappings is the mapping of api group to resources that will be used to - // define the cluster role rules for the syncer in the pcluster. The syncer will be - // granted full permissions for the resources it will synchronize. - GroupMappings []groupMapping - // Secret is the name of the secret that will contain the kubeconfig the syncer - // will use to connect to the kcp logical cluster (workspace) that it will - // synchronize from. - Secret string - // Key in the syncer secret for the kcp logical cluster kubconfig. - SecretConfigKey string - // Deployment is the name of the deployment that will run the syncer in the - // pcluster. - Deployment string - // DeploymentApp is the label value that the syncer's deployment will select its - // pods with. - DeploymentApp string -} - -// renderSyncerResources renders the resources required to deploy a syncer to a pcluster. -// -// TODO(marun) Is it possible to set owner references in a set of applied resources? Ideally the -// cluster role and role binding would be owned by the namespace to ensure cleanup on deletion -// of the namespace. 
-func renderSyncerResources(input templateInput, syncerID string, resourceForPermission []string) ([]byte, error) { - dnsSyncerID := strings.Replace(syncerID, "syncer", "dns", 1) - - tmplArgs := templateArgs{ - templateInput: input, - ServiceAccount: syncerID, - ClusterRole: syncerID, - ClusterRoleBinding: syncerID, - DNSRole: dnsSyncerID, - DNSRoleBinding: dnsSyncerID, - GroupMappings: getGroupMappings(resourceForPermission), - Secret: syncerID, - SecretConfigKey: SyncerSecretConfigKey, - Deployment: syncerID, - DeploymentApp: syncerID, - } - - syncerTemplate, err := embeddedResources.ReadFile("syncer.yaml") - if err != nil { - return nil, err - } - tmpl, err := template.New("syncerTemplate").Parse(string(syncerTemplate)) - if err != nil { - return nil, err - } - buffer := bytes.NewBuffer([]byte{}) - err = tmpl.Execute(buffer, tmplArgs) - if err != nil { - return nil, err - } - return buffer.Bytes(), nil -} - -// groupMapping associates an api group to the resources in that group. -type groupMapping struct { - APIGroup string - Resources []string -} - -// getGroupMappings returns the set of api groups to resources for the given resources. -func getGroupMappings(resourcesToSync []string) []groupMapping { - groupMap := make(map[string][]string) - - for _, resource := range resourcesToSync { - nameParts := strings.SplitN(resource, ".", 2) - name := nameParts[0] - apiGroup := "" - if len(nameParts) > 1 { - apiGroup = nameParts[1] - } - if _, ok := groupMap[apiGroup]; !ok { - groupMap[apiGroup] = []string{name} - } else { - groupMap[apiGroup] = append(groupMap[apiGroup], name) - } - // If pods are being synced, add the subresources that are required to - // support the pod subresources. - if apiGroup == "" && name == "pods" { - podSubresources := []string{ - "pods/log", - "pods/exec", - "pods/attach", - "pods/binding", - "pods/portforward", - "pods/proxy", - "pods/ephemeralcontainers", - } - groupMap[apiGroup] = append(groupMap[apiGroup], podSubresources...) - } - } - - groupMappings := make([]groupMapping, 0, len(groupMap)) - for apiGroup, resources := range groupMap { - groupMappings = append(groupMappings, groupMapping{ - APIGroup: apiGroup, - Resources: resources, - }) - } - - sortGroupMappings(groupMappings) - - return groupMappings -} - -// sortGroupMappings sorts group mappings first by APIGroup and then by Resources. -func sortGroupMappings(groupMappings []groupMapping) { - sort.Slice(groupMappings, func(i, j int) bool { - if groupMappings[i].APIGroup == groupMappings[j].APIGroup { - return strings.Join(groupMappings[i].Resources, ",") < strings.Join(groupMappings[j].Resources, ",") - } - return groupMappings[i].APIGroup < groupMappings[j].APIGroup - }) -} diff --git a/pkg/cliplugins/workload/plugin/sync_test.go b/pkg/cliplugins/workload/plugin/sync_test.go deleted file mode 100644 index 7f027a441b1..00000000000 --- a/pkg/cliplugins/workload/plugin/sync_test.go +++ /dev/null @@ -1,586 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package plugin - -import ( - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/stretchr/testify/require" -) - -func TestNewSyncerYAML(t *testing.T) { - expectedYAML := `--- -apiVersion: v1 -kind: Namespace -metadata: - name: kcp-syncer-sync-target-name-34b23c4k ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: kcp-syncer-sync-target-name-34b23c4k - namespace: kcp-syncer-sync-target-name-34b23c4k ---- -apiVersion: v1 -kind: Secret -metadata: - name: kcp-syncer-sync-target-name-34b23c4k-token - namespace: kcp-syncer-sync-target-name-34b23c4k - annotations: - kubernetes.io/service-account.name: kcp-syncer-sync-target-name-34b23c4k -type: kubernetes.io/service-account-token ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: kcp-syncer-sync-target-name-34b23c4k -rules: -- apiGroups: - - "" - resources: - - namespaces - verbs: - - "create" - - "get" - - "list" - - "watch" - - "delete" -- apiGroups: - - "" - resources: - - endpoints - verbs: - - "get" -- apiGroups: - - "apiextensions.k8s.io" - resources: - - customresourcedefinitions - verbs: - - "get" - - "watch" - - "list" -- apiGroups: - - "networking.k8s.io" - resources: - - networkpolicies - verbs: - - "create" - - "list" - - "watch" -- apiGroups: - - "" - resources: - - resource1 - - resource2 - verbs: - - "*" ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: kcp-syncer-sync-target-name-34b23c4k -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: kcp-syncer-sync-target-name-34b23c4k -subjects: -- kind: ServiceAccount - name: kcp-syncer-sync-target-name-34b23c4k - namespace: kcp-syncer-sync-target-name-34b23c4k ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: kcp-dns-sync-target-name-34b23c4k - namespace: kcp-syncer-sync-target-name-34b23c4k -rules: -- apiGroups: - - "" - resources: - - serviceaccounts - - services - verbs: - - "create" - - "get" - - "list" - - "update" - - "delete" - - "watch" -- apiGroups: - - "" - resources: - - endpoints - verbs: - - "get" - - "list" - - "watch" -- apiGroups: - - "apps" - resources: - - deployments - verbs: - - "create" - - "get" - - "list" - - "update" - - "delete" - - "watch" -- apiGroups: - - "rbac.authorization.k8s.io" - resources: - - roles - - rolebindings - verbs: - - "create" - - "get" - - "list" - - "update" - - "delete" - - "watch" ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: kcp-dns-sync-target-name-34b23c4k - namespace: kcp-syncer-sync-target-name-34b23c4k -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: kcp-dns-sync-target-name-34b23c4k -subjects: - - kind: ServiceAccount - name: kcp-syncer-sync-target-name-34b23c4k - namespace: kcp-syncer-sync-target-name-34b23c4k ---- -apiVersion: v1 -kind: Secret -metadata: - name: kcp-syncer-sync-target-name-34b23c4k - namespace: kcp-syncer-sync-target-name-34b23c4k -stringData: - kubeconfig: | - apiVersion: v1 - kind: Config - clusters: - - name: default-cluster - cluster: - certificate-authority-data: ca-data - server: server-url - contexts: - - name: default-context - context: - cluster: default-cluster - namespace: kcp-namespace - user: default-user - current-context: default-context - users: - - name: default-user - user: - token: token ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: kcp-syncer-sync-target-name-34b23c4k - namespace: kcp-syncer-sync-target-name-34b23c4k -spec: - replicas: 1 - strategy: - type: Recreate - 
selector: - matchLabels: - app: kcp-syncer-sync-target-name-34b23c4k - template: - metadata: - labels: - app: kcp-syncer-sync-target-name-34b23c4k - spec: - containers: - - name: kcp-syncer - command: - - /ko-app/syncer - args: - - --from-kubeconfig=/kcp/kubeconfig - - --sync-target-name=sync-target-name - - --sync-target-uid=sync-target-uid - - --from-cluster=root:default:foo - - --api-import-poll-interval=1m - - --downstream-namespace-clean-delay=2s - - --resources=resource1 - - --resources=resource2 - - --qps=123.4 - - --burst=456 - - --dns-image=image - env: - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - image: image - imagePullPolicy: IfNotPresent - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - name: kcp-config - mountPath: /kcp/ - readOnly: true - serviceAccountName: kcp-syncer-sync-target-name-34b23c4k - volumes: - - name: kcp-config - secret: - secretName: kcp-syncer-sync-target-name-34b23c4k - optional: false -` - - actualYAML, err := renderSyncerResources(templateInput{ - ServerURL: "server-url", - Token: "token", - CAData: "ca-data", - KCPNamespace: "kcp-namespace", - Namespace: "kcp-syncer-sync-target-name-34b23c4k", - SyncTargetPath: "root:default:foo", - SyncTarget: "sync-target-name", - SyncTargetUID: "sync-target-uid", - Image: "image", - Replicas: 1, - ResourcesToSync: []string{"resource1", "resource2"}, - APIImportPollIntervalString: "1m", - DownstreamNamespaceCleanDelayString: "2s", - QPS: 123.4, - Burst: 456, - }, "kcp-syncer-sync-target-name-34b23c4k", []string{"resource1", "resource2"}) - require.NoError(t, err) - require.Empty(t, cmp.Diff(expectedYAML, string(actualYAML))) -} - -func TestNewSyncerYAMLWithFeatureGates(t *testing.T) { - expectedYAML := `--- -apiVersion: v1 -kind: Namespace -metadata: - name: kcp-syncer-sync-target-name-34b23c4k ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: kcp-syncer-sync-target-name-34b23c4k - namespace: kcp-syncer-sync-target-name-34b23c4k ---- -apiVersion: v1 -kind: Secret -metadata: - name: kcp-syncer-sync-target-name-34b23c4k-token - namespace: kcp-syncer-sync-target-name-34b23c4k - annotations: - kubernetes.io/service-account.name: kcp-syncer-sync-target-name-34b23c4k -type: kubernetes.io/service-account-token ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: kcp-syncer-sync-target-name-34b23c4k -rules: -- apiGroups: - - "" - resources: - - namespaces - verbs: - - "create" - - "get" - - "list" - - "watch" - - "delete" -- apiGroups: - - "" - resources: - - endpoints - verbs: - - "get" -- apiGroups: - - "apiextensions.k8s.io" - resources: - - customresourcedefinitions - verbs: - - "get" - - "watch" - - "list" -- apiGroups: - - "networking.k8s.io" - resources: - - networkpolicies - verbs: - - "create" - - "list" - - "watch" -- apiGroups: - - "" - resources: - - resource1 - - resource2 - verbs: - - "*" ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: kcp-syncer-sync-target-name-34b23c4k -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: kcp-syncer-sync-target-name-34b23c4k -subjects: -- kind: ServiceAccount - name: kcp-syncer-sync-target-name-34b23c4k - namespace: kcp-syncer-sync-target-name-34b23c4k ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: kcp-dns-sync-target-name-34b23c4k - namespace: kcp-syncer-sync-target-name-34b23c4k -rules: -- apiGroups: - - "" - resources: - - serviceaccounts - - services - verbs: - - "create" - - "get" 
- - "list" - - "update" - - "delete" - - "watch" -- apiGroups: - - "" - resources: - - endpoints - verbs: - - "get" - - "list" - - "watch" -- apiGroups: - - "apps" - resources: - - deployments - verbs: - - "create" - - "get" - - "list" - - "update" - - "delete" - - "watch" -- apiGroups: - - "rbac.authorization.k8s.io" - resources: - - roles - - rolebindings - verbs: - - "create" - - "get" - - "list" - - "update" - - "delete" - - "watch" ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: kcp-dns-sync-target-name-34b23c4k - namespace: kcp-syncer-sync-target-name-34b23c4k -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: kcp-dns-sync-target-name-34b23c4k -subjects: - - kind: ServiceAccount - name: kcp-syncer-sync-target-name-34b23c4k - namespace: kcp-syncer-sync-target-name-34b23c4k ---- -apiVersion: v1 -kind: Secret -metadata: - name: kcp-syncer-sync-target-name-34b23c4k - namespace: kcp-syncer-sync-target-name-34b23c4k -stringData: - kubeconfig: | - apiVersion: v1 - kind: Config - clusters: - - name: default-cluster - cluster: - certificate-authority-data: ca-data - server: server-url - contexts: - - name: default-context - context: - cluster: default-cluster - namespace: kcp-namespace - user: default-user - current-context: default-context - users: - - name: default-user - user: - token: token ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: kcp-syncer-sync-target-name-34b23c4k - namespace: kcp-syncer-sync-target-name-34b23c4k -spec: - replicas: 1 - strategy: - type: Recreate - selector: - matchLabels: - app: kcp-syncer-sync-target-name-34b23c4k - template: - metadata: - labels: - app: kcp-syncer-sync-target-name-34b23c4k - spec: - containers: - - name: kcp-syncer - command: - - /ko-app/syncer - args: - - --from-kubeconfig=/kcp/kubeconfig - - --sync-target-name=sync-target-name - - --sync-target-uid=sync-target-uid - - --from-cluster=root:default:foo - - --api-import-poll-interval=1m - - --downstream-namespace-clean-delay=2s - - --resources=resource1 - - --resources=resource2 - - --qps=123.4 - - --burst=456 - - --feature-gates=myfeature=true - - --dns-image=image - env: - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - image: image - imagePullPolicy: IfNotPresent - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - name: kcp-config - mountPath: /kcp/ - readOnly: true - serviceAccountName: kcp-syncer-sync-target-name-34b23c4k - volumes: - - name: kcp-config - secret: - secretName: kcp-syncer-sync-target-name-34b23c4k - optional: false -` - actualYAML, err := renderSyncerResources(templateInput{ - ServerURL: "server-url", - Token: "token", - CAData: "ca-data", - KCPNamespace: "kcp-namespace", - Namespace: "kcp-syncer-sync-target-name-34b23c4k", - SyncTargetPath: "root:default:foo", - SyncTarget: "sync-target-name", - SyncTargetUID: "sync-target-uid", - Image: "image", - Replicas: 1, - ResourcesToSync: []string{"resource1", "resource2"}, - QPS: 123.4, - Burst: 456, - APIImportPollIntervalString: "1m", - DownstreamNamespaceCleanDelayString: "2s", - FeatureGatesString: "myfeature=true", - }, "kcp-syncer-sync-target-name-34b23c4k", []string{"resource1", "resource2"}) - require.NoError(t, err) - require.Empty(t, cmp.Diff(expectedYAML, string(actualYAML))) -} - -func TestGetGroupMappings(t *testing.T) { - testCases := []struct { - name string - input []string - expected []groupMapping - }{ - { - name: "no group mappings", - }, - { - name: "core type", - input: []string{ - "services", - 
}, - expected: []groupMapping{ - { - APIGroup: "", - Resources: []string{ - "services", - }, - }, - }, - }, - { - name: "type with group", - input: []string{ - "deployments.apps", - }, - expected: []groupMapping{ - { - APIGroup: "apps", - Resources: []string{ - "deployments", - }, - }, - }, - }, - { - name: "multiple types", - input: []string{ - "deployments.apps", - "services", - "secrets", - }, - expected: []groupMapping{ - { - APIGroup: "", - Resources: []string{ - "services", - "secrets", - }, - }, - { - APIGroup: "apps", - Resources: []string{ - "deployments", - }, - }, - }, - }, - } - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - actual := getGroupMappings(tc.input) - if len(tc.input) == 0 { - require.Empty(t, actual) - } else { - require.Empty(t, cmp.Diff(tc.expected, actual)) - } - }) - } -} diff --git a/pkg/cliplugins/workload/plugin/syncer.yaml b/pkg/cliplugins/workload/plugin/syncer.yaml deleted file mode 100644 index 1eca47fe619..00000000000 --- a/pkg/cliplugins/workload/plugin/syncer.yaml +++ /dev/null @@ -1,227 +0,0 @@ ---- -apiVersion: v1 -kind: Namespace -metadata: - name: {{.Namespace}} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{.ServiceAccount}} - namespace: {{.Namespace}} ---- -apiVersion: v1 -kind: Secret -metadata: - name: {{.ServiceAccount}}-token - namespace: {{.Namespace}} - annotations: - kubernetes.io/service-account.name: {{.ServiceAccount}} -type: kubernetes.io/service-account-token ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: {{.ClusterRole}} -rules: -- apiGroups: - - "" - resources: - - namespaces - verbs: - - "create" - - "get" - - "list" - - "watch" - - "delete" -- apiGroups: - - "" - resources: - - endpoints - verbs: - - "get" -- apiGroups: - - "apiextensions.k8s.io" - resources: - - customresourcedefinitions - verbs: - - "get" - - "watch" - - "list" -- apiGroups: - - "networking.k8s.io" - resources: - - networkpolicies - verbs: - - "create" - - "list" - - "watch" -{{- range $groupMapping := .GroupMappings}} -- apiGroups: - - "{{$groupMapping.APIGroup}}" - resources: - {{- range $resource := $groupMapping.Resources}} - - {{$resource}} - {{- end}} - verbs: - - "*" -{{- end}} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: {{.ClusterRoleBinding}} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{.ClusterRole}} -subjects: -- kind: ServiceAccount - name: {{.ServiceAccount}} - namespace: {{.Namespace}} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: {{.DNSRole}} - namespace: {{.Namespace}} -rules: -- apiGroups: - - "" - resources: - - serviceaccounts - - services - verbs: - - "create" - - "get" - - "list" - - "update" - - "delete" - - "watch" -- apiGroups: - - "" - resources: - - endpoints - verbs: - - "get" - - "list" - - "watch" -- apiGroups: - - "apps" - resources: - - deployments - verbs: - - "create" - - "get" - - "list" - - "update" - - "delete" - - "watch" -- apiGroups: - - "rbac.authorization.k8s.io" - resources: - - roles - - rolebindings - verbs: - - "create" - - "get" - - "list" - - "update" - - "delete" - - "watch" ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: {{.DNSRoleBinding}} - namespace: {{.Namespace}} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: {{.DNSRole}} -subjects: - - kind: ServiceAccount - name: {{.ServiceAccount}} - namespace: {{.Namespace}} ---- -apiVersion: v1 -kind: Secret -metadata: - 
name: {{.Secret}} - namespace: {{.Namespace}} -stringData: - {{.SecretConfigKey}}: | - apiVersion: v1 - kind: Config - clusters: - - name: default-cluster - cluster: - certificate-authority-data: {{.CAData}} - server: {{.ServerURL}} - contexts: - - name: default-context - context: - cluster: default-cluster - namespace: {{.KCPNamespace}} - user: default-user - current-context: default-context - users: - - name: default-user - user: - token: {{.Token}} ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{.Deployment}} - namespace: {{.Namespace}} -spec: - replicas: {{.Replicas}} - strategy: - type: Recreate - selector: - matchLabels: - app: {{.DeploymentApp}} - template: - metadata: - labels: - app: {{.DeploymentApp}} - spec: - containers: - - name: kcp-syncer - command: - - /ko-app/syncer - args: - - --from-kubeconfig=/kcp/{{.SecretConfigKey}} - - --sync-target-name={{.SyncTarget}} - - --sync-target-uid={{.SyncTargetUID}} - - --from-cluster={{.SyncTargetPath}} - - --api-import-poll-interval={{ .APIImportPollIntervalString }} - - --downstream-namespace-clean-delay={{ .DownstreamNamespaceCleanDelayString }} -{{- range $resourceToSync := .ResourcesToSync}} - - --resources={{$resourceToSync}} -{{- end}} - - --qps={{.QPS}} - - --burst={{.Burst}} -{{- if .FeatureGatesString }} - - --feature-gates={{ .FeatureGatesString }} -{{- end}} - - --dns-image={{.Image}} - env: - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - image: {{.Image}} - imagePullPolicy: IfNotPresent - terminationMessagePolicy: FallbackToLogsOnError - volumeMounts: - - name: kcp-config - mountPath: /kcp/ - readOnly: true - serviceAccountName: {{.ServiceAccount}} - volumes: - - name: kcp-config - secret: - secretName: {{.Secret}} - optional: false diff --git a/pkg/cliplugins/workload/plugin/workload.go b/pkg/cliplugins/workload/plugin/workload.go deleted file mode 100644 index e6d70d8abe2..00000000000 --- a/pkg/cliplugins/workload/plugin/workload.go +++ /dev/null @@ -1,196 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package plugin - -import ( - "context" - "errors" - "fmt" - "time" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/cli-runtime/pkg/genericclioptions" - - "github.com/kcp-dev/kcp/pkg/cliplugins/base" - kcpclient "github.com/kcp-dev/kcp/sdk/client/clientset/versioned" -) - -// CordonOptions contains options for cordoning or uncordoning a SyncTarget. -type CordonOptions struct { - *base.Options - - // SyncTarget is the name of the SyncTarget to cordon or uncordon. - SyncTarget string - // Cordon indicates if the SyncTarget should be cordoned (true) or uncordoned (false). - Cordon bool -} - -// NewCordonOptions returns a new CordonOptions. -func NewCordonOptions(streams genericclioptions.IOStreams) *CordonOptions { - return &CordonOptions{ - Options: base.NewOptions(streams), - } -} - -// Complete ensures all dynamically populated fields are initialized. 
-func (o *CordonOptions) Complete(args []string) error {
- if err := o.Options.Complete(); err != nil {
- return err
- }
-
- if len(args) > 0 {
- o.SyncTarget = args[0]
- }
-
- return nil
-}
-
-// Validate validates the CordonOptions are complete and usable.
-func (o *CordonOptions) Validate() error {
- if o.SyncTarget == "" {
- return errors.New("sync target name is required")
- }
-
- return nil
-}
-
-// Run cordons or uncordons the sync target by toggling its unschedulable flag.
-func (o *CordonOptions) Run(ctx context.Context) error {
- config, err := o.ClientConfig.ClientConfig()
- if err != nil {
- return err
- }
-
- kcpClient, err := kcpclient.NewForConfig(config)
- if err != nil {
- return fmt.Errorf("failed to create kcp client: %w", err)
- }
-
- syncTarget, err := kcpClient.WorkloadV1alpha1().SyncTargets().Get(ctx, o.SyncTarget, metav1.GetOptions{})
- if err != nil {
- return fmt.Errorf("failed to get SyncTarget %s: %w", o.SyncTarget, err)
- }
-
- // See if there is nothing to do
- if o.Cordon && syncTarget.Spec.Unschedulable {
- fmt.Fprintln(o.Out, o.SyncTarget, "already cordoned")
- return nil
- } else if !o.Cordon && !syncTarget.Spec.Unschedulable {
- fmt.Fprintln(o.Out, o.SyncTarget, "already uncordoned")
- return nil
- }
-
- var patchBytes []byte
- if o.Cordon {
- patchBytes = []byte(`[{"op":"replace","path":"/spec/unschedulable","value":true}]`)
- } else {
- evict := ``
- if syncTarget.Spec.EvictAfter != nil {
- evict = `,{"op":"remove","path":"/spec/evictAfter"}`
- }
-
- patchBytes = []byte(`[{"op":"replace","path":"/spec/unschedulable","value":false}` + evict + `]`)
- }
-
- _, err = kcpClient.WorkloadV1alpha1().SyncTargets().Patch(ctx, o.SyncTarget, types.JSONPatchType, patchBytes, metav1.PatchOptions{})
- if err != nil {
- return fmt.Errorf("failed to update SyncTarget %s: %w", o.SyncTarget, err)
- }
-
- if o.Cordon {
- fmt.Fprintln(o.Out, o.SyncTarget, "cordoned")
- } else {
- fmt.Fprintln(o.Out, o.SyncTarget, "uncordoned")
- }
-
- return nil
-}
-
-// DrainOptions contains options for draining a SyncTarget.
-type DrainOptions struct {
- *base.Options
-
- // SyncTarget is the name of the SyncTarget to drain.
- SyncTarget string
-}
-
-// NewDrainOptions returns a new DrainOptions.
-func NewDrainOptions(streams genericclioptions.IOStreams) *DrainOptions {
- return &DrainOptions{
- Options: base.NewOptions(streams),
- }
-}
-
-// Complete ensures all dynamically populated fields are initialized.
-func (o *DrainOptions) Complete(args []string) error {
- if err := o.Options.Complete(); err != nil {
- return err
- }
-
- if len(args) > 0 {
- o.SyncTarget = args[0]
- }
-
- return nil
-}
-
-// Validate validates the DrainOptions are complete and usable.
-func (o *DrainOptions) Validate() error {
- if o.SyncTarget == "" {
- return errors.New("sync target name is required")
- }
-
- return nil
-}
-
-// Run drains the sync target: it marks the target unschedulable and sets evictAfter so existing workloads are evicted.
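Both Run methods express their spec updates as RFC 6902 JSON patches submitted with types.JSONPatchType. A standalone sketch of the drain patch construction, mirroring the code with no client involved:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Drain = cordon plus eviction: mark the target unschedulable and set
	// evictAfter to "now" (RFC 3339), exactly as DrainOptions.Run builds it.
	now := time.Now().UTC().Format(time.RFC3339)
	patch := `[{"op":"replace","path":"/spec/unschedulable","value":true},` +
		`{"op":"replace","path":"/spec/evictAfter","value":"` + now + `"}]`
	fmt.Println(patch)
}
```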
-func (o *DrainOptions) Run(ctx context.Context) error {
- config, err := o.ClientConfig.ClientConfig()
- if err != nil {
- return err
- }
-
- kcpClient, err := kcpclient.NewForConfig(config)
- if err != nil {
- return fmt.Errorf("failed to create kcp client: %w", err)
- }
-
- syncTarget, err := kcpClient.WorkloadV1alpha1().SyncTargets().Get(ctx, o.SyncTarget, metav1.GetOptions{})
- if err != nil {
- return fmt.Errorf("failed to get SyncTarget %s: %w", o.SyncTarget, err)
- }
-
- // See if there is nothing to do
- if syncTarget.Spec.EvictAfter != nil && syncTarget.Spec.Unschedulable {
- fmt.Fprintln(o.Out, o.SyncTarget, "already draining")
- return nil
- }
-
- nowTime := time.Now().UTC()
- var patchBytes = []byte(`[{"op":"replace","path":"/spec/unschedulable","value":true},{"op":"replace","path":"/spec/evictAfter","value":"` + nowTime.Format(time.RFC3339) + `"}]`)
-
- _, err = kcpClient.WorkloadV1alpha1().SyncTargets().Patch(ctx, o.SyncTarget, types.JSONPatchType, patchBytes, metav1.PatchOptions{})
-
- if err != nil {
- return fmt.Errorf("failed to update SyncTarget %s: %w", o.SyncTarget, err)
- }
-
- fmt.Fprintln(o.Out, o.SyncTarget, "draining")
-
- return nil
-}
diff --git a/pkg/dns/plugin/nsmap/README.md b/pkg/dns/plugin/nsmap/README.md
deleted file mode 100644
index 448835ae5b1..00000000000
--- a/pkg/dns/plugin/nsmap/README.md
+++ /dev/null
@@ -1,15 +0,0 @@
-# nsmap
-
-## Name
-
-*nsmap* - rewrites the Kubernetes namespace in DNS names.
-
-## Description
-
-Rewrites DNS names from `<name>.<logical namespace>.svc.<suffix>` to `<name>.<physical namespace>.svc.<suffix>`
-
-## Syntax
-
-~~~
-nsmap
-~~~
diff --git a/pkg/dns/plugin/nsmap/config.go b/pkg/dns/plugin/nsmap/config.go
deleted file mode 100644
index 3b6dbbde262..00000000000
--- a/pkg/dns/plugin/nsmap/config.go
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
-Copyright 2022 The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package nsmap
-
-import (
- "context"
- "errors"
- "os"
-
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/fields"
- "k8s.io/client-go/informers"
- "k8s.io/client-go/kubernetes"
- "k8s.io/client-go/tools/cache"
- "k8s.io/client-go/tools/clientcmd"
-)
-
-var (
- // ConfigMapName is the name of the configmap containing logical to physical namespace mappings.
- ConfigMapName = "config-nsmap"
-)
-
-// OnUpdateFn is the function signature for receiving ConfigMap updates.
-type OnUpdateFn func(ctx context.Context, configMap *corev1.ConfigMap)
-
-// StartWatcher starts watching for nsmap ConfigMap updates and
-// notifies the given callback when an update occurs. This is a non-blocking function.
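A usage sketch for the function that follows: StartWatcher returns once the informer cache has synced, so the caller owns the context lifetime, and a nil ConfigMap signals deletion. The wrapper name is hypothetical; the imports assume the surrounding package:

```go
package nsmap

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// watchNamespaceMap is a hypothetical caller of StartWatcher. The callback
// fires on add/update with the current ConfigMap and with nil on delete.
func watchNamespaceMap(ctx context.Context) error {
	return StartWatcher(ctx, func(ctx context.Context, cm *corev1.ConfigMap) {
		if cm == nil {
			// The ConfigMap was deleted; callers should drop their mappings.
			fmt.Println("nsmap ConfigMap deleted")
			return
		}
		fmt.Println("nsmap updated:", cm.Data)
	})
}
```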
-func StartWatcher(ctx context.Context, callback OnUpdateFn) error {
- config, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
- &clientcmd.ClientConfigLoadingRules{},
- &clientcmd.ConfigOverrides{}).ClientConfig()
- if err != nil {
- return err
- }
-
- clientset, err := kubernetes.NewForConfig(config)
- if err != nil {
- return err
- }
-
- factory := informers.NewSharedInformerFactoryWithOptions(clientset, 0,
- informers.WithNamespace(os.Getenv("NAMESPACE")),
- informers.WithTweakListOptions(func(options *metav1.ListOptions) {
- options.FieldSelector = fields.OneTermEqualSelector(metav1.ObjectNameField, ConfigMapName).String()
- }))
-
- informer := factory.Core().V1().ConfigMaps().Informer()
-
- _, _ = informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
- AddFunc: func(obj interface{}) {
- callback(ctx, obj.(*corev1.ConfigMap))
- },
- UpdateFunc: func(oldObj, newObj interface{}) {
- callback(ctx, newObj.(*corev1.ConfigMap))
- },
- DeleteFunc: func(obj interface{}) {
- callback(ctx, nil)
- },
- })
-
- go factory.Start(ctx.Done())
-
- if synced := cache.WaitForCacheSync(ctx.Done(), informer.HasSynced); !synced {
- return errors.New("configmap informer cache failed to sync")
- }
-
- return nil
-}
diff --git a/pkg/dns/plugin/nsmap/namespace.go b/pkg/dns/plugin/nsmap/namespace.go
deleted file mode 100644
index f077fd836ae..00000000000
--- a/pkg/dns/plugin/nsmap/namespace.go
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
-Copyright 2022 The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package nsmap
-
-import (
- "context"
- "strings"
-
- "github.com/coredns/coredns/plugin/rewrite"
- "github.com/coredns/coredns/request"
-
- corev1 "k8s.io/api/core/v1"
- "k8s.io/klog/v2"
-
- "github.com/kcp-dev/kcp/third_party/coredns"
-)
-
-type namespaceRewriter struct {
- Namespaces map[string]string
-}
-
-func (m *namespaceRewriter) updateFromConfigmap(ctx context.Context, cm *corev1.ConfigMap) {
- logger := klog.FromContext(ctx)
- logger.Info("reloading nsmap ConfigMap", "data", cm.Data)
- m.Namespaces = cm.Data
-}
-
-// Rewrite rewrites the current request by replacing the logical namespace with the physical namespace (when applicable).
-func (m *namespaceRewriter) Rewrite(ctx context.Context, state request.Request) rewrite.ResponseRules {
- logger := klog.FromContext(ctx)
- name := state.Name()
-
- parts := strings.SplitN(name, ".", 3)
- if len(parts) < 2 {
- // No dots: fallthrough
- return nil
- }
-
- if len(parts) == 3 && !strings.HasPrefix(parts[2], "svc") {
- // not a cluster local name: fallthrough
- return nil
- }
-
- ns := parts[1]
- targetNs := m.Namespaces[ns]
- if targetNs == "" {
- return nil
- }
-
- // TODO(LV): check the response resolves. If not, try again without rewriting
- // For instance, bit.ly can either refer to a local service (name: bit, ns: ly) or an external service.
- // The ly namespace might not contain a bit service, and will fail to be properly resolved.
- replacement := parts[0] + "." + targetNs + "."
+ parts[2] - - logger.V(4).WithValues("before", name, "after", replacement).Info("rewriting dns name") - - state.Req.Question[0].Name = replacement - - rewriter := coredns.NewRemapStringRewriter(state.Req.Question[0].Name, state.Name()) - return rewrite.ResponseRules{ - &coredns.NameRewriterResponseRule{RemapStringRewriter: rewriter}, - &coredns.ValueRewriterResponseRule{RemapStringRewriter: rewriter}, - } -} diff --git a/pkg/dns/plugin/nsmap/nsmap.go b/pkg/dns/plugin/nsmap/nsmap.go deleted file mode 100644 index 0c65e990f89..00000000000 --- a/pkg/dns/plugin/nsmap/nsmap.go +++ /dev/null @@ -1,54 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package nsmap - -import ( - "context" - - "github.com/coredns/coredns/plugin" - "github.com/coredns/coredns/plugin/rewrite" - "github.com/coredns/coredns/request" - "github.com/miekg/dns" -) - -// LogicalToPhysicalNamespaceMapper is a CoreDNS plugin to map logical namespaces to physical namespaces. -type LogicalToPhysicalNamespaceMapper struct { - Next plugin.Handler - namespace *namespaceRewriter - revertPolicy rewrite.RevertPolicy -} - -// ServeDNS implements the plugin.Handler interface. -func (nm LogicalToPhysicalNamespaceMapper) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) { - wr := rewrite.NewResponseReverter(w, r, nm.revertPolicy) - state := request.Request{W: w, Req: r} - - // Only one rewrite rule to apply, namespace rewriter - respRules := nm.namespace.Rewrite(ctx, state) - - if respRules != nil { - wr.ResponseRules = append(wr.ResponseRules, respRules...) - return plugin.NextOrFailure(nm.Name(), nm.Next, ctx, wr, r) - } - - return plugin.NextOrFailure(nm.Name(), nm.Next, ctx, w, r) -} - -// Name implements the Handler interface. -func (nm LogicalToPhysicalNamespaceMapper) Name() string { - return "nsmap" -} diff --git a/pkg/dns/plugin/nsmap/setup.go b/pkg/dns/plugin/nsmap/setup.go deleted file mode 100644 index 01fb0b5e592..00000000000 --- a/pkg/dns/plugin/nsmap/setup.go +++ /dev/null @@ -1,61 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
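The rewrite above amounts to simple name surgery on the query. A self-contained sketch of the same recomposition, with a hypothetical mapping; the real plugin additionally installs response rules so answers are mapped back to the original name:

```go
package main

import (
	"fmt"
	"strings"
)

// remap replaces the namespace segment of a cluster-local DNS name when a
// logical-to-physical mapping exists; otherwise the name passes through.
// This is a simplified sketch of namespaceRewriter.Rewrite's core logic.
func remap(qname string, namespaces map[string]string) string {
	parts := strings.SplitN(qname, ".", 3)
	if len(parts) < 3 || !strings.HasPrefix(parts[2], "svc") {
		return qname // not a cluster-local name: leave untouched
	}
	if target, ok := namespaces[parts[1]]; ok {
		return parts[0] + "." + target + "." + parts[2]
	}
	return qname
}

func main() {
	nsmap := map[string]string{"team-a": "kcp-phys-x1y2"} // hypothetical mapping
	fmt.Println(remap("db.team-a.svc.cluster.local.", nsmap))
	// prints: db.kcp-phys-x1y2.svc.cluster.local.
}
```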
-*/ - -package nsmap - -import ( - "context" - - "github.com/coredns/caddy" - "github.com/coredns/coredns/core/dnsserver" - "github.com/coredns/coredns/plugin" - "github.com/coredns/coredns/plugin/rewrite" - - "k8s.io/klog/v2" -) - -func init() { - plugin.Register("nsmap", setup) -} - -func setup(c *caddy.Controller) error { - rewriter := &namespaceRewriter{Namespaces: map[string]string{}} - - dnsserver.GetConfig(c).AddPlugin(func(next plugin.Handler) plugin.Handler { - return LogicalToPhysicalNamespaceMapper{ - Next: next, - namespace: rewriter, - revertPolicy: rewrite.NewRevertPolicy(false, false), - } - }) - - // Start ConfigMap watcher and notify the namespace rewriter - ctx, cancel := context.WithCancel(context.Background()) - ctx = klog.NewContext(ctx, klog.Background()) - - err := StartWatcher(ctx, rewriter.updateFromConfigmap) - if err != nil { - cancel() - return err - } - - c.OnFinalShutdown(func() error { - cancel() - return nil - }) - - return nil -} diff --git a/pkg/features/kcp_features.go b/pkg/features/kcp_features.go index b9d762967bd..7999c5e5fb2 100644 --- a/pkg/features/kcp_features.go +++ b/pkg/features/kcp_features.go @@ -31,23 +31,11 @@ import ( ) const ( - // Every feature gate should add method here following this template: - // - // // owner: @username - // // alpha: v1.4 - // MyFeature() bool. - - // owner: @sttts - // alpha: v0.4 - // - // Enable the scheduling.kcp.io/v1alpha1 API group, and related controllers. - LocationAPI featuregate.Feature = "KCPLocationAPI" - - // owner: @aojea - // alpha: v0.8 - // - // Enable reverse tunnels to the downstream clusters through the syncers. - SyncerTunnel featuregate.Feature = "KCPSyncerTunnel" +// Every feature gate should add method here following this template: +// +// // owner: @username +// // alpha: v1.4 +// MyFeature() bool. ) // DefaultFeatureGate exposes the upstream feature gate, but with our gate setting applied. @@ -96,9 +84,6 @@ func (f *kcpFeatureGate) Type() string { // in the generic control plane code. To add a new feature, define a key for it above and add it // here. The features will be available throughout Kubernetes binaries. var defaultGenericControlPlaneFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{ - LocationAPI: {Default: true, PreRelease: featuregate.Alpha}, - SyncerTunnel: {Default: true, PreRelease: featuregate.Alpha}, - // inherited features from generic apiserver, relisted here to get a conflict if it is changed // unintentionally on either side: genericfeatures.AdvancedAuditing: {Default: true, PreRelease: featuregate.GA}, diff --git a/pkg/indexers/indexers.go b/pkg/indexers/indexers.go index 0b66b4cd5ee..5f1075ea695 100644 --- a/pkg/indexers/indexers.go +++ b/pkg/indexers/indexers.go @@ -18,7 +18,6 @@ package indexers import ( "fmt" - "strings" "github.com/kcp-dev/logicalcluster/v3" @@ -28,58 +27,19 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/tools/cache" - syncershared "github.com/kcp-dev/kcp/pkg/syncer/shared" "github.com/kcp-dev/kcp/sdk/apis/core" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" ) const ( - // BySyncerFinalizerKey is the name for the index that indexes by syncer finalizer label keys. - BySyncerFinalizerKey = "bySyncerFinalizerKey" // APIBindingByClusterAndAcceptedClaimedGroupResources is the name for the index that indexes an APIBinding by its // cluster name and accepted claimed group resources. 
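For reference, the upstream component-base plumbing that pkg/features/kcp_features.go wraps looks roughly like this; the gate name is hypothetical, and the calls are the standard featuregate API:

```go
package main

import (
	"fmt"

	"k8s.io/component-base/featuregate"
)

const MyFeature featuregate.Feature = "MyFeature" // hypothetical gate

func main() {
	// Register a spec, flip the gate, then query it at runtime.
	gate := featuregate.NewFeatureGate()
	if err := gate.Add(map[featuregate.Feature]featuregate.FeatureSpec{
		MyFeature: {Default: false, PreRelease: featuregate.Alpha},
	}); err != nil {
		panic(err)
	}
	// Equivalent to passing --feature-gates=MyFeature=true on a CLI.
	if err := gate.Set("MyFeature=true"); err != nil {
		panic(err)
	}
	fmt.Println("MyFeature enabled:", gate.Enabled(MyFeature))
}
```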
APIBindingByClusterAndAcceptedClaimedGroupResources = "byClusterAndAcceptedClaimedGroupResources" - // ByClusterResourceStateLabelKey indexes resources based on the cluster state label key. - ByClusterResourceStateLabelKey = "ByClusterResourceStateLabelKey" // ByLogicalClusterPath indexes by logical cluster path, if the annotation exists. ByLogicalClusterPath = "ByLogicalClusterPath" // ByLogicalClusterPathAndName indexes by logical cluster path and object name, if the annotation exists. ByLogicalClusterPathAndName = "ByLogicalClusterPathAndName" ) -// IndexBySyncerFinalizerKey indexes by syncer finalizer label keys. -func IndexBySyncerFinalizerKey(obj interface{}) ([]string, error) { - metaObj, ok := obj.(metav1.Object) - if !ok { - return []string{}, fmt.Errorf("obj is supposed to be a metav1.Object, but is %T", obj) - } - - syncerFinalizers := []string{} - for _, f := range metaObj.GetFinalizers() { - if strings.HasPrefix(f, syncershared.SyncerFinalizerNamePrefix) { - syncerFinalizers = append(syncerFinalizers, f) - } - } - - return syncerFinalizers, nil -} - -// IndexByClusterResourceStateLabelKey indexes resources based on the cluster state key label. -func IndexByClusterResourceStateLabelKey(obj interface{}) ([]string, error) { - metaObj, ok := obj.(metav1.Object) - if !ok { - return []string{}, fmt.Errorf("obj is supposed to be a metav1.Object, but is %T", obj) - } - - ClusterResourceStateLabelKeys := []string{} - for k := range metaObj.GetLabels() { - if strings.HasPrefix(k, workloadv1alpha1.ClusterResourceStateLabelPrefix) { - ClusterResourceStateLabelKeys = append(ClusterResourceStateLabelKeys, k) - } - } - return ClusterResourceStateLabelKeys, nil -} - // IndexByLogicalClusterPath indexes by logical cluster path, if the annotation exists. func IndexByLogicalClusterPath(obj interface{}) ([]string, error) { metaObj, ok := obj.(metav1.Object) diff --git a/pkg/indexers/workload.go b/pkg/indexers/workload.go deleted file mode 100644 index 8e49fbb2140..00000000000 --- a/pkg/indexers/workload.go +++ /dev/null @@ -1,38 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
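The deleted functions above follow client-go's IndexFunc contract: given an object, return the index keys it should be findable under. A minimal sketch of that contract against an in-memory indexer; the index name and label prefix are hypothetical:

```go
package main

import (
	"fmt"
	"strings"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

// byLabelPrefix returns an IndexFunc emitting one key per label that
// carries the given prefix, the same shape as the removed indexers.
func byLabelPrefix(prefix string) cache.IndexFunc {
	return func(obj interface{}) ([]string, error) {
		m, ok := obj.(metav1.Object)
		if !ok {
			return nil, fmt.Errorf("expected metav1.Object, got %T", obj)
		}
		var keys []string
		for k := range m.GetLabels() {
			if strings.HasPrefix(k, prefix) {
				keys = append(keys, k)
			}
		}
		return keys, nil
	}
}

func main() {
	idx := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{
		"byStateLabel": byLabelPrefix("state.example.io/"),
	})
	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{
		Name: "p", Namespace: "default",
		Labels: map[string]string{"state.example.io/cluster-a": "Sync"},
	}}
	if err := idx.Add(pod); err != nil {
		panic(err)
	}
	objs, _ := idx.ByIndex("byStateLabel", "state.example.io/cluster-a")
	fmt.Println("indexed objects:", len(objs))
}
```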
-*/ - -package indexers - -import ( - "fmt" - - "github.com/kcp-dev/logicalcluster/v3" - - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" -) - -const ( - SyncTargetsBySyncTargetKey = "SyncTargetsBySyncTargetKey" -) - -func IndexSyncTargetsBySyncTargetKey(obj interface{}) ([]string, error) { - syncTarget, ok := obj.(*workloadv1alpha1.SyncTarget) - if !ok { - return []string{}, fmt.Errorf("obj is supposed to be a workloadv1alpha1.SyncTarget, but is %T", obj) - } - - return []string{workloadv1alpha1.ToSyncTargetKey(logicalcluster.From(syncTarget), syncTarget.Name)}, nil -} diff --git a/pkg/openapi/zz_generated.openapi.go b/pkg/openapi/zz_generated.openapi.go index 527af41587b..4bb47e32fda 100644 --- a/pkg/openapi/zz_generated.openapi.go +++ b/pkg/openapi/zz_generated.openapi.go @@ -88,17 +88,6 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1.ShardList": schema_sdk_apis_core_v1alpha1_ShardList(ref), "github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1.ShardSpec": schema_sdk_apis_core_v1alpha1_ShardSpec(ref), "github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1.ShardStatus": schema_sdk_apis_core_v1alpha1_ShardStatus(ref), - "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1.AvailableSelectorLabel": schema_sdk_apis_scheduling_v1alpha1_AvailableSelectorLabel(ref), - "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1.GroupVersionResource": schema_sdk_apis_scheduling_v1alpha1_GroupVersionResource(ref), - "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1.Location": schema_sdk_apis_scheduling_v1alpha1_Location(ref), - "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1.LocationList": schema_sdk_apis_scheduling_v1alpha1_LocationList(ref), - "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1.LocationReference": schema_sdk_apis_scheduling_v1alpha1_LocationReference(ref), - "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1.LocationSpec": schema_sdk_apis_scheduling_v1alpha1_LocationSpec(ref), - "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1.LocationStatus": schema_sdk_apis_scheduling_v1alpha1_LocationStatus(ref), - "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1.Placement": schema_sdk_apis_scheduling_v1alpha1_Placement(ref), - "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1.PlacementList": schema_sdk_apis_scheduling_v1alpha1_PlacementList(ref), - "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1.PlacementSpec": schema_sdk_apis_scheduling_v1alpha1_PlacementSpec(ref), - "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1.PlacementStatus": schema_sdk_apis_scheduling_v1alpha1_PlacementStatus(ref), "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.APIExportReference": schema_sdk_apis_tenancy_v1alpha1_APIExportReference(ref), "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.VirtualWorkspace": schema_sdk_apis_tenancy_v1alpha1_VirtualWorkspace(ref), "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.Workspace": schema_sdk_apis_tenancy_v1alpha1_Workspace(ref), @@ -121,13 +110,6 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/kcp-dev/kcp/sdk/apis/topology/v1alpha1.PartitionSetSpec": schema_sdk_apis_topology_v1alpha1_PartitionSetSpec(ref), "github.com/kcp-dev/kcp/sdk/apis/topology/v1alpha1.PartitionSetStatus": schema_sdk_apis_topology_v1alpha1_PartitionSetStatus(ref), "github.com/kcp-dev/kcp/sdk/apis/topology/v1alpha1.PartitionSpec": schema_sdk_apis_topology_v1alpha1_PartitionSpec(ref), - 
"github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1.ResourceToSync": schema_sdk_apis_workload_v1alpha1_ResourceToSync(ref), - "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1.SyncTarget": schema_sdk_apis_workload_v1alpha1_SyncTarget(ref), - "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1.SyncTargetList": schema_sdk_apis_workload_v1alpha1_SyncTargetList(ref), - "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1.SyncTargetSpec": schema_sdk_apis_workload_v1alpha1_SyncTargetSpec(ref), - "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1.SyncTargetStatus": schema_sdk_apis_workload_v1alpha1_SyncTargetStatus(ref), - "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1.TunnelWorkspace": schema_sdk_apis_workload_v1alpha1_TunnelWorkspace(ref), - "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1.VirtualWorkspace": schema_sdk_apis_workload_v1alpha1_VirtualWorkspace(ref), "k8s.io/apimachinery/pkg/apis/meta/v1.APIGroup": schema_pkg_apis_meta_v1_APIGroup(ref), "k8s.io/apimachinery/pkg/apis/meta/v1.APIGroupList": schema_pkg_apis_meta_v1_APIGroupList(ref), "k8s.io/apimachinery/pkg/apis/meta/v1.APIResource": schema_pkg_apis_meta_v1_APIResource(ref), @@ -3005,97 +2987,61 @@ func schema_sdk_apis_core_v1alpha1_ShardStatus(ref common.ReferenceCallback) com } } -func schema_sdk_apis_scheduling_v1alpha1_AvailableSelectorLabel(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_sdk_apis_tenancy_v1alpha1_APIExportReference(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "AvailableSelectorLabel specifies a label with key name and possible values.", + Description: "APIExportReference provides the fields necessary to resolve an APIExport.", Type: []string{"object"}, Properties: map[string]spec.Schema{ - "key": { + "path": { SchemaProps: spec.SchemaProps{ - Description: "key is the name of the label.", - Default: "", + Description: "path is the fully-qualified path to the workspace containing the APIExport. 
If it is empty, the current workspace is assumed.", Type: []string{"string"}, Format: "", }, }, - "values": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-list-type": "set", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "values are the possible values for this labels.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "description": { + "export": { SchemaProps: spec.SchemaProps{ - Description: "description is a human readable description of the label.", + Description: "export is the name of the APIExport.", + Default: "", Type: []string{"string"}, Format: "", }, }, }, - Required: []string{"key", "values"}, + Required: []string{"export"}, }, }, } } -func schema_sdk_apis_scheduling_v1alpha1_GroupVersionResource(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_sdk_apis_tenancy_v1alpha1_VirtualWorkspace(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "GroupVersionResource unambiguously identifies a resource.", - Type: []string{"object"}, + Type: []string{"object"}, Properties: map[string]spec.Schema{ - "group": { - SchemaProps: spec.SchemaProps{ - Description: "group is the name of an API group.", - Type: []string{"string"}, - Format: "", - }, - }, - "version": { - SchemaProps: spec.SchemaProps{ - Description: "version is the version of the API.", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "resource": { + "url": { SchemaProps: spec.SchemaProps{ - Description: "resource is the name of the resource.", + Description: "url is a WorkspaceType initialization virtual workspace URL.", Default: "", Type: []string{"string"}, Format: "", }, }, }, - Required: []string{"version", "resource"}, + Required: []string{"url"}, }, }, } } -func schema_sdk_apis_scheduling_v1alpha1_Location(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_sdk_apis_tenancy_v1alpha1_Workspace(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "Location represents a set of instances of a scheduling resource type acting a target of scheduling.\n\nThe location is chosen by the user (in the future) through a Placement object, while the instance is chosen by the scheduler depending on considerations like load or available resources, or further node selectors specified by the user.", + Description: "Workspace defines a generic Kubernetes-cluster-like endpoint, with standard Kubernetes discovery APIs, OpenAPI and resource API endpoints.\n\nA workspace can be backed by different concrete types of workspace implementation, depending on access pattern. 
All workspace implementations share the characteristic that the URL that serves a given workspace can be used with standard Kubernetes API machinery and client libraries and command line tools.", Type: []string{"object"}, Properties: map[string]spec.Schema{ "kind": { @@ -3121,28 +3067,29 @@ func schema_sdk_apis_scheduling_v1alpha1_Location(ref common.ReferenceCallback) "spec": { SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1.LocationSpec"), + Ref: ref("github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.WorkspaceSpec"), }, }, "status": { SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1.LocationStatus"), + Ref: ref("github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.WorkspaceStatus"), }, }, }, + Required: []string{"spec"}, }, }, Dependencies: []string{ - "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1.LocationSpec", "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1.LocationStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.WorkspaceSpec", "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.WorkspaceStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, } } -func schema_sdk_apis_scheduling_v1alpha1_LocationList(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_sdk_apis_tenancy_v1alpha1_WorkspaceList(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "LocationList is a list of locations.", + Description: "WorkspaceList is a list of Workspaces", Type: []string{"object"}, Properties: map[string]spec.Schema{ "kind": { @@ -3172,7 +3119,7 @@ func schema_sdk_apis_scheduling_v1alpha1_LocationList(ref common.ReferenceCallba Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1.Location"), + Ref: ref("github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.Workspace"), }, }, }, @@ -3183,130 +3130,128 @@ func schema_sdk_apis_scheduling_v1alpha1_LocationList(ref common.ReferenceCallba }, }, Dependencies: []string{ - "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1.Location", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.Workspace", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, } } -func schema_sdk_apis_scheduling_v1alpha1_LocationReference(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_sdk_apis_tenancy_v1alpha1_WorkspaceLocation(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "LocationReference describes a location that are provided in the specified Workspace.", - Type: []string{"object"}, + Type: []string{"object"}, Properties: map[string]spec.Schema{ - "path": { - SchemaProps: spec.SchemaProps{ - Description: "path is an absolute reference to a workspace, e.g. root:org:ws. 
The workspace must be some ancestor or a child of some ancestor.", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "locationName": { + "selector": { SchemaProps: spec.SchemaProps{ - Description: "Name of the Location.", - Default: "", - Type: []string{"string"}, - Format: "", + Description: "selector is a label selector that filters workspace scheduling targets.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"), }, }, }, - Required: []string{"path", "locationName"}, }, }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"}, } } -func schema_sdk_apis_scheduling_v1alpha1_LocationSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_sdk_apis_tenancy_v1alpha1_WorkspaceSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "LocationSpec holds the desired state of the Location.", + Description: "WorkspaceSpec holds the desired state of the Workspace.", Type: []string{"object"}, Properties: map[string]spec.Schema{ - "resource": { + "type": { SchemaProps: spec.SchemaProps{ - Description: "resource is the group-version-resource of the instances that are subject to this location.", + Description: "type defines properties of the workspace both on creation (e.g. initial resources and initially installed APIs) and during runtime (e.g. permissions). If no type is provided, the default type for the workspace in which this workspace is nesting will be used.\n\nThe type is a reference to a WorkspaceType in the listed workspace, but lower-cased. The WorkspaceType existence is validated at admission during creation. The type is immutable after creation. The use of a type is gated via the RBAC workspacetypes/use resource permission.", Default: map[string]interface{}{}, - Ref: ref("github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1.GroupVersionResource"), + Ref: ref("github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.WorkspaceTypeReference"), }, }, - "description": { + "location": { SchemaProps: spec.SchemaProps{ - Description: "description is a human-readable description of the location.", - Type: []string{"string"}, - Format: "", + Description: "location constraints where this workspace can be scheduled to.\n\nIf the no location is specified, an arbitrary location is chosen.", + Ref: ref("github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.WorkspaceLocation"), }, }, - "availableSelectorLabels": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-list-map-keys": []interface{}{ - "key", - }, - "x-kubernetes-list-type": "map", - }, - }, + "cluster": { SchemaProps: spec.SchemaProps{ - Description: "availableSelectorLabels is a list of labels that can be used to select an instance at this location in a placement object.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1.AvailableSelectorLabel"), - }, - }, - }, + Description: "cluster is the name of the logical cluster this workspace is stored under.\n\nSet by the system.", + Type: []string{"string"}, + Format: "", }, }, - "instanceSelector": { + "URL": { SchemaProps: spec.SchemaProps{ - Description: "instanceSelector chooses the instances that will be part of this location.\n\nNote that these labels are not what is shown in the Location objects to the user. 
Depending on context, both will match or won't match.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"), + Description: "URL is the address under which the Kubernetes-cluster-like endpoint can be found. This URL can be used to access the workspace with standard Kubernetes client libraries and command line tools.\n\nSet by the system.", + Type: []string{"string"}, + Format: "", }, }, }, - Required: []string{"resource"}, }, }, Dependencies: []string{ - "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1.AvailableSelectorLabel", "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1.GroupVersionResource", "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"}, + "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.WorkspaceLocation", "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.WorkspaceTypeReference"}, } } -func schema_sdk_apis_scheduling_v1alpha1_LocationStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_sdk_apis_tenancy_v1alpha1_WorkspaceStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "LocationStatus defines the observed state of Location.", + Description: "WorkspaceStatus communicates the observed state of the Workspace.", Type: []string{"object"}, Properties: map[string]spec.Schema{ - "instances": { + "phase": { SchemaProps: spec.SchemaProps{ - Description: "instances is the number of actual instances at this location.", - Type: []string{"integer"}, - Format: "int64", + Description: "Phase of the workspace (Scheduling, Initializing, Ready).", + Type: []string{"string"}, + Format: "", }, }, - "availableInstances": { + "conditions": { SchemaProps: spec.SchemaProps{ - Description: "available is the number of actual instances that are available at this location.", - Type: []string{"integer"}, - Format: "int64", + Description: "Current processing state of the Workspace.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/apis/conditions/v1alpha1.Condition"), + }, + }, + }, + }, + }, + "initializers": { + SchemaProps: spec.SchemaProps{ + Description: "initializers must be cleared by a controller before the workspace is ready and can be used.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, }, }, }, }, }, + Dependencies: []string{ + "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/apis/conditions/v1alpha1.Condition"}, } } -func schema_sdk_apis_scheduling_v1alpha1_Placement(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_sdk_apis_tenancy_v1alpha1_WorkspaceType(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "Placement defines a selection rule to choose ONE location for MULTIPLE namespaces in a workspace.\n\nplacement is in Pending state initially. When a location is selected by the placement, the placement turns to Unbound state. In Pending or Unbound state, the selection rule can be updated to select another location. 
When the a namespace is annotated by another controller or user with the key of \"scheduling.kcp.io/placement\", the namespace will pick one placement, and this placement is transferred to Bound state. Any update to spec of the placement is ignored in Bound state and reflected in the conditions. The placement will turn back to Unbound state when no namespace uses this placement any more.", + Description: "WorkspaceType specifies behaviour of workspaces of this type.", Type: []string{"object"}, Properties: map[string]spec.Schema{ "kind": { @@ -3332,28 +3277,57 @@ func schema_sdk_apis_scheduling_v1alpha1_Placement(ref common.ReferenceCallback) "spec": { SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1.PlacementSpec"), + Ref: ref("github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.WorkspaceTypeSpec"), }, }, "status": { SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1.PlacementStatus"), + Ref: ref("github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.WorkspaceTypeStatus"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.WorkspaceTypeSpec", "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.WorkspaceTypeStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + } +} + +func schema_sdk_apis_tenancy_v1alpha1_WorkspaceTypeExtension(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "WorkspaceTypeExtension defines how other WorkspaceTypes are composed together to add functionality to the owning WorkspaceType.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "with": { + SchemaProps: spec.SchemaProps{ + Description: "with are WorkspaceTypes whose initializers are added to the list for the owning type, and for whom the owning type becomes an alias, as long as all of their required types are not mentioned in without.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.WorkspaceTypeReference"), + }, + }, + }, }, }, }, }, }, Dependencies: []string{ - "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1.PlacementSpec", "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1.PlacementStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.WorkspaceTypeReference"}, } } -func schema_sdk_apis_scheduling_v1alpha1_PlacementList(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_sdk_apis_tenancy_v1alpha1_WorkspaceTypeList(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "PlacementList is a list of locations.", + Description: "WorkspaceTypeList is a list of workspace types", Type: []string{"object"}, Properties: map[string]spec.Schema{ "kind": { @@ -3383,7 +3357,7 @@ func schema_sdk_apis_scheduling_v1alpha1_PlacementList(ref common.ReferenceCallb Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1.Placement"), + Ref: ref("github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.WorkspaceType"), }, }, }, @@ -3394,87 +3368,62 @@ func 
schema_sdk_apis_scheduling_v1alpha1_PlacementList(ref common.ReferenceCallb }, }, Dependencies: []string{ - "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1.Placement", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.WorkspaceType", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, } } -func schema_sdk_apis_scheduling_v1alpha1_PlacementSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_sdk_apis_tenancy_v1alpha1_WorkspaceTypeReference(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, + Description: "WorkspaceTypeReference is a globally unique, fully qualified reference to a workspace type.", + Type: []string{"object"}, Properties: map[string]spec.Schema{ - "locationSelectors": { - SchemaProps: spec.SchemaProps{ - Description: "locationSelectors represents a slice of label selector to select a location, these label selectors are logically ORed.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"), - }, - }, - }, - }, - }, - "locationResource": { - SchemaProps: spec.SchemaProps{ - Description: "locationResource is the group-version-resource of the instances that are subject to the locations to select.", - Default: map[string]interface{}{}, - Ref: ref("github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1.GroupVersionResource"), - }, - }, - "namespaceSelector": { + "name": { SchemaProps: spec.SchemaProps{ - Description: "namespaceSelector is a label selector to select ns. It match all ns by default, but can be specified to a certain set of ns.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"), + Description: "name is the name of the WorkspaceType", + Default: "", + Type: []string{"string"}, + Format: "", }, }, - "locationWorkspace": { + "path": { SchemaProps: spec.SchemaProps{ - Description: "locationWorkspace is an absolute reference to a workspace for the location. If it is not set, the workspace of APIBinding will be used.", + Description: "path is an absolute reference to the workspace that owns this type, e.g. 
root:org:ws.", Type: []string{"string"}, Format: "", }, }, }, - Required: []string{"locationResource"}, + Required: []string{"name"}, }, }, - Dependencies: []string{ - "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1.GroupVersionResource", "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"}, } } -func schema_sdk_apis_scheduling_v1alpha1_PlacementStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_sdk_apis_tenancy_v1alpha1_WorkspaceTypeSelector(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, + Description: "WorkspaceTypeSelector describes a set of types.", + Type: []string{"object"}, Properties: map[string]spec.Schema{ - "phase": { + "none": { SchemaProps: spec.SchemaProps{ - Description: "phase is the current phase of the placement", - Type: []string{"string"}, + Description: "none means that no type matches.", + Type: []string{"boolean"}, Format: "", }, }, - "selectedLocation": { - SchemaProps: spec.SchemaProps{ - Description: "selectedLocation is the location that a picked by this placement.", - Ref: ref("github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1.LocationReference"), - }, - }, - "conditions": { + "types": { SchemaProps: spec.SchemaProps{ - Description: "Current processing state of the Placement.", + Description: "types is a list of WorkspaceTypes that match. A workspace type extending another workspace type automatically is considered as that extended type as well (even transitively).\n\nAn empty list matches all types.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/apis/conditions/v1alpha1.Condition"), + Ref: ref("github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.WorkspaceTypeReference"), }, }, }, @@ -3484,881 +3433,96 @@ func schema_sdk_apis_scheduling_v1alpha1_PlacementStatus(ref common.ReferenceCal }, }, Dependencies: []string{ - "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1.LocationReference", "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/apis/conditions/v1alpha1.Condition"}, + "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.WorkspaceTypeReference"}, } } -func schema_sdk_apis_tenancy_v1alpha1_APIExportReference(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_sdk_apis_tenancy_v1alpha1_WorkspaceTypeSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "APIExportReference provides the fields necessary to resolve an APIExport.", - Type: []string{"object"}, + Type: []string{"object"}, Properties: map[string]spec.Schema{ - "path": { + "initializer": { SchemaProps: spec.SchemaProps{ - Description: "path is the fully-qualified path to the workspace containing the APIExport. If it is empty, the current workspace is assumed.", - Type: []string{"string"}, + Description: "initializer determines if this WorkspaceType has an associated initializing controller. These controllers are used to add functionality to a Workspace; all controllers must finish their work before the Workspace becomes ready for use.\n\nOne initializing controller is supported per WorkspaceType; the identifier for this initializer will be a colon-delimited string using the workspace in which the WorkspaceType is defined, and the type's name. 
For example, if a WorkspaceType `example` is created in the `root:org` workspace, the implicit initializer name is `root:org:Example`.", + Type: []string{"boolean"}, Format: "", }, }, - "export": { + "extend": { SchemaProps: spec.SchemaProps{ - Description: "export is the name of the APIExport.", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"export"}, - }, - }, - } -} - -func schema_sdk_apis_tenancy_v1alpha1_VirtualWorkspace(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "url": { - SchemaProps: spec.SchemaProps{ - Description: "url is a WorkspaceType initialization virtual workspace URL.", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"url"}, - }, - }, - } -} - -func schema_sdk_apis_tenancy_v1alpha1_Workspace(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Workspace defines a generic Kubernetes-cluster-like endpoint, with standard Kubernetes discovery APIs, OpenAPI and resource API endpoints.\n\nA workspace can be backed by different concrete types of workspace implementation, depending on access pattern. All workspace implementations share the characteristic that the URL that serves a given workspace can be used with standard Kubernetes API machinery and client libraries and command line tools.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "spec": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.WorkspaceSpec"), - }, - }, - "status": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.WorkspaceStatus"), - }, - }, - }, - Required: []string{"spec"}, - }, - }, - Dependencies: []string{ - "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.WorkspaceSpec", "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.WorkspaceStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_sdk_apis_tenancy_v1alpha1_WorkspaceList(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "WorkspaceList is a list of Workspaces", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.Workspace"), - }, - }, - }, - }, - }, - }, - Required: []string{"metadata", "items"}, - }, - }, - Dependencies: []string{ - "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.Workspace", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, - } -} - -func schema_sdk_apis_tenancy_v1alpha1_WorkspaceLocation(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "selector": { - SchemaProps: spec.SchemaProps{ - Description: "selector is a label selector that filters workspace scheduling targets.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"}, - } -} - -func schema_sdk_apis_tenancy_v1alpha1_WorkspaceSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "WorkspaceSpec holds the desired state of the Workspace.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "type": { - SchemaProps: spec.SchemaProps{ - Description: "type defines properties of the workspace both on creation (e.g. initial resources and initially installed APIs) and during runtime (e.g. permissions). If no type is provided, the default type for the workspace in which this workspace is nesting will be used.\n\nThe type is a reference to a WorkspaceType in the listed workspace, but lower-cased. The WorkspaceType existence is validated at admission during creation. The type is immutable after creation. The use of a type is gated via the RBAC workspacetypes/use resource permission.", - Default: map[string]interface{}{}, - Ref: ref("github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.WorkspaceTypeReference"), - }, - }, - "location": { - SchemaProps: spec.SchemaProps{ - Description: "location constraints where this workspace can be scheduled to.\n\nIf the no location is specified, an arbitrary location is chosen.", - Ref: ref("github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.WorkspaceLocation"), - }, - }, - "cluster": { - SchemaProps: spec.SchemaProps{ - Description: "cluster is the name of the logical cluster this workspace is stored under.\n\nSet by the system.", - Type: []string{"string"}, - Format: "", - }, - }, - "URL": { - SchemaProps: spec.SchemaProps{ - Description: "URL is the address under which the Kubernetes-cluster-like endpoint can be found. 
This URL can be used to access the workspace with standard Kubernetes client libraries and command line tools.\n\nSet by the system.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.WorkspaceLocation", "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.WorkspaceTypeReference"}, - } -} - -func schema_sdk_apis_tenancy_v1alpha1_WorkspaceStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "WorkspaceStatus communicates the observed state of the Workspace.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "phase": { - SchemaProps: spec.SchemaProps{ - Description: "Phase of the workspace (Scheduling, Initializing, Ready).", - Type: []string{"string"}, - Format: "", - }, - }, - "conditions": { - SchemaProps: spec.SchemaProps{ - Description: "Current processing state of the Workspace.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/apis/conditions/v1alpha1.Condition"), - }, - }, - }, - }, - }, - "initializers": { - SchemaProps: spec.SchemaProps{ - Description: "initializers must be cleared by a controller before the workspace is ready and can be used.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/apis/conditions/v1alpha1.Condition"}, - } -} - -func schema_sdk_apis_tenancy_v1alpha1_WorkspaceType(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "WorkspaceType specifies behaviour of workspaces of this type.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "spec": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.WorkspaceTypeSpec"), - }, - }, - "status": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.WorkspaceTypeStatus"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.WorkspaceTypeSpec", "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.WorkspaceTypeStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_sdk_apis_tenancy_v1alpha1_WorkspaceTypeExtension(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "WorkspaceTypeExtension defines how other WorkspaceTypes are composed together to add functionality to the owning WorkspaceType.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "with": { - SchemaProps: spec.SchemaProps{ - Description: "with are WorkspaceTypes whose initializers are added to the list for the owning type, and for whom the owning type becomes an alias, as long as all of their required types are not mentioned in without.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.WorkspaceTypeReference"), - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.WorkspaceTypeReference"}, - } -} - -func schema_sdk_apis_tenancy_v1alpha1_WorkspaceTypeList(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "WorkspaceTypeList is a list of workspace types", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.WorkspaceType"), - }, - }, - }, - }, - }, - }, - Required: []string{"metadata", "items"}, - }, - }, - Dependencies: []string{ - "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.WorkspaceType", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, - } -} - -func schema_sdk_apis_tenancy_v1alpha1_WorkspaceTypeReference(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "WorkspaceTypeReference is a globally unique, fully qualified reference to a workspace type.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "name is the name of the WorkspaceType", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "path": { - SchemaProps: spec.SchemaProps{ - Description: "path is an absolute reference to the workspace that owns this type, e.g. root:org:ws.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"name"}, - }, - }, - } -} - -func schema_sdk_apis_tenancy_v1alpha1_WorkspaceTypeSelector(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "WorkspaceTypeSelector describes a set of types.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "none": { - SchemaProps: spec.SchemaProps{ - Description: "none means that no type matches.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "types": { - SchemaProps: spec.SchemaProps{ - Description: "types is a list of WorkspaceTypes that match. A workspace type extending another workspace type automatically is considered as that extended type as well (even transitively).\n\nAn empty list matches all types.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.WorkspaceTypeReference"), - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.WorkspaceTypeReference"}, - } -} - -func schema_sdk_apis_tenancy_v1alpha1_WorkspaceTypeSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "initializer": { - SchemaProps: spec.SchemaProps{ - Description: "initializer determines if this WorkspaceType has an associated initializing controller. 
These controllers are used to add functionality to a Workspace; all controllers must finish their work before the Workspace becomes ready for use.\n\nOne initializing controller is supported per WorkspaceType; the identifier for this initializer will be a colon-delimited string using the workspace in which the WorkspaceType is defined, and the type's name. For example, if a WorkspaceType `example` is created in the `root:org` workspace, the implicit initializer name is `root:org:Example`.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "extend": { - SchemaProps: spec.SchemaProps{ - Description: "extend is a list of other WorkspaceTypes whose initializers and limitAllowedChildren and limitAllowedParents this WorkspaceType is inheriting. By (transitively) extending another WorkspaceType, this WorkspaceType will be considered as that other type in evaluation of limitAllowedChildren and limitAllowedParents constraints.\n\nA dependency cycle stop this WorkspaceType from being admitted as the type of a Workspace.\n\nA non-existing dependency stop this WorkspaceType from being admitted as the type of a Workspace.", - Default: map[string]interface{}{}, - Ref: ref("github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.WorkspaceTypeExtension"), - }, - }, - "additionalWorkspaceLabels": { - SchemaProps: spec.SchemaProps{ - Description: "additionalWorkspaceLabels are a set of labels that will be added to a Workspace on creation.", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "defaultChildWorkspaceType": { - SchemaProps: spec.SchemaProps{ - Description: "defaultChildWorkspaceType is the WorkspaceType that will be used by default if another, nested Workspace is created in a workspace of this type. When this field is unset, the user must specify a type when creating nested workspaces. Extending another WorkspaceType does not inherit its defaultChildWorkspaceType.", - Ref: ref("github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.WorkspaceTypeReference"), - }, - }, - "limitAllowedChildren": { - SchemaProps: spec.SchemaProps{ - Description: "limitAllowedChildren specifies constraints for sub-workspaces created in workspaces of this type. These are in addition to child constraints of types this one extends.", - Ref: ref("github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.WorkspaceTypeSelector"), - }, - }, - "limitAllowedParents": { - SchemaProps: spec.SchemaProps{ - Description: "limitAllowedParents specifies constraints for the parent workspace that workspaces of this type are created in. These are in addition to parent constraints of types this one extends.", - Ref: ref("github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.WorkspaceTypeSelector"), - }, - }, - "defaultAPIBindings": { - SchemaProps: spec.SchemaProps{ - Description: "defaultAPIBindings are the APIs to bind during initialization of workspaces created from this type. 
The APIBinding names will be generated dynamically.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.APIExportReference"), - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.APIExportReference", "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.WorkspaceTypeExtension", "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.WorkspaceTypeReference", "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.WorkspaceTypeSelector"}, - } -} - -func schema_sdk_apis_tenancy_v1alpha1_WorkspaceTypeStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "WorkspaceTypeStatus defines the observed state of WorkspaceType.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "conditions": { - SchemaProps: spec.SchemaProps{ - Description: "conditions is a list of conditions that apply to the APIExport.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/apis/conditions/v1alpha1.Condition"), - }, - }, - }, - }, - }, - "virtualWorkspaces": { - SchemaProps: spec.SchemaProps{ - Description: "virtualWorkspaces contains all APIExport virtual workspace URLs.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.VirtualWorkspace"), - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.VirtualWorkspace", "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/apis/conditions/v1alpha1.Condition"}, - } -} - -func schema_conditions_apis_conditions_v1alpha1_Condition(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Condition defines an observation of a object operational state.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "type": { - SchemaProps: spec.SchemaProps{ - Description: "Type of condition in CamelCase or in foo.example.com/CamelCase. Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important.", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "status": { - SchemaProps: spec.SchemaProps{ - Description: "Status of the condition, one of True, False, Unknown.", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "severity": { - SchemaProps: spec.SchemaProps{ - Description: "Severity provides an explicit classification of Reason code, so the users or machines can immediately understand the current situation and act accordingly. The Severity field MUST be set only when Status=False.", - Type: []string{"string"}, - Format: "", - }, - }, - "lastTransitionTime": { - SchemaProps: spec.SchemaProps{ - Description: "Last time the condition transitioned from one status to another. This should be when the underlying condition changed. 
If that is not known, then using the time when the API field changed is acceptable.", - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), - }, - }, - "reason": { - SchemaProps: spec.SchemaProps{ - Description: "The reason for the condition's last transition in CamelCase. The specific API may choose whether or not this field is considered a guaranteed API. This field may not be empty.", - Type: []string{"string"}, - Format: "", - }, - }, - "message": { - SchemaProps: spec.SchemaProps{ - Description: "A human readable message indicating details about the transition. This field may be empty.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"type", "status", "lastTransitionTime"}, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, - } -} - -func schema_sdk_apis_topology_v1alpha1_Partition(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Partition defines the selection of a set of shards along multiple dimensions. Partitions can get automatically generated through a partitioner or manually crafted.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "spec": { - SchemaProps: spec.SchemaProps{ - Description: "spec holds the desired state.", + Description: "extend is a list of other WorkspaceTypes whose initializers and limitAllowedChildren and limitAllowedParents this WorkspaceType is inheriting. 
By (transitively) extending another WorkspaceType, this WorkspaceType will be considered as that other type in evaluation of limitAllowedChildren and limitAllowedParents constraints.\n\nA dependency cycle stops this WorkspaceType from being admitted as the type of a Workspace.\n\nA non-existing dependency stops this WorkspaceType from being admitted as the type of a Workspace.", Default: map[string]interface{}{}, - Ref: ref("github.com/kcp-dev/kcp/sdk/apis/topology/v1alpha1.PartitionSpec"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/kcp-dev/kcp/sdk/apis/topology/v1alpha1.PartitionSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_sdk_apis_topology_v1alpha1_PartitionList(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PartitionList is a list of Partition resources.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + Ref: ref("github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.WorkspaceTypeExtension"), }, }, - "items": { + "additionalWorkspaceLabels": { SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ + Description: "additionalWorkspaceLabels are a set of labels that will be added to a Workspace on creation.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/kcp-dev/kcp/sdk/apis/topology/v1alpha1.Partition"), + Default: "", + Type: []string{"string"}, + Format: "", }, }, }, }, }, - }, - Required: []string{"metadata", "items"}, - }, - }, - Dependencies: []string{ - "github.com/kcp-dev/kcp/sdk/apis/topology/v1alpha1.Partition", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, - } -} - -func schema_sdk_apis_topology_v1alpha1_PartitionSet(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PartitionSet defines a target domain and dimensions to divide a set of shards into 1 or more partitions.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "spec": { - SchemaProps: spec.SchemaProps{ - Description: "spec holds the desired state.", - Default: map[string]interface{}{}, - Ref: ref("github.com/kcp-dev/kcp/sdk/apis/topology/v1alpha1.PartitionSetSpec"), - }, - }, - "status": { - SchemaProps: spec.SchemaProps{ - Description: "status holds information about the current status", - Default: map[string]interface{}{}, - Ref: ref("github.com/kcp-dev/kcp/sdk/apis/topology/v1alpha1.PartitionSetStatus"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/kcp-dev/kcp/sdk/apis/topology/v1alpha1.PartitionSetSpec", "github.com/kcp-dev/kcp/sdk/apis/topology/v1alpha1.PartitionSetStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_sdk_apis_topology_v1alpha1_PartitionSetList(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PartitionSetList is a list of PartitionSet resources.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { + "defaultChildWorkspaceType": { SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", + Description: "defaultChildWorkspaceType is the WorkspaceType that will be used by default if another, nested Workspace is created in a workspace of this type. When this field is unset, the user must specify a type when creating nested workspaces. Extending another WorkspaceType does not inherit its defaultChildWorkspaceType.", + Ref: ref("github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.WorkspaceTypeReference"), }, }, - "metadata": { + "limitAllowedChildren": { SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + Description: "limitAllowedChildren specifies constraints for sub-workspaces created in workspaces of this type. 
These are in addition to child constraints of types this one extends.", + Ref: ref("github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.WorkspaceTypeSelector"), }, }, - "items": { + "limitAllowedParents": { SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/kcp-dev/kcp/sdk/apis/topology/v1alpha1.PartitionSet"), - }, - }, - }, + Description: "limitAllowedParents specifies constraints for the parent workspace that workspaces of this type are created in. These are in addition to parent constraints of types this one extends.", + Ref: ref("github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.WorkspaceTypeSelector"), }, }, - }, - Required: []string{"metadata", "items"}, - }, - }, - Dependencies: []string{ - "github.com/kcp-dev/kcp/sdk/apis/topology/v1alpha1.PartitionSet", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, - } -} - -func schema_sdk_apis_topology_v1alpha1_PartitionSetSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PartitionSetSpec records dimensions and a target domain for the partitioning.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "dimensions": { + "defaultAPIBindings": { SchemaProps: spec.SchemaProps{ - Description: "dimensions (optional) are used to group shards into partitions", + Description: "defaultAPIBindings are the APIs to bind during initialization of workspaces created from this type. The APIBinding names will be generated dynamically.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", + Default: map[string]interface{}{}, + Ref: ref("github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.APIExportReference"), }, }, }, }, }, - "shardSelector": { - SchemaProps: spec.SchemaProps{ - Description: "shardSelector (optional) specifies filtering for shard targets.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"), - }, - }, }, }, }, Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"}, + "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.APIExportReference", "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.WorkspaceTypeExtension", "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.WorkspaceTypeReference", "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.WorkspaceTypeSelector"}, } } -func schema_sdk_apis_topology_v1alpha1_PartitionSetStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_sdk_apis_tenancy_v1alpha1_WorkspaceTypeStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "PartitionSetStatus records the status of the PartitionSet.", + Description: "WorkspaceTypeStatus defines the observed state of WorkspaceType.", Type: []string{"object"}, Properties: map[string]spec.Schema{ - "count": { - SchemaProps: spec.SchemaProps{ - Description: "count is the total number of partitions.", - Type: []string{"integer"}, - Format: "int32", - }, - }, "conditions": { SchemaProps: spec.SchemaProps{ - Description: "conditions is a list of conditions that apply to the APIExportEndpointSlice.", + Description: "conditions is a list of conditions that apply to the WorkspaceType.", Type: []string{"array"}, Items: 
&spec.SchemaOrArray{ Schema: &spec.Schema{ @@ -4370,83 +3534,93 @@ func schema_sdk_apis_topology_v1alpha1_PartitionSetStatus(ref common.ReferenceCa }, }, }, - }, - }, - }, - Dependencies: []string{ - "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/apis/conditions/v1alpha1.Condition"}, - } -} - -func schema_sdk_apis_topology_v1alpha1_PartitionSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PartitionSpec records the values defining the partition along multiple dimensions.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "selector": { + "virtualWorkspaces": { SchemaProps: spec.SchemaProps{ - Description: "selector (optional) is a label selector that filters shard targets.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"), + Description: "virtualWorkspaces contains all APIExport virtual workspace URLs.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.VirtualWorkspace"), + }, + }, + }, }, }, }, }, }, Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"}, + "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.VirtualWorkspace", "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/apis/conditions/v1alpha1.Condition"}, } } -func schema_sdk_apis_workload_v1alpha1_ResourceToSync(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_conditions_apis_conditions_v1alpha1_Condition(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, + Description: "Condition defines an observation of an object operational state.", + Type: []string{"object"}, Properties: map[string]spec.Schema{ - "versions": { + "type": { SchemaProps: spec.SchemaProps{ - Description: "versions are the resource versions the syncer can choose to sync depending on availability on the downstream cluster. Conversion to the storage version, if necessary, will be done on the kcp side. The versions are ordered by precedence and the first version compatible is preferred by syncer.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, + Description: "Type of condition in CamelCase or in foo.example.com/CamelCase. Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important.", + Default: "", + Type: []string{"string"}, + Format: "", }, }, - "identityHash": { + "status": { SchemaProps: spec.SchemaProps{ - Description: "identityHash is the identity for a given APIExport that the APIResourceSchema belongs to. The hash can be found on APIExport and APIResourceSchema's status. It will be empty for core types.", + Description: "Status of the condition, one of True, False, Unknown.", Default: "", Type: []string{"string"}, Format: "", }, }, - "state": { + "severity": { SchemaProps: spec.SchemaProps{ + Description: "Severity provides an explicit classification of Reason code, so the users or machines can immediately understand the current situation and act accordingly. 
The Severity field MUST be set only when Status=False.", + Type: []string{"string"}, + Format: "", + }, + }, + "lastTransitionTime": { + SchemaProps: spec.SchemaProps{ + Description: "Last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.", + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "reason": { + SchemaProps: spec.SchemaProps{ + Description: "The reason for the condition's last transition in CamelCase. The specific API may choose whether or not this field is considered a guaranteed API. This field may not be empty.", + Type: []string{"string"}, + Format: "", + }, + }, + "message": { SchemaProps: spec.SchemaProps{ - Description: "state indicate whether the resources schema is compatible to the SyncTarget. It must be updated by syncer after checking the API compatibility on SyncTarget.", + Description: "A human readable message indicating details about the transition. This field may be empty.", Type: []string{"string"}, Format: "", }, }, }, - Required: []string{"versions"}, + Required: []string{"type", "status", "lastTransitionTime"}, }, }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, } } -func schema_sdk_apis_workload_v1alpha1_SyncTarget(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_sdk_apis_topology_v1alpha1_Partition(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "SyncTarget describes a member cluster capable of running workloads.", + Description: "Partition defines the selection of a set of shards along multiple dimensions. 
Partitions can get automatically generated through a partitioner or manually crafted.", Type: []string{"object"}, Properties: map[string]spec.Schema{ "kind": { @@ -4471,31 +3645,24 @@ func schema_sdk_apis_workload_v1alpha1_SyncTarget(ref common.ReferenceCallback) }, "spec": { SchemaProps: spec.SchemaProps{ - Description: "Spec holds the desired state.", - Default: map[string]interface{}{}, - Ref: ref("github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1.SyncTargetSpec"), - }, - }, - "status": { - SchemaProps: spec.SchemaProps{ - Description: "Status communicates the observed state.", + Description: "spec holds the desired state.", Default: map[string]interface{}{}, - Ref: ref("github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1.SyncTargetStatus"), + Ref: ref("github.com/kcp-dev/kcp/sdk/apis/topology/v1alpha1.PartitionSpec"), }, }, }, }, }, Dependencies: []string{ - "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1.SyncTargetSpec", "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1.SyncTargetStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + "github.com/kcp-dev/kcp/sdk/apis/topology/v1alpha1.PartitionSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, } } -func schema_sdk_apis_workload_v1alpha1_SyncTargetList(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_sdk_apis_topology_v1alpha1_PartitionList(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "SyncTargetList is a list of SyncTarget resources", + Description: "PartitionList is a list of Partition resources.", Type: []string{"object"}, Properties: map[string]spec.Schema{ "kind": { @@ -4525,7 +3692,7 @@ func schema_sdk_apis_workload_v1alpha1_SyncTargetList(ref common.ReferenceCallba Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1.SyncTarget"), + Ref: ref("github.com/kcp-dev/kcp/sdk/apis/topology/v1alpha1.Partition"), }, }, }, @@ -4536,223 +3703,198 @@ func schema_sdk_apis_workload_v1alpha1_SyncTargetList(ref common.ReferenceCallba }, }, Dependencies: []string{ - "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1.SyncTarget", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + "github.com/kcp-dev/kcp/sdk/apis/topology/v1alpha1.Partition", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, } } -func schema_sdk_apis_workload_v1alpha1_SyncTargetSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_sdk_apis_topology_v1alpha1_PartitionSet(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "SyncTargetSpec holds the desired state of the SyncTarget (from the client).", + Description: "PartitionSet defines a target domain and dimensions to divide a set of shards into 1 or more partitions.", Type: []string{"object"}, Properties: map[string]spec.Schema{ - "unschedulable": { + "kind": { SchemaProps: spec.SchemaProps{ - Description: "Unschedulable controls cluster schedulability of new workloads. By default, cluster is schedulable.", - Default: false, - Type: []string{"boolean"}, + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, Format: "", }, }, - "evictAfter": { + "apiVersion": { SchemaProps: spec.SchemaProps{ - Description: "EvictAfter controls cluster schedulability of new and existing workloads. After the EvictAfter time, any workload scheduled to the cluster will be unassigned from the cluster. By default, workloads scheduled to the cluster are not evicted.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", }, }, - "supportedAPIExports": { + "metadata": { SchemaProps: spec.SchemaProps{ - Description: "SupportedAPIExports defines a set of APIExports supposed to be supported by this SyncTarget. The SyncTarget will be selected to deploy the workload only when the resource schema on the SyncTarget is compatible with the resource schema included in the exports.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.APIExportReference"), - }, - }, - }, + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), }, }, - "cells": { + "spec": { SchemaProps: spec.SchemaProps{ - Description: "Cells is a set of labels to identify the cells the SyncTarget belongs to. SyncTargets with the same cells run as they are in the same physical cluster. Each key/value pair in the cells should be added and updated by service providers (i.e. 
a network provider updates one key/value, while the storage provider updates another.)", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, + Description: "spec holds the desired state.", + Default: map[string]interface{}{}, + Ref: ref("github.com/kcp-dev/kcp/sdk/apis/topology/v1alpha1.PartitionSetSpec"), + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "status holds information about the current status", + Default: map[string]interface{}{}, + Ref: ref("github.com/kcp-dev/kcp/sdk/apis/topology/v1alpha1.PartitionSetStatus"), }, }, }, }, }, Dependencies: []string{ - "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1.APIExportReference", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, + "github.com/kcp-dev/kcp/sdk/apis/topology/v1alpha1.PartitionSetSpec", "github.com/kcp-dev/kcp/sdk/apis/topology/v1alpha1.PartitionSetStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, } } -func schema_sdk_apis_workload_v1alpha1_SyncTargetStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_sdk_apis_topology_v1alpha1_PartitionSetList(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "SyncTargetStatus communicates the observed state of the SyncTarget (from the controller).", + Description: "PartitionSetList is a list of PartitionSet resources.", Type: []string{"object"}, Properties: map[string]spec.Schema{ - "allocatable": { + "kind": { SchemaProps: spec.SchemaProps{ - Description: "Allocatable represents the resources that are available for scheduling.", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), - }, - }, - }, + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", }, }, - "capacity": { + "apiVersion": { SchemaProps: spec.SchemaProps{ - Description: "Capacity represents the total resources of the cluster.", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), - }, - }, - }, + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", }, }, - "conditions": { + "metadata": { SchemaProps: spec.SchemaProps{ - Description: "Current processing state of the SyncTarget.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/apis/conditions/v1alpha1.Condition"), - }, - }, - }, + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), }, }, - "syncedResources": { + "items": { SchemaProps: spec.SchemaProps{ - Description: "SyncedResources represents the resources that the syncer of the SyncTarget can sync. It MUST be updated by kcp server.", - Type: []string{"array"}, + Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1.ResourceToSync"), + Ref: ref("github.com/kcp-dev/kcp/sdk/apis/topology/v1alpha1.PartitionSet"), }, }, }, }, }, - "lastSyncerHeartbeatTime": { - SchemaProps: spec.SchemaProps{ - Description: "A timestamp indicating when the syncer last reported status.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), - }, - }, - "virtualWorkspaces": { + }, + Required: []string{"metadata", "items"}, + }, + }, + Dependencies: []string{ + "github.com/kcp-dev/kcp/sdk/apis/topology/v1alpha1.PartitionSet", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + } +} + +func schema_sdk_apis_topology_v1alpha1_PartitionSetSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PartitionSetSpec records dimensions and a target domain for the partitioning.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "dimensions": { SchemaProps: spec.SchemaProps{ - Description: "VirtualWorkspaces contains all virtual workspace URLs.", + Description: "dimensions (optional) are used to group shards into partitions", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1.VirtualWorkspace"), + Default: "", + Type: []string{"string"}, + Format: "", }, }, }, }, }, - "tunnelWorkspaces": { + "shardSelector": { SchemaProps: spec.SchemaProps{ - Description: "TunnelWorkspaces contains all URLs (one per shard) that point to the SyncTarget workspace in order to setup the tunneler.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1.TunnelWorkspace"), - }, - }, - }, + Description: "shardSelector (optional) specifies filtering for shard targets.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"), }, }, }, }, }, Dependencies: []string{ - "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/apis/conditions/v1alpha1.Condition", "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1.ResourceToSync", "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1.TunnelWorkspace", "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1.VirtualWorkspace", "k8s.io/apimachinery/pkg/api/resource.Quantity", 
"k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, + "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"}, } } -func schema_sdk_apis_workload_v1alpha1_TunnelWorkspace(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_sdk_apis_topology_v1alpha1_PartitionSetStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, + Description: "PartitionSetStatus records the status of the PartitionSet.", + Type: []string{"object"}, Properties: map[string]spec.Schema{ - "url": { + "count": { SchemaProps: spec.SchemaProps{ - Description: "url is the URL the Syncer should use to connect to the Syncer tunnel for a given shard.", - Default: "", - Type: []string{"string"}, - Format: "", + Description: "count is the total number of partitions.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "conditions": { + SchemaProps: spec.SchemaProps{ + Description: "conditions is a list of conditions that apply to the APIExportEndpointSlice.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/apis/conditions/v1alpha1.Condition"), + }, + }, + }, }, }, }, - Required: []string{"url"}, }, }, + Dependencies: []string{ + "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/apis/conditions/v1alpha1.Condition"}, } } -func schema_sdk_apis_workload_v1alpha1_VirtualWorkspace(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_sdk_apis_topology_v1alpha1_PartitionSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, + Description: "PartitionSpec records the values defining the partition along multiple dimensions.", + Type: []string{"object"}, Properties: map[string]spec.Schema{ - "syncerURL": { - SchemaProps: spec.SchemaProps{ - Description: "SyncerURL is the URL of the syncer virtual workspace.", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "upsyncerURL": { + "selector": { SchemaProps: spec.SchemaProps{ - Description: "UpsyncerURL is the URL of the upsyncer virtual workspace.", - Default: "", - Type: []string{"string"}, - Format: "", + Description: "selector (optional) is a label selector that filters shard targets.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"), }, }, }, - Required: []string{"syncerURL", "upsyncerURL"}, }, }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"}, } } diff --git a/pkg/reconciler/cache/replication/replication_controller.go b/pkg/reconciler/cache/replication/replication_controller.go index 89b98a8ab73..792c080093e 100644 --- a/pkg/reconciler/cache/replication/replication_controller.go +++ b/pkg/reconciler/cache/replication/replication_controller.go @@ -44,9 +44,7 @@ import ( apisv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1" "github.com/kcp-dev/kcp/sdk/apis/core" corev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1" - schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1" tenancyv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" kcpinformers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions" ) @@ -127,16 +125,6 @@ func NewController( local: 
localKcpInformers.Tenancy().V1alpha1().WorkspaceTypes().Informer(), global: globalKcpInformers.Tenancy().V1alpha1().WorkspaceTypes().Informer(), }, - workloadv1alpha1.SchemeGroupVersion.WithResource("synctargets"): { - kind: "SyncTarget", - local: localKcpInformers.Workload().V1alpha1().SyncTargets().Informer(), - global: globalKcpInformers.Workload().V1alpha1().SyncTargets().Informer(), - }, - schedulingv1alpha1.SchemeGroupVersion.WithResource("locations"): { - kind: "Location", - local: localKcpInformers.Scheduling().V1alpha1().Locations().Informer(), - global: globalKcpInformers.Scheduling().V1alpha1().Locations().Informer(), - }, rbacv1.SchemeGroupVersion.WithResource("clusterroles"): { kind: "ClusterRole", filter: func(u *unstructured.Unstructured) bool { diff --git a/pkg/reconciler/coordination/deployment/deployment_controller.go b/pkg/reconciler/coordination/deployment/deployment_controller.go deleted file mode 100644 index 37f8d48386a..00000000000 --- a/pkg/reconciler/coordination/deployment/deployment_controller.go +++ /dev/null @@ -1,383 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package deployment - -import ( - "context" - "fmt" - "reflect" - "strings" - "time" - - "github.com/go-logr/logr" - kcpcache "github.com/kcp-dev/apimachinery/v2/pkg/cache" - v1 "github.com/kcp-dev/client-go/informers/apps/v1" - kubernetesclient "github.com/kcp-dev/client-go/kubernetes" - "github.com/kcp-dev/logicalcluster/v3" - - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/equality" - "k8s.io/apimachinery/pkg/runtime/schema" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/wait" - appsv1client "k8s.io/client-go/kubernetes/typed/apps/v1" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/util/workqueue" - "k8s.io/klog/v2" - - "github.com/kcp-dev/kcp/pkg/logging" - "github.com/kcp-dev/kcp/pkg/reconciler/committer" - "github.com/kcp-dev/kcp/sdk/apis/workload/helpers" - "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" - "github.com/kcp-dev/kcp/tmc/pkg/coordination" -) - -const ( - controllerName = "kcp-deployment-coordination" -) - -type Deployment = appsv1.Deployment -type DeploymentSpec = appsv1.DeploymentSpec -type DeploymentStatus = appsv1.DeploymentStatus -type Patcher = appsv1client.DeploymentInterface -type Resource = committer.Resource[*DeploymentSpec, *DeploymentStatus] -type CommitFunc = func(context.Context, *Resource, *Resource) error - -// NewController returns a new controller instance. 
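-// It maintains two work queues, one per view: the upstream view (the
-// Deployment as the workspace user sees it) and the syncer view (the
-// per-SyncTarget projections), so the two reconciliation paths can run
-// independently.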
-func NewController( - ctx context.Context, - kubeClusterClient kubernetesclient.ClusterInterface, - deploymentClusterInformer v1.DeploymentClusterInformer, -) (*controller, error) { - lister := deploymentClusterInformer.Lister() - informer := deploymentClusterInformer.Informer() - - c := &controller{ - upstreamViewQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), controllerName+"upstream-view"), - syncerViewQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), controllerName+"syncer-view"), - syncerViewRetriever: coordination.NewDefaultSyncerViewManager[*appsv1.Deployment](), - gvr: appsv1.SchemeGroupVersion.WithResource("deployments"), - - getDeployment: func(clusterName logicalcluster.Name, namespace, name string) (*appsv1.Deployment, error) { - return lister.Cluster(clusterName).Deployments(namespace).Get(name) - }, - patcher: func(clusterName logicalcluster.Name, namespace string) committer.Patcher[*appsv1.Deployment] { - return kubeClusterClient.AppsV1().Deployments().Cluster(clusterName.Path()).Namespace(namespace) - }, - } - - logger := logging.WithReconciler(klog.FromContext(ctx), controllerName) - - _, _ = informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { - enqueue(obj, c.upstreamViewQueue, logger.WithValues("view", "upstream")) - enqueue(obj, c.syncerViewQueue, logger.WithValues("view", "syncer")) - }, - UpdateFunc: func(old, new interface{}) { - oldObj, ok := old.(coordination.Object) - if !ok { - utilruntime.HandleError(fmt.Errorf("resource should be a coordination.Object, but was %T", oldObj)) - return - } - newObj, ok := new.(coordination.Object) - if !ok { - utilruntime.HandleError(fmt.Errorf("resource should be a coordination.Object, but was %T", newObj)) - return - } - - if coordination.AnySyncerViewChanged(oldObj, newObj) { - enqueue(new, c.syncerViewQueue, logger.WithValues("view", "syncer")) - } - if coordination.UpstreamViewChanged(oldObj, newObj, deploymentContentsEqual) { - enqueue(new, c.upstreamViewQueue, logger.WithValues("view", "upstream")) - } - }, - }) - - return c, nil -} - -// controller reconciles watches deployments and coordinates them between SyncTargets. 
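-// On the upstream side it spreads the requested replica count across the
-// syncing SyncTargets via per-target spec-diff annotations; on the syncer
-// side it summarizes the per-target statuses (replica counts and
-// conditions) back into the upstream Deployment status.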
-type controller struct { - upstreamViewQueue workqueue.RateLimitingInterface - syncerViewQueue workqueue.RateLimitingInterface - - getDeployment func(clusterName logicalcluster.Name, namespace, name string) (*appsv1.Deployment, error) - patcher func(clusterName logicalcluster.Name, namespace string) committer.Patcher[*appsv1.Deployment] - - syncerViewRetriever coordination.SyncerViewRetriever[*appsv1.Deployment] - gvr schema.GroupVersionResource -} - -func (c *controller) committer(clusterName logicalcluster.Name, namespace string) CommitFunc { - return committer.NewCommitterScoped[*Deployment, Patcher, *DeploymentSpec, *DeploymentStatus](c.patcher(clusterName, namespace)) -} - -func filter[K comparable, V interface{}](aMap map[K]V, keep func(key K) bool) map[K]V { - result := make(map[K]V) - for key, val := range aMap { - if keep(key) { - result[key] = val - } - } - return result -} - -func deploymentContentsEqual(old, new interface{}) bool { - oldDeployment, ok := old.(*appsv1.Deployment) - if !ok { - return false - } - newDeployment, ok := new.(*appsv1.Deployment) - if !ok { - return false - } - - if !equality.Semantic.DeepEqual(oldDeployment.Labels, newDeployment.Labels) { - return false - } - - oldAnnotations := filter(oldDeployment.Annotations, func(key string) bool { - return !strings.HasPrefix(key, v1alpha1.ClusterSpecDiffAnnotationPrefix) - }) - newAnnotations := filter(newDeployment.Annotations, func(key string) bool { - return !strings.HasPrefix(key, v1alpha1.ClusterSpecDiffAnnotationPrefix) - }) - if !equality.Semantic.DeepEqual(oldAnnotations, newAnnotations) { - return false - } - - oldReplicas := oldDeployment.Spec.Replicas - newReplicas := newDeployment.Spec.Replicas - - if oldReplicas == nil && newReplicas == nil { - return true - } - - return oldReplicas != nil && newReplicas != nil && *oldReplicas == *newReplicas -} - -// enqueue adds the logical cluster to the queue. -func enqueue(obj interface{}, queue workqueue.RateLimitingInterface, logger logr.Logger) { - key, err := kcpcache.DeletionHandlingMetaClusterNamespaceKeyFunc(obj) - if err != nil { - utilruntime.HandleError(err) - return - } - logger = logging.WithQueueKey(logger, key) - logger.V(2).Info("queueing deployment") - queue.Add(key) -} - -// Start starts the controller, which stops when ctx.Done() is closed. 
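-// It runs numThreads workers per queue, each draining its queue until the
-// context is cancelled.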
-func (c *controller) Start(ctx context.Context, numThreads int) { - defer utilruntime.HandleCrash() - defer c.upstreamViewQueue.ShutDown() - defer c.syncerViewQueue.ShutDown() - - logger := logging.WithReconciler(klog.FromContext(ctx), controllerName) - ctx = klog.NewContext(ctx, logger) - logger.Info("Starting controller") - defer logger.Info("Shutting down controller") - - for i := 0; i < numThreads; i++ { - go wait.UntilWithContext(ctx, c.startUpstreamViewWorker, time.Second) - go wait.UntilWithContext(ctx, c.startSyncerViewWorker, time.Second) - } - - <-ctx.Done() -} - -func (c *controller) startUpstreamViewWorker(ctx context.Context) { - logger := klog.FromContext(ctx).WithValues("view", "upstream") - ctx = klog.NewContext(ctx, logger) - for processNextWorkItem(ctx, c.upstreamViewQueue, c.processUpstreamView) { - } -} - -func (c *controller) startSyncerViewWorker(ctx context.Context) { - logger := klog.FromContext(ctx).WithValues("view", "syncer") - ctx = klog.NewContext(ctx, logger) - for processNextWorkItem(ctx, c.syncerViewQueue, c.processSyncerView) { - } -} - -func processNextWorkItem(ctx context.Context, queue workqueue.RateLimitingInterface, process func(context.Context, string) error) bool { - // Wait until there is a new item in the working queue - k, quit := queue.Get() - if quit { - return false - } - key := k.(string) - - logger := logging.WithQueueKey(klog.FromContext(ctx), key) - ctx = klog.NewContext(ctx, logger) - logger.V(1).Info("processing key") - - // No matter what, tell the queue we're done with this key, to unblock - // other workers. - defer queue.Done(key) - - if err := process(ctx, key); err != nil { - utilruntime.HandleError(fmt.Errorf("%q controller failed to sync %q, err: %w", controllerName, key, err)) - queue.AddRateLimited(key) - return true - } - queue.Forget(key) - return true -} - -func (c *controller) processUpstreamView(ctx context.Context, key string) error { - logger := klog.FromContext(ctx) - - clusterName, namespace, name, err := kcpcache.SplitMetaClusterNamespaceKey(key) - if err != nil { - logger.Error(err, "failed to split key, dropping") - return nil - } - - deployment, err := c.getDeployment(clusterName, namespace, name) - if err != nil { - return err - } - logger = logging.WithObject(logger, deployment) - ctx = klog.NewContext(ctx, logger) - - syncIntents, err := helpers.GetSyncIntents(deployment) - if err != nil { - return err - } - - updated := deployment.DeepCopy() - - syncerViews := sets.New[string]() - for syncTarget, syncTargetSyncing := range syncIntents { - if syncTargetSyncing.ResourceState == v1alpha1.ResourceStateSync && syncTargetSyncing.DeletionTimestamp == nil { - syncerViews.Insert(syncTarget) - } - } - - newSpecDiffAnnotation := make(map[string]string, len(syncerViews)) - if len(syncerViews) > 0 { - replicasEach := int64(*updated.Spec.Replicas) / int64(syncerViews.Len()) - rest := int64(*updated.Spec.Replicas) % int64(syncerViews.Len()) - - for index, syncTargetKey := range sets.List[string](syncerViews) { - replicasToSet := replicasEach - if index == 0 { - replicasToSet += rest - } - newSpecDiffAnnotation[v1alpha1.ClusterSpecDiffAnnotationPrefix+syncTargetKey] = fmt.Sprintf(`[{ "op": "replace", "path": "/replicas", "value": %d }]`, replicasToSet) - } - } - for key := range updated.Annotations { - if _, found := newSpecDiffAnnotation[key]; !found && - strings.HasPrefix(key, v1alpha1.ClusterSpecDiffAnnotationPrefix) { - delete(updated.Annotations, key) - } - } - - if updated.Annotations == nil { - updated.Annotations = 
make(map[string]string, len(newSpecDiffAnnotation)) - } - for key, value := range newSpecDiffAnnotation { - updated.Annotations[key] = value - } - - return c.committer(clusterName, namespace)(ctx, - &committer.Resource[*appsv1.DeploymentSpec, *appsv1.DeploymentStatus]{ - ObjectMeta: deployment.ObjectMeta, - }, - &committer.Resource[*appsv1.DeploymentSpec, *appsv1.DeploymentStatus]{ - ObjectMeta: updated.ObjectMeta, - }) -} - -func (c *controller) processSyncerView(ctx context.Context, key string) error { - logger := klog.FromContext(ctx) - - clusterName, namespace, name, err := kcpcache.SplitMetaClusterNamespaceKey(key) - if err != nil { - logger.Error(err, "failed to split key, dropping") - return nil - } - - deployment, err := c.getDeployment(clusterName, namespace, name) - if err != nil { - return err - } - logger = logging.WithObject(logger, deployment) - ctx = klog.NewContext(ctx, logger) - - summarizedStatus := appsv1.DeploymentStatus{} - - syncerViews, err := c.syncerViewRetriever.GetAllSyncerViews(ctx, c.gvr, deployment) - if err != nil { - return err - } - - var emptyStatus appsv1.DeploymentStatus - consolidatedConditions := make(map[appsv1.DeploymentConditionType]appsv1.DeploymentCondition) - - for _, syncerView := range syncerViews { - if reflect.DeepEqual(syncerView.Status, emptyStatus) { - continue - } - - summarizedStatus.Replicas += syncerView.Status.Replicas - summarizedStatus.UpdatedReplicas += syncerView.Status.UpdatedReplicas - summarizedStatus.ReadyReplicas += syncerView.Status.ReadyReplicas - summarizedStatus.AvailableReplicas += syncerView.Status.AvailableReplicas - summarizedStatus.UnavailableReplicas += syncerView.Status.UnavailableReplicas - - for _, condition := range syncerView.Status.Conditions { - if consolidated, ok := consolidatedConditions[condition.Type]; !ok { - consolidatedConditions[condition.Type] = condition - } else { - switch consolidated.Status { - case corev1.ConditionUnknown: - consolidatedConditions[condition.Type] = condition - case corev1.ConditionFalse: - if condition.Status == corev1.ConditionTrue { - consolidatedConditions[condition.Type] = condition - } - } - } - } - } - - conditionTypes := sets.New[string]() - for conditionType := range consolidatedConditions { - conditionTypes.Insert(string(conditionType)) - } - - for _, condition := range sets.List[string](conditionTypes) { - summarizedStatus.Conditions = append(summarizedStatus.Conditions, consolidatedConditions[appsv1.DeploymentConditionType(condition)]) - } - - return c.committer(clusterName, namespace)(ctx, - &committer.Resource[*appsv1.DeploymentSpec, *appsv1.DeploymentStatus]{ - ObjectMeta: deployment.ObjectMeta, - Status: &deployment.Status, - }, - &committer.Resource[*appsv1.DeploymentSpec, *appsv1.DeploymentStatus]{ - ObjectMeta: deployment.ObjectMeta, - Status: &summarizedStatus, - }) -} diff --git a/pkg/reconciler/coordination/deployment/deployment_controller_test.go b/pkg/reconciler/coordination/deployment/deployment_controller_test.go deleted file mode 100644 index 9a02ba5548a..00000000000 --- a/pkg/reconciler/coordination/deployment/deployment_controller_test.go +++ /dev/null @@ -1,435 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package deployment - -import ( - "context" - "testing" - - "github.com/kcp-dev/logicalcluster/v3" - "github.com/stretchr/testify/require" - - appsv1 "k8s.io/api/apps/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - - "github.com/kcp-dev/kcp/pkg/reconciler/committer" - "github.com/kcp-dev/kcp/tmc/pkg/coordination" -) - -func intPtr(i int32) *int32 { - res := i - return &res -} - -type mockedPatcher struct { - clusterName logicalcluster.Name - namespace string - appliedPatch string -} - -func (p *mockedPatcher) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (*Deployment, error) { - p.appliedPatch = string(data) - return nil, nil -} - -func TestUpstreamViewReconciler(t *testing.T) { - tests := map[string]struct { - input *appsv1.Deployment - appliedPatch string - wantError bool - }{ - "spread requested replicas": { - input: &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - UID: types.UID("uid"), - ResourceVersion: "resourceVersion", - Labels: map[string]string{ - "state.workload.kcp.io/syncTarget1": "Sync", - "state.workload.kcp.io/syncTarget2": "Sync", - }, - }, - Spec: appsv1.DeploymentSpec{ - Replicas: intPtr(7), - }, - }, - appliedPatch: `{"metadata":{"annotations":{"experimental.spec-diff.workload.kcp.io/syncTarget1":"[{ \"op\": \"replace\", \"path\": \"/replicas\", \"value\": 4 }]","experimental.spec-diff.workload.kcp.io/syncTarget2":"[{ \"op\": \"replace\", \"path\": \"/replicas\", \"value\": 3 }]"},"resourceVersion":"resourceVersion","uid":"uid"}}`, - }, - "remove obsolete spec-diff annotation": { - input: &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - UID: types.UID("uid"), - ResourceVersion: "resourceVersion", - Labels: map[string]string{ - "state.workload.kcp.io/syncTarget1": "Sync", - }, - Annotations: map[string]string{ - "experimental.spec-diff.workload.kcp.io/syncTarget1": "[{ \"op\": \"replace\", \"path\": \"/replicas\", \"value\": 3 }]", - "experimental.spec-diff.workload.kcp.io/syncTarget2": "[{ \"op\": \"replace\", \"path\": \"/replicas\", \"value\": 4 }]", - }, - }, - Spec: appsv1.DeploymentSpec{ - Replicas: intPtr(7), - }, - }, - appliedPatch: `{"metadata":{"annotations":{"experimental.spec-diff.workload.kcp.io/syncTarget1":"[{ \"op\": \"replace\", \"path\": \"/replicas\", \"value\": 7 }]","experimental.spec-diff.workload.kcp.io/syncTarget2":null},"resourceVersion":"resourceVersion","uid":"uid"}}`, - }, - "don't take empty labels into account": { - input: &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - UID: types.UID("uid"), - ResourceVersion: "resourceVersion", - Labels: map[string]string{ - "state.workload.kcp.io/syncTarget1": "Sync", - "state.workload.kcp.io/syncTarget2": "", - }, - }, - Spec: appsv1.DeploymentSpec{ - Replicas: intPtr(7), - }, - }, - appliedPatch: `{"metadata":{"annotations":{"experimental.spec-diff.workload.kcp.io/syncTarget1":"[{ \"op\": \"replace\", \"path\": \"/replicas\", \"value\": 7 }]"},"resourceVersion":"resourceVersion","uid":"uid"}}`, - }, - "Invalid deletion 
annotation": { - input: &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - UID: types.UID("uid"), - ResourceVersion: "resourceVersion", - Labels: map[string]string{ - "state.workload.kcp.io/syncTarget1": "Sync", - "state.workload.kcp.io/syncTarget2": "Sync", - }, - Annotations: map[string]string{ - "deletion.internal.workload.kcp.io/syncTarget2": "wrong format", - }, - }, - Spec: appsv1.DeploymentSpec{ - Replicas: intPtr(7), - }, - }, - wantError: true, - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - var patcher *mockedPatcher - controller := controller{ - getDeployment: func(lclusterName logicalcluster.Name, namespace, name string) (*appsv1.Deployment, error) { - return tc.input, nil - }, - patcher: func(clusterName logicalcluster.Name, namespace string) committer.Patcher[*appsv1.Deployment] { - patcher = &mockedPatcher{ - clusterName: clusterName, - namespace: namespace, - } - return patcher - }, - syncerViewRetriever: coordination.NewDefaultSyncerViewManager[*appsv1.Deployment](), - } - - err := controller.processUpstreamView(context.Background(), "") - if tc.wantError { - require.Error(t, err) - return - } else { - require.NoError(t, err) - } - - require.Equal(t, tc.appliedPatch, patcher.appliedPatch) - }) - } -} - -func TestSyncerViewReconciler(t *testing.T) { - tests := map[string]struct { - input *appsv1.Deployment - appliedPatch string - wantError bool - }{ - "summarize available replicas": { - input: &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - UID: types.UID("uid"), - ResourceVersion: "resourceVersion", - Annotations: map[string]string{ - "diff.syncer.internal.kcp.io/syncTarget1": `{ "status": { "availableReplicas": 4 }}`, - "diff.syncer.internal.kcp.io/syncTarget2": `{ "status": { "availableReplicas": 3 }}`, - }, - Labels: map[string]string{ - "state.workload.kcp.io/syncTarget1": "Sync", - "state.workload.kcp.io/syncTarget2": "Sync", - }, - }, - Spec: appsv1.DeploymentSpec{ - Replicas: intPtr(7), - }, - }, - appliedPatch: `{"metadata":{"resourceVersion":"resourceVersion","uid":"uid"},"status":{"availableReplicas":7}}`, - }, - "summarize available replicas - one with empty status": { - input: &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - UID: types.UID("uid"), - ResourceVersion: "resourceVersion", - Annotations: map[string]string{ - "diff.syncer.internal.kcp.io/syncTarget1": `{ "status": { "availableReplicas": 4 }}`, - "diff.syncer.internal.kcp.io/syncTarget2": `{ "status": { "availableReplicas": 3 }}`, - "diff.syncer.internal.kcp.io/syncTarget3": `{ "status": {}}`, - }, - Labels: map[string]string{ - "state.workload.kcp.io/syncTarget1": "Sync", - "state.workload.kcp.io/syncTarget2": "Sync", - "state.workload.kcp.io/syncTarget3": "Sync", - }, - }, - Spec: appsv1.DeploymentSpec{ - Replicas: intPtr(7), - }, - }, - appliedPatch: `{"metadata":{"resourceVersion":"resourceVersion","uid":"uid"},"status":{"availableReplicas":7}}`, - }, - "summarize conditions": { - input: &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - UID: types.UID("uid"), - ResourceVersion: "resourceVersion", - Annotations: map[string]string{ - "diff.syncer.internal.kcp.io/syncTarget1": `{ "status": { "conditions": [ { "type": "Available", "status": "True" }, {"type": "ReplicaFailure", "status": "Unknown" }, { "type": "Progressing", "status": "False" } ] } }`, - "diff.syncer.internal.kcp.io/syncTarget2": `{ "status": { "conditions": [ { "type": "ReplicaFailure", "status": "False" }, { "type": 
"Progressing", "status": "True" } ] } }`, - }, - Labels: map[string]string{ - "state.workload.kcp.io/syncTarget1": "Sync", - "state.workload.kcp.io/syncTarget2": "Sync", - }, - }, - Spec: appsv1.DeploymentSpec{ - Replicas: intPtr(7), - }, - }, - appliedPatch: `{"metadata":{"resourceVersion":"resourceVersion","uid":"uid"},"status":{"conditions":[{"lastTransitionTime":null,"lastUpdateTime":null,"status":"True","type":"Available"},{"lastTransitionTime":null,"lastUpdateTime":null,"status":"True","type":"Progressing"},{"lastTransitionTime":null,"lastUpdateTime":null,"status":"False","type":"ReplicaFailure"}]}}`, - }, - "invalid syncer views": { - input: &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - UID: types.UID("uid"), - ResourceVersion: "resourceVersion", - Annotations: map[string]string{ - "diff.syncer.internal.kcp.io/syncTarget1": `invalid json`, - "diff.syncer.internal.kcp.io/syncTarget2": `{ "status": { "availableReplicas": 3 }}`, - }, - Labels: map[string]string{ - "state.workload.kcp.io/syncTarget1": "Sync", - "state.workload.kcp.io/syncTarget2": "Sync", - }, - }, - Spec: appsv1.DeploymentSpec{ - Replicas: intPtr(7), - }, - }, - wantError: true, - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - var patcher *mockedPatcher - controller := controller{ - getDeployment: func(lclusterName logicalcluster.Name, namespace, name string) (*appsv1.Deployment, error) { - return tc.input, nil - }, - patcher: func(clusterName logicalcluster.Name, namespace string) committer.Patcher[*appsv1.Deployment] { - patcher = &mockedPatcher{ - clusterName: clusterName, - namespace: namespace, - } - return patcher - }, - syncerViewRetriever: coordination.NewDefaultSyncerViewManager[*appsv1.Deployment](), - } - - err := controller.processSyncerView(context.Background(), "") - if tc.wantError { - require.Error(t, err) - return - } else { - require.NoError(t, err) - } - - require.Equal(t, tc.appliedPatch, patcher.appliedPatch) - }) - } -} - -func TestDeploymentContentsEqual(t *testing.T) { - tests := map[string]struct { - old, new *appsv1.Deployment - result bool - }{ - "only spec-diff annotation differs": { - old: &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - "experimental.spec-diff.workload.kcp.io/syncTarget1": "value 1", - }, - }, - }, - new: &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - "experimental.spec-diff.workload.kcp.io/syncTarget1": "value 2", - }, - }, - }, - result: true, - }, - "annotations differs": { - old: &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - "experimental.spec-diff.workload.kcp.io/syncTarget1": "value 1", - }, - }, - }, - new: &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - "experimental.spec-diff.workload.kcp.io/syncTarget1": "value 2", - "anotherone": "othervalue", - }, - }, - }, - result: false, - }, - "labels differs": { - old: &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "myLabel1": "value 1", - }, - }, - }, - new: &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "myLabel2": "value 2", - }, - }, - }, - result: false, - }, - "labels equal": { - old: &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "myLabel": "value", - }, - }, - }, - new: &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "myLabel": "value", - }, - }, - }, - 
result: true, - }, - "both replicas are nil - other content differ": { - old: &appsv1.Deployment{ - Spec: appsv1.DeploymentSpec{ - MinReadySeconds: 3, - Replicas: nil, - }, - }, - new: &appsv1.Deployment{ - Spec: appsv1.DeploymentSpec{ - MinReadySeconds: 4, - Replicas: nil, - }, - }, - result: true, - }, - "only old replicas is nil": { - old: &appsv1.Deployment{ - Spec: appsv1.DeploymentSpec{ - Replicas: nil, - }, - }, - new: &appsv1.Deployment{ - Spec: appsv1.DeploymentSpec{ - Replicas: intPtr(4), - }, - }, - result: false, - }, - "only new replicas is nil": { - old: &appsv1.Deployment{ - Spec: appsv1.DeploymentSpec{ - Replicas: intPtr(4), - }, - }, - new: &appsv1.Deployment{ - Spec: appsv1.DeploymentSpec{ - Replicas: nil, - }, - }, - result: false, - }, - "non-nil replicas equal - other content differ": { - old: &appsv1.Deployment{ - Spec: appsv1.DeploymentSpec{ - MinReadySeconds: 3, - Replicas: intPtr(4), - }, - }, - new: &appsv1.Deployment{ - Spec: appsv1.DeploymentSpec{ - MinReadySeconds: 4, - Replicas: intPtr(4), - }, - }, - result: true, - }, - "non-nil replicas diff": { - old: &appsv1.Deployment{ - Spec: appsv1.DeploymentSpec{ - Replicas: intPtr(4), - }, - }, - new: &appsv1.Deployment{ - Spec: appsv1.DeploymentSpec{ - Replicas: intPtr(5), - }, - }, - result: false, - }, - } - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - require.Equal(t, tc.result, deploymentContentsEqual(tc.old, tc.new)) - }) - } -} diff --git a/pkg/reconciler/scheduling/location/location_controller.go b/pkg/reconciler/scheduling/location/location_controller.go deleted file mode 100644 index f43a20fb747..00000000000 --- a/pkg/reconciler/scheduling/location/location_controller.go +++ /dev/null @@ -1,258 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package location - -import ( - "context" - "fmt" - "time" - - kcpcache "github.com/kcp-dev/apimachinery/v2/pkg/cache" - - "k8s.io/apimachinery/pkg/api/equality" - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - utilerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/util/workqueue" - "k8s.io/klog/v2" - - "github.com/kcp-dev/kcp/pkg/logging" - "github.com/kcp-dev/kcp/pkg/reconciler/committer" - schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" - kcpclientset "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster" - schedulingv1alpha1client "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/typed/scheduling/v1alpha1" - schedulingv1alpha1informers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/scheduling/v1alpha1" - workloadv1alpha1informers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/workload/v1alpha1" - schedulingv1alpha1listers "github.com/kcp-dev/kcp/sdk/client/listers/scheduling/v1alpha1" - workloadv1alpha1listers "github.com/kcp-dev/kcp/sdk/client/listers/workload/v1alpha1" -) - -const ( - controllerName = "kcp-scheduling-location-status" -) - -// NewController returns a new controller reconciling location status. -func NewController( - kcpClusterClient kcpclientset.ClusterInterface, - locationInformer schedulingv1alpha1informers.LocationClusterInformer, - syncTargetInformer workloadv1alpha1informers.SyncTargetClusterInformer, -) (*controller, error) { - queue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), controllerName) - - c := &controller{ - queue: queue, - enqueueAfter: func(location *schedulingv1alpha1.Location, duration time.Duration) { - key, err := kcpcache.MetaClusterNamespaceKeyFunc(location) - if err != nil { - runtime.HandleError(err) - return - } - queue.AddAfter(key, duration) - }, - kcpClusterClient: kcpClusterClient, - locationLister: locationInformer.Lister(), - syncTargetLister: syncTargetInformer.Lister(), - commit: committer.NewCommitter[*Location, Patcher, *LocationSpec, *LocationStatus](kcpClusterClient.SchedulingV1alpha1().Locations()), - } - - _, _ = locationInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { c.enqueueLocation(obj) }, - UpdateFunc: func(_, obj interface{}) { c.enqueueLocation(obj) }, - DeleteFunc: func(obj interface{}) { c.enqueueLocation(obj) }, - }) - - _, _ = syncTargetInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { c.enqueueSyncTarget(obj) }, - UpdateFunc: func(old, obj interface{}) { - oldCluster, ok := old.(*workloadv1alpha1.SyncTarget) - if !ok { - return - } - objCluster, ok := obj.(*workloadv1alpha1.SyncTarget) - if !ok { - return - } - - // only enqueue if spec or conditions change. 
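- // To detect this, copy the frequently changing status fields
- // (allocatable, capacity, heartbeat) from the new object onto a copy of
- // the old one before comparing, so that updates to those fields alone do
- // not cause a requeue.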
- oldCluster = oldCluster.DeepCopy() - oldCluster.Status.Allocatable = objCluster.Status.Allocatable - oldCluster.Status.Capacity = objCluster.Status.Capacity - oldCluster.Status.LastSyncerHeartbeatTime = objCluster.Status.LastSyncerHeartbeatTime - - if !equality.Semantic.DeepEqual(oldCluster, objCluster) { - c.enqueueSyncTarget(obj) - } - }, - DeleteFunc: func(obj interface{}) { c.enqueueSyncTarget(obj) }, - }) - - return c, nil -} - -type Location = schedulingv1alpha1.Location -type LocationSpec = schedulingv1alpha1.LocationSpec -type LocationStatus = schedulingv1alpha1.LocationStatus -type Patcher = schedulingv1alpha1client.LocationInterface -type Resource = committer.Resource[*LocationSpec, *LocationStatus] -type CommitFunc = func(context.Context, *Resource, *Resource) error - -// controller. -type controller struct { - queue workqueue.RateLimitingInterface - enqueueAfter func(*schedulingv1alpha1.Location, time.Duration) - - kcpClusterClient kcpclientset.ClusterInterface - - locationLister schedulingv1alpha1listers.LocationClusterLister - syncTargetLister workloadv1alpha1listers.SyncTargetClusterLister - - commit CommitFunc -} - -func (c *controller) enqueueLocation(obj interface{}) { - key, err := kcpcache.DeletionHandlingMetaClusterNamespaceKeyFunc(obj) - if err != nil { - runtime.HandleError(err) - return - } - - logger := logging.WithQueueKey(logging.WithReconciler(klog.Background(), controllerName), key) - logger.V(2).Info("queueing Location") - c.queue.Add(key) -} - -// enqueueSyncTarget maps a SyncTarget to LocationDomain for enqueuing. -func (c *controller) enqueueSyncTarget(obj interface{}) { - key, err := kcpcache.DeletionHandlingMetaClusterNamespaceKeyFunc(obj) - if err != nil { - runtime.HandleError(err) - return - } - lcluster, _, _, err := kcpcache.SplitMetaClusterNamespaceKey(key) - if err != nil { - runtime.HandleError(err) - return - } - domains, err := c.locationLister.Cluster(lcluster).List(labels.Everything()) - if err != nil { - runtime.HandleError(err) - return - } - - for _, domain := range domains { - syncTargetKey := key - key, err := kcpcache.MetaClusterNamespaceKeyFunc(domain) - if err != nil { - runtime.HandleError(err) - return - } - logger := logging.WithQueueKey(logging.WithReconciler(klog.Background(), controllerName), key) - logger.V(2).Info("queueing Location because SyncTarget changed", "SyncTarget", syncTargetKey) - c.queue.Add(key) - } -} - -// Start starts the controller, which stops when ctx.Done() is closed. -func (c *controller) Start(ctx context.Context, numThreads int) { - defer runtime.HandleCrash() - defer c.queue.ShutDown() - - logger := logging.WithReconciler(klog.FromContext(ctx), controllerName) - ctx = klog.NewContext(ctx, logger) - logger.Info("Starting controller") - defer logger.Info("Shutting down controller") - - for i := 0; i < numThreads; i++ { - go wait.UntilWithContext(ctx, c.startWorker, time.Second) - } - - <-ctx.Done() -} - -func (c *controller) startWorker(ctx context.Context) { - for c.processNextWorkItem(ctx) { - } -} - -func (c *controller) processNextWorkItem(ctx context.Context) bool { - // Wait until there is a new item in the working queue - k, quit := c.queue.Get() - if quit { - return false - } - key := k.(string) - - logger := logging.WithQueueKey(klog.FromContext(ctx), key) - ctx = klog.NewContext(ctx, logger) - logger.V(1).Info("processing key") - - // No matter what, tell the queue we're done with this key, to unblock - // other workers. 
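- // Failed keys are re-queued with rate limiting; successful ones are
- // forgotten so the rate limiter resets its backoff for them.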
- defer c.queue.Done(key) - - if err := c.process(ctx, key); err != nil { - runtime.HandleError(fmt.Errorf("%q controller failed to sync %q, err: %w", controllerName, key, err)) - c.queue.AddRateLimited(key) - return true - } - c.queue.Forget(key) - return true -} - -func (c *controller) process(ctx context.Context, key string) error { - logger := klog.FromContext(ctx) - clusterName, _, name, err := kcpcache.SplitMetaClusterNamespaceKey(key) - if err != nil { - logger.Error(err, "invalid key") - return nil - } - - obj, err := c.locationLister.Cluster(clusterName).Get(name) - if err != nil { - if apierrors.IsNotFound(err) { - return nil // object deleted before we handled it - } - return err - } - - old := obj - obj = obj.DeepCopy() - - logger = logging.WithObject(logger, obj) - ctx = klog.NewContext(ctx, logger) - - var errs []error - if err := c.reconcile(ctx, obj); err != nil { - errs = append(errs, err) - } - - // Regardless of whether reconcile returned an error or not, always try to patch status if needed. Return the - // reconciliation error at the end. - - // If the object being reconciled changed as a result, update it. - oldResource := &Resource{ObjectMeta: old.ObjectMeta, Spec: &old.Spec, Status: &old.Status} - newResource := &Resource{ObjectMeta: obj.ObjectMeta, Spec: &obj.Spec, Status: &obj.Status} - if err := c.commit(ctx, oldResource, newResource); err != nil { - errs = append(errs, err) - } - - return utilerrors.NewAggregate(errs) -} diff --git a/pkg/reconciler/scheduling/location/location_reconcile.go b/pkg/reconciler/scheduling/location/location_reconcile.go deleted file mode 100644 index 4eadd04d23b..00000000000 --- a/pkg/reconciler/scheduling/location/location_reconcile.go +++ /dev/null @@ -1,131 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package location - -import ( - "context" - "fmt" - "sort" - "time" - - "github.com/kcp-dev/logicalcluster/v3" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - utilserrors "k8s.io/apimachinery/pkg/util/errors" - - schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" -) - -type reconcileStatus int - -const ( - reconcileStatusStop reconcileStatus = iota - reconcileStatusContinue -) - -type reconciler interface { - reconcile(ctx context.Context, location *schedulingv1alpha1.Location) (reconcileStatus, error) -} - -// statusReconciler reconciles Location objects' status. 
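-// It mirrors the location's labels into a printable annotation and
-// recomputes the total and available instance counts from the SyncTargets
-// matching the location's instance selector.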
-type statusReconciler struct { - listSyncTargets func(clusterName logicalcluster.Name) ([]*workloadv1alpha1.SyncTarget, error) - updateLocation func(ctx context.Context, clusterName logicalcluster.Path, location *schedulingv1alpha1.Location) (*schedulingv1alpha1.Location, error) - enqueueAfter func(*schedulingv1alpha1.Location, time.Duration) -} - -func (r *statusReconciler) reconcile(ctx context.Context, location *schedulingv1alpha1.Location) (reconcileStatus, error) { - clusterName := logicalcluster.From(location) - syncTargets, err := r.listSyncTargets(clusterName) - if err != nil { - return reconcileStatusStop, err - } - - // compute label string to be used in a table column - locationLabels := make(map[string]string, len(location.Labels)) - sorted := make([]string, 0, len(location.Labels)) - for k, v := range location.Labels { - locationLabels[k] = v - sorted = append(sorted, k) - } - sort.Strings(sorted) - labelString := "" - for i, k := range sorted { - if i > 0 { - labelString += " " - } - labelString += fmt.Sprintf("%s=%s", k, locationLabels[k]) - } - if labelString != location.Annotations[schedulingv1alpha1.LocationLabelsStringAnnotationKey] { - if location.Annotations == nil { - location.Annotations = make(map[string]string, 1) - } - location.Annotations[schedulingv1alpha1.LocationLabelsStringAnnotationKey] = labelString - if location, err = r.updateLocation(ctx, clusterName.Path(), location); err != nil { - return reconcileStatusStop, err - } - } - - // update status - locationClusters, err := LocationSyncTargets(syncTargets, location) - if err != nil { - return reconcileStatusStop, err - } - available := len(FilterReady(locationClusters)) - location.Status.Instances = uInt32(uint32(len(locationClusters))) - location.Status.AvailableInstances = uInt32(uint32(available)) - - return reconcileStatusContinue, nil -} - -func uInt32(i uint32) *uint32 { - return &i -} - -func (c *controller) reconcile(ctx context.Context, location *schedulingv1alpha1.Location) error { - reconcilers := []reconciler{ - &statusReconciler{ - listSyncTargets: c.listSyncTarget, - updateLocation: c.updateLocation, - enqueueAfter: c.enqueueAfter, - }, - } - - var errs []error - - for _, r := range reconcilers { - status, err := r.reconcile(ctx, location) - if err != nil { - errs = append(errs, err) - } - if status == reconcileStatusStop { - break - } - } - - return utilserrors.NewAggregate(errs) -} - -func (c *controller) listSyncTarget(clusterName logicalcluster.Name) ([]*workloadv1alpha1.SyncTarget, error) { - return c.syncTargetLister.Cluster(clusterName).List(labels.Everything()) -} - -func (c *controller) updateLocation(ctx context.Context, clusterName logicalcluster.Path, location *schedulingv1alpha1.Location) (*schedulingv1alpha1.Location, error) { - return c.kcpClusterClient.Cluster(clusterName).SchedulingV1alpha1().Locations().Update(ctx, location, metav1.UpdateOptions{}) -} diff --git a/pkg/reconciler/scheduling/location/location_reconcile_test.go b/pkg/reconciler/scheduling/location/location_reconcile_test.go deleted file mode 100644 index c7537792b50..00000000000 --- a/pkg/reconciler/scheduling/location/location_reconcile_test.go +++ /dev/null @@ -1,227 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package location - -import ( - "context" - "testing" - "time" - - "github.com/kcp-dev/logicalcluster/v3" - "github.com/stretchr/testify/require" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/yaml" - - schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1" - conditionsv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/apis/conditions/v1alpha1" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" -) - -type LocationCheck func(t *testing.T, l *schedulingv1alpha1.Location) - -func availableInstances(expected uint32) func(t *testing.T, l *schedulingv1alpha1.Location) { - return func(t *testing.T, got *schedulingv1alpha1.Location) { - t.Helper() - require.NotNilf(t, got.Status.AvailableInstances, "expected %d available instances, not nil", expected) - require.Equal(t, expected, *got.Status.AvailableInstances) - } -} - -func instances(expected uint32) func(t *testing.T, l *schedulingv1alpha1.Location) { - return func(t *testing.T, got *schedulingv1alpha1.Location) { - t.Helper() - require.NotNilf(t, got.Status.Instances, "expected %d instances, not nil", expected) - require.Equal(t, expected, *got.Status.Instances) - } -} - -func labelString(expected string) func(t *testing.T, l *schedulingv1alpha1.Location) { - return func(t *testing.T, got *schedulingv1alpha1.Location) { - t.Helper() - require.Equal(t, expected, got.Annotations["scheduling.kcp.io/labels"]) - } -} - -func and(fns ...LocationCheck) LocationCheck { - return func(t *testing.T, l *schedulingv1alpha1.Location) { - t.Helper() - for _, fn := range fns { - fn(t, l) - } - } -} - -func TestLocationStatusReconciler(t *testing.T) { - usEast1 := &schedulingv1alpha1.Location{ - ObjectMeta: metav1.ObjectMeta{ - Name: "us-east1", - Labels: map[string]string{ - "continent": "north-america", - "country": "usa", - }, - Annotations: map[string]string{ - logicalcluster.AnnotationKey: "root:org:negotiation-workspace", - "scheduling.kcp.io/labels": "continent=north-america country=usa", - }, - }, - Spec: schedulingv1alpha1.LocationSpec{ - Resource: schedulingv1alpha1.GroupVersionResource{ - Group: "workload.kcp.io", - Version: "v1alpha1", - Resource: "synctargets", - }, - Description: "Big region at the east coast of the US", - AvailableSelectorLabels: []schedulingv1alpha1.AvailableSelectorLabel{ - {Key: "gpu", Values: []schedulingv1alpha1.LabelValue{"true"}}, - {Key: "cloud", Values: []schedulingv1alpha1.LabelValue{"aws", "gcp", "azure", "ibm"}}, - }, - InstanceSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"region": "us-east1"}}, - }, - } - usEast1WithoutLabelString := usEast1.DeepCopy() - usEast1WithoutLabelString.Annotations = nil - - tests := map[string]struct { - location *schedulingv1alpha1.Location - syncTargets map[logicalcluster.Path][]*workloadv1alpha1.SyncTarget - - listSyncTargetError error - updateLocationError error - - wantLocation LocationCheck - wantUpdates map[string]LocationCheck - wantReconcileStatus reconcileStatus - wantRequeue time.Duration - wantError bool - }{ - "no SyncTargets": { - location: usEast1, - wantLocation: 
and(availableInstances(0), instances(0), labelString("continent=north-america country=usa")), - wantReconcileStatus: reconcileStatusContinue, - }, - "no SyncTargets, different label string": { - location: usEast1WithoutLabelString, - wantLocation: and(availableInstances(0), instances(0), labelString("continent=north-america country=usa")), - wantUpdates: map[string]LocationCheck{ - "us-east1": labelString("continent=north-america country=usa"), - }, - wantReconcileStatus: reconcileStatusContinue, - }, - "with sync targets, across two regions": { - location: usEast1, - syncTargets: map[logicalcluster.Path][]*workloadv1alpha1.SyncTarget{ - logicalcluster.NewPath("root:org:negotiation-workspace"): { - withLabels(cluster("us-east1-1"), map[string]string{"region": "us-east1"}), - withLabels(withConditions(cluster("us-east1-2"), conditionsv1alpha1.Condition{Type: "Ready", Status: "False"}), map[string]string{"region": "us-east1"}), - withLabels(withConditions(cluster("us-east1-3"), conditionsv1alpha1.Condition{Type: "Ready", Status: "True"}), map[string]string{"region": "us-east1"}), - withLabels(unschedulable(withConditions(cluster("us-east1-4"), conditionsv1alpha1.Condition{Type: "Ready", Status: "True"})), map[string]string{"region": "us-east1"}), - - withLabels(withConditions(cluster("us-west1-1"), conditionsv1alpha1.Condition{Type: "Ready", Status: "True"}), map[string]string{"region": "us-west1"}), - withLabels(withConditions(cluster("us-west1-2"), conditionsv1alpha1.Condition{Type: "Ready", Status: "True"}), map[string]string{"region": "us-west1"}), - }, - logicalcluster.NewPath("root:org:somewhere-else"): { - cluster("us-east1-1"), - cluster("us-east1-2"), - }, - }, - wantLocation: and(availableInstances(1), instances(4)), - wantReconcileStatus: reconcileStatusContinue, - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - var requeuedAfter time.Duration - updates := map[string]*schedulingv1alpha1.Location{} - r := &statusReconciler{ - listSyncTargets: func(clusterName logicalcluster.Name) ([]*workloadv1alpha1.SyncTarget, error) { - if tc.listSyncTargetError != nil { - return nil, tc.listSyncTargetError - } - return tc.syncTargets[clusterName.Path()], nil - }, - updateLocation: func(ctx context.Context, clusterName logicalcluster.Path, location *schedulingv1alpha1.Location) (*schedulingv1alpha1.Location, error) { - if tc.updateLocationError != nil { - return nil, tc.updateLocationError - } - updates[location.Name] = location.DeepCopy() - return location, nil - }, - enqueueAfter: func(domain *schedulingv1alpha1.Location, duration time.Duration) { - requeuedAfter = duration - }, - } - - location := tc.location.DeepCopy() - status, err := r.reconcile(context.Background(), location) - if tc.wantError { - require.Error(t, err) - } else { - require.NoError(t, err) - } - - require.Equal(t, status, tc.wantReconcileStatus) - require.Equal(t, tc.wantRequeue, requeuedAfter) - tc.wantLocation(t, location) - - // check updates - for _, l := range updates { - require.Contains(t, tc.wantUpdates, l.Name, "got unexpected update:\n%s", toYaml(l)) - tc.wantUpdates[l.Name](t, l) - } - for name := range tc.wantUpdates { - require.Contains(t, updates, name, "missing update for %s", name) - } - }) - } -} - -func cluster(name string) *workloadv1alpha1.SyncTarget { - ret := &workloadv1alpha1.SyncTarget{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Spec: workloadv1alpha1.SyncTargetSpec{}, - Status: workloadv1alpha1.SyncTargetStatus{}, - } - - return ret -} - -func 
withLabels(cluster *workloadv1alpha1.SyncTarget, labels map[string]string) *workloadv1alpha1.SyncTarget { - cluster.Labels = labels - return cluster -} - -func withConditions(cluster *workloadv1alpha1.SyncTarget, conditions ...conditionsv1alpha1.Condition) *workloadv1alpha1.SyncTarget { - cluster.Status.Conditions = conditions - return cluster -} - -func unschedulable(cluster *workloadv1alpha1.SyncTarget) *workloadv1alpha1.SyncTarget { - cluster.Spec.Unschedulable = true - return cluster -} - -func toYaml(obj interface{}) string { - bytes, err := yaml.Marshal(obj) - if err != nil { - panic(err) - } - return string(bytes) -} diff --git a/pkg/reconciler/scheduling/location/workloadcluster_helpers.go b/pkg/reconciler/scheduling/location/workloadcluster_helpers.go deleted file mode 100644 index 99d035ff9b0..00000000000 --- a/pkg/reconciler/scheduling/location/workloadcluster_helpers.go +++ /dev/null @@ -1,70 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package location - -import ( - "fmt" - "time" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - - schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1" - conditionsv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/apis/conditions/v1alpha1" - "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/util/conditions" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" -) - -// LocationSyncTargets returns a list of sync targets that match the given location definition. -func LocationSyncTargets(syncTargets []*workloadv1alpha1.SyncTarget, location *schedulingv1alpha1.Location) (ret []*workloadv1alpha1.SyncTarget, err error) { - sel, err := metav1.LabelSelectorAsSelector(location.Spec.InstanceSelector) - if err != nil { - return nil, fmt.Errorf("failed to parse label selector %v in location %s: %w", location.Spec.InstanceSelector, location.Name, err) - } - - // find location clusters - for _, wc := range syncTargets { - if sel.Matches(labels.Set(wc.Labels)) { - ret = append(ret, wc) - } - } - - return ret, nil -} - -// FilterReady returns the ready sync targets. -func FilterReady(syncTargets []*workloadv1alpha1.SyncTarget) []*workloadv1alpha1.SyncTarget { - ready := make([]*workloadv1alpha1.SyncTarget, 0, len(syncTargets)) - for _, wc := range syncTargets { - if conditions.IsTrue(wc, conditionsv1alpha1.ReadyCondition) && !wc.Spec.Unschedulable { - ready = append(ready, wc) - } - } - return ready -} - -// FilterNonEvicting filters out the evicting sync targets. 
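-// A sync target counts as evicting once its spec.evictAfter timestamp lies
-// in the past.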
-func FilterNonEvicting(syncTargets []*workloadv1alpha1.SyncTarget) []*workloadv1alpha1.SyncTarget { - ret := make([]*workloadv1alpha1.SyncTarget, 0, len(syncTargets)) - now := time.Now() - for _, wc := range syncTargets { - if wc.Spec.EvictAfter == nil || now.Before(wc.Spec.EvictAfter.Time) { - ret = append(ret, wc) - } - } - return ret -} diff --git a/pkg/reconciler/scheduling/placement/placement_controller.go b/pkg/reconciler/scheduling/placement/placement_controller.go deleted file mode 100644 index 79204683fb8..00000000000 --- a/pkg/reconciler/scheduling/placement/placement_controller.go +++ /dev/null @@ -1,350 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package placement - -import ( - "context" - "fmt" - "reflect" - "time" - - kcpcache "github.com/kcp-dev/apimachinery/v2/pkg/cache" - kcpcorev1informers "github.com/kcp-dev/client-go/informers/core/v1" - corev1listers "github.com/kcp-dev/client-go/listers/core/v1" - "github.com/kcp-dev/logicalcluster/v3" - - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - utilerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/util/workqueue" - "k8s.io/klog/v2" - - "github.com/kcp-dev/kcp/pkg/indexers" - "github.com/kcp-dev/kcp/pkg/logging" - "github.com/kcp-dev/kcp/pkg/reconciler/committer" - "github.com/kcp-dev/kcp/sdk/apis/core" - schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1" - kcpclientset "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster" - schedulingv1alpha1client "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/typed/scheduling/v1alpha1" - schedulingv1alpha1informers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/scheduling/v1alpha1" - schedulingv1alpha1listers "github.com/kcp-dev/kcp/sdk/client/listers/scheduling/v1alpha1" -) - -const ( - ControllerName = "kcp-scheduling-placement" - byLocationWorkspace = ControllerName + "-byLocationWorkspace" -) - -// NewController returns a new controller placing namespaces onto locations by create -// a placement annotation.. 
-func NewController( - kcpClusterClient kcpclientset.ClusterInterface, - namespaceInformer kcpcorev1informers.NamespaceClusterInformer, - locationInformer, globalLocationInformer schedulingv1alpha1informers.LocationClusterInformer, - placementInformer schedulingv1alpha1informers.PlacementClusterInformer, -) (*controller, error) { - queue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), ControllerName) - - c := &controller{ - queue: queue, - enqueueAfter: func(ns *corev1.Namespace, duration time.Duration) { - key, err := kcpcache.MetaClusterNamespaceKeyFunc(ns) - if err != nil { - runtime.HandleError(err) - return - } - queue.AddAfter(key, duration) - }, - kcpClusterClient: kcpClusterClient, - - namespaceLister: namespaceInformer.Lister(), - - listLocationsByPath: func(path logicalcluster.Path) ([]*schedulingv1alpha1.Location, error) { - objs, err := indexers.ByIndexWithFallback[*schedulingv1alpha1.Location](locationInformer.Informer().GetIndexer(), globalLocationInformer.Informer().GetIndexer(), indexers.ByLogicalClusterPath, path.String()) - if err != nil { - return nil, err - } - return objs, nil - }, - - placementLister: placementInformer.Lister(), - placementIndexer: placementInformer.Informer().GetIndexer(), - - commit: committer.NewCommitter[*Placement, Patcher, *PlacementSpec, *PlacementStatus](kcpClusterClient.SchedulingV1alpha1().Placements()), - } - - if err := placementInformer.Informer().AddIndexers(cache.Indexers{ - byLocationWorkspace: indexByLocationWorkspace, - }); err != nil { - return nil, err - } - - indexers.AddIfNotPresentOrDie(locationInformer.Informer().GetIndexer(), cache.Indexers{ - indexers.ByLogicalClusterPath: indexers.IndexByLogicalClusterPath, - }) - indexers.AddIfNotPresentOrDie(globalLocationInformer.Informer().GetIndexer(), cache.Indexers{ - indexers.ByLogicalClusterPath: indexers.IndexByLogicalClusterPath, - }) - - // namespaceBlocklist holds a set of namespaces that should never be synced from kcp to physical clusters. 
- var namespaceBlocklist = sets.New[string]("kube-system", "kube-public", "kube-node-lease") - _, _ = namespaceInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ - FilterFunc: func(obj interface{}) bool { - switch ns := obj.(type) { - case *corev1.Namespace: - return !namespaceBlocklist.Has(ns.Name) - case cache.DeletedFinalStateUnknown: - return true - default: - return false - } - }, - Handler: cache.ResourceEventHandlerFuncs{ - AddFunc: c.enqueueNamespace, - UpdateFunc: func(old, obj interface{}) { - oldNs := old.(*corev1.Namespace) - newNs := obj.(*corev1.Namespace) - - if !reflect.DeepEqual(oldNs.Annotations, newNs.Annotations) { - c.enqueueNamespace(obj) - } - }, - DeleteFunc: c.enqueueNamespace, - }, - }) - - _, _ = locationInformer.Informer().AddEventHandler( - cache.ResourceEventHandlerFuncs{ - AddFunc: c.enqueueLocation, - UpdateFunc: func(old, obj interface{}) { - oldLoc := old.(*schedulingv1alpha1.Location) - newLoc := obj.(*schedulingv1alpha1.Location) - if !reflect.DeepEqual(oldLoc.Spec, newLoc.Spec) || !reflect.DeepEqual(oldLoc.Labels, newLoc.Labels) { - c.enqueueLocation(obj) - } - }, - DeleteFunc: c.enqueueLocation, - }, - ) - - _, _ = placementInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: c.enqueuePlacement, - UpdateFunc: func(_, obj interface{}) { c.enqueuePlacement(obj) }, - DeleteFunc: c.enqueuePlacement, - }) - - return c, nil -} - -type Placement = schedulingv1alpha1.Placement -type PlacementSpec = schedulingv1alpha1.PlacementSpec -type PlacementStatus = schedulingv1alpha1.PlacementStatus -type Patcher = schedulingv1alpha1client.PlacementInterface -type Resource = committer.Resource[*PlacementSpec, *PlacementStatus] -type CommitFunc = func(context.Context, *Resource, *Resource) error - -// controller. -type controller struct { - queue workqueue.RateLimitingInterface - enqueueAfter func(*corev1.Namespace, time.Duration) - - kcpClusterClient kcpclientset.ClusterInterface - - namespaceLister corev1listers.NamespaceClusterLister - - listLocationsByPath func(path logicalcluster.Path) ([]*schedulingv1alpha1.Location, error) - - placementLister schedulingv1alpha1listers.PlacementClusterLister - placementIndexer cache.Indexer - - commit CommitFunc -} - -func (c *controller) enqueuePlacement(obj interface{}) { - key, err := kcpcache.DeletionHandlingMetaClusterNamespaceKeyFunc(obj) - if err != nil { - runtime.HandleError(err) - return - } - - logger := logging.WithQueueKey(logging.WithReconciler(klog.Background(), ControllerName), key) - logger.V(2).Info("queueing Placement") - c.queue.Add(key) -} - -// enqueueNamespace enqueues all placements for the namespace. 
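The event wiring above combines two idioms worth noting before they disappear: a filtering handler drops system namespaces before they ever reach the workqueue, and the update funcs only enqueue when fields the controller actually consumes have changed. A compilable sketch of that combination, with hypothetical package and function names and standard client-go/apimachinery imports:

    package handlers

    import (
    	"reflect"

    	corev1 "k8s.io/api/core/v1"
    	"k8s.io/apimachinery/pkg/util/sets"
    	"k8s.io/client-go/tools/cache"
    )

    func namespaceHandler(enqueue func(obj interface{})) cache.ResourceEventHandler {
    	blocklist := sets.New[string]("kube-system", "kube-public", "kube-node-lease")
    	return cache.FilteringResourceEventHandler{
    		FilterFunc: func(obj interface{}) bool {
    			switch ns := obj.(type) {
    			case *corev1.Namespace:
    				return !blocklist.Has(ns.Name) // never schedule system namespaces
    			case cache.DeletedFinalStateUnknown:
    				return true // let tombstones through so state can be cleaned up
    			default:
    				return false
    			}
    		},
    		Handler: cache.ResourceEventHandlerFuncs{
    			AddFunc:    enqueue,
    			DeleteFunc: enqueue,
    			UpdateFunc: func(old, obj interface{}) {
    				// Only annotation changes matter for placement, so skip
    				// no-op updates instead of flooding the queue.
    				if !reflect.DeepEqual(old.(*corev1.Namespace).Annotations, obj.(*corev1.Namespace).Annotations) {
    					enqueue(obj)
    				}
    			},
    		},
    	}
    }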
-func (c *controller) enqueueNamespace(obj interface{}) { - logger := logging.WithReconciler(klog.Background(), ControllerName) - key, err := kcpcache.DeletionHandlingMetaClusterNamespaceKeyFunc(obj) - if err != nil { - runtime.HandleError(err) - return - } - clusterName, _, _, err := kcpcache.SplitMetaClusterNamespaceKey(key) - if err != nil { - runtime.HandleError(err) - return - } - - placements, err := c.placementLister.Cluster(clusterName).List(labels.Everything()) - if err != nil { - runtime.HandleError(err) - return - } - - for _, placement := range placements { - namespaceKey := key - key, err := kcpcache.MetaClusterNamespaceKeyFunc(placement) - if err != nil { - runtime.HandleError(err) - continue - } - logging.WithQueueKey(logger, key).V(2).Info("queueing Placement because Namespace changed", "Namespace", namespaceKey) - c.queue.Add(key) - } -} - -func (c *controller) enqueueLocation(obj interface{}) { - logger := logging.WithReconciler(klog.Background(), ControllerName) - if tombstone, ok := obj.(cache.DeletedFinalStateUnknown); ok { - obj = tombstone.Obj - } - - location, ok := obj.(*schedulingv1alpha1.Location) - if !ok { - runtime.HandleError(fmt.Errorf("unexpected object type: %T", obj)) - return - } - - // placements referencing by cluster name - placements, err := c.placementIndexer.ByIndex(byLocationWorkspace, logicalcluster.From(location).String()) - if err != nil { - runtime.HandleError(err) - return - } - if path := location.Annotations[core.LogicalClusterPathAnnotationKey]; path != "" { - // placements referencing by path - placementsByPath, err := c.placementIndexer.ByIndex(byLocationWorkspace, path) - if err != nil { - runtime.HandleError(err) - return - } - placements = append(placements, placementsByPath...) - } - - for _, obj := range placements { - placement := obj.(*schedulingv1alpha1.Placement) - key, err := kcpcache.MetaClusterNamespaceKeyFunc(placement) - if err != nil { - runtime.HandleError(err) - continue - } - logging.WithQueueKey(logger, key).V(2).Info("queueing Placement because Location changed") - c.queue.Add(key) - } -} - -// Start starts the controller, which stops when ctx.Done() is closed. -func (c *controller) Start(ctx context.Context, numThreads int) { - defer runtime.HandleCrash() - defer c.queue.ShutDown() - - logger := logging.WithReconciler(klog.FromContext(ctx), ControllerName) - ctx = klog.NewContext(ctx, logger) - logger.Info("Starting controller") - defer logger.Info("Shutting down controller") - - for i := 0; i < numThreads; i++ { - go wait.UntilWithContext(ctx, c.startWorker, time.Second) - } - - <-ctx.Done() -} - -func (c *controller) startWorker(ctx context.Context) { - for c.processNextWorkItem(ctx) { - } -} - -func (c *controller) processNextWorkItem(ctx context.Context) bool { - // Wait until there is a new item in the working queue - k, quit := c.queue.Get() - if quit { - return false - } - key := k.(string) - - logger := logging.WithQueueKey(klog.FromContext(ctx), key) - ctx = klog.NewContext(ctx, logger) - logger.V(1).Info("processing key") - - // No matter what, tell the queue we're done with this key, to unblock - // other workers. 
- defer c.queue.Done(key) - - if err := c.process(ctx, key); err != nil { - runtime.HandleError(fmt.Errorf("%q controller failed to sync %q, err: %w", ControllerName, key, err)) - c.queue.AddRateLimited(key) - return true - } - c.queue.Forget(key) - return true -} - -func (c *controller) process(ctx context.Context, key string) error { - logger := klog.FromContext(ctx) - clusterName, _, name, err := kcpcache.SplitMetaClusterNamespaceKey(key) - if err != nil { - logger.Error(err, "invalid key") - return nil - } - - obj, err := c.placementLister.Cluster(clusterName).Get(name) - if err != nil { - if apierrors.IsNotFound(err) { - return nil // object deleted before we handled it - } - return err - } - old := obj - obj = obj.DeepCopy() - - logger = logging.WithObject(logger, obj) - ctx = klog.NewContext(ctx, logger) - - var errs []error - if err := c.reconcile(ctx, obj); err != nil { - errs = append(errs, err) - } - - // Regardless of whether reconcile returned an error or not, always try to patch status if needed. Return the - // reconciliation error at the end. - - // If the object being reconciled changed as a result, update it. - oldResource := &Resource{ObjectMeta: old.ObjectMeta, Spec: &old.Spec, Status: &old.Status} - newResource := &Resource{ObjectMeta: obj.ObjectMeta, Spec: &obj.Spec, Status: &obj.Status} - if err := c.commit(ctx, oldResource, newResource); err != nil { - errs = append(errs, err) - } - - return utilerrors.NewAggregate(errs) -} diff --git a/pkg/reconciler/scheduling/placement/placement_indexes.go b/pkg/reconciler/scheduling/placement/placement_indexes.go deleted file mode 100644 index 3a3d59e80ba..00000000000 --- a/pkg/reconciler/scheduling/placement/placement_indexes.go +++ /dev/null @@ -1,38 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package placement - -import ( - "fmt" - - "github.com/kcp-dev/logicalcluster/v3" - - schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1" -) - -func indexByLocationWorkspace(obj interface{}) ([]string, error) { - placement, ok := obj.(*schedulingv1alpha1.Placement) - if !ok { - return []string{}, fmt.Errorf("obj is supposed to be a Placement, but is %T", obj) - } - - if len(placement.Spec.LocationWorkspace) == 0 { - return []string{logicalcluster.From(placement).String()}, nil - } - - return []string{placement.Spec.LocationWorkspace}, nil -} diff --git a/pkg/reconciler/scheduling/placement/placement_reconcile.go b/pkg/reconciler/scheduling/placement/placement_reconcile.go deleted file mode 100644 index 7d75538ef80..00000000000 --- a/pkg/reconciler/scheduling/placement/placement_reconcile.go +++ /dev/null @@ -1,82 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package placement
-
-import (
-	"context"
-
-	"github.com/kcp-dev/logicalcluster/v3"
-
-	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/labels"
-	utilserrors "k8s.io/apimachinery/pkg/util/errors"
-
-	schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1"
-)
-
-type reconcileStatus int
-
-const (
-	reconcileStatusStop reconcileStatus = iota
-	reconcileStatusContinue
-)
-
-type reconciler interface {
-	reconcile(ctx context.Context, placement *schedulingv1alpha1.Placement) (reconcileStatus, *schedulingv1alpha1.Placement, error)
-}
-
-func (c *controller) reconcile(ctx context.Context, placement *schedulingv1alpha1.Placement) error {
-	reconcilers := []reconciler{
-		&placementReconciler{
-			listLocationsByPath: c.listLocationsByPath,
-		},
-		&placementNamespaceReconciler{
-			listNamespacesWithAnnotation: c.listNamespacesWithAnnotation,
-		},
-	}
-
-	var errs []error
-
-	for _, r := range reconcilers {
-		var err error
-		var status reconcileStatus
-		status, placement, err = r.reconcile(ctx, placement)
-		if err != nil {
-			errs = append(errs, err)
-		}
-		if status == reconcileStatusStop {
-			break
-		}
-	}
-
-	return utilserrors.NewAggregate(errs)
-}
-
-func (c *controller) listNamespacesWithAnnotation(clusterName logicalcluster.Name) ([]*corev1.Namespace, error) {
-	items, err := c.namespaceLister.Cluster(clusterName).List(labels.Everything())
-	if err != nil {
-		return nil, err
-	}
-	ret := make([]*corev1.Namespace, 0, len(items))
-	for _, ns := range items {
-		_, foundPlacement := ns.Annotations[schedulingv1alpha1.PlacementAnnotationKey]
-		if foundPlacement {
-			ret = append(ret, ns)
-		}
-	}
-	return ret, nil
-}
diff --git a/pkg/reconciler/scheduling/placement/placement_reconcile_namespace.go b/pkg/reconciler/scheduling/placement/placement_reconcile_namespace.go
deleted file mode 100644
index e4eadfc86cf..00000000000
--- a/pkg/reconciler/scheduling/placement/placement_reconcile_namespace.go
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
-Copyright 2022 The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package placement
-
-import (
-	"context"
-
-	"github.com/kcp-dev/logicalcluster/v3"
-
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/labels"
-	utilserrors "k8s.io/apimachinery/pkg/util/errors"
-
-	schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1"
-)
-
-// placementNamespaceReconciler checks the namespaces bound to this placement and sets the phase.
-// If there is at least one namespace bound to this placement, the placement is in bound state.
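The reconcile function earlier in this hunk (and its twin in the workload controllers further down) follows a recurring idiom in this codebase: run an ordered chain of sub-reconcilers, collect errors instead of aborting on the first one, and let any step short-circuit the rest. A generic, dependency-light sketch of that idiom, with illustrative names:

    package chain

    import (
    	utilerrors "k8s.io/apimachinery/pkg/util/errors"
    )

    // status mirrors reconcileStatus above: stop short-circuits the chain.
    type status int

    const (
    	stop status = iota
    	cont
    )

    // step is one sub-reconciler; it may mutate obj in place.
    type step[T any] func(obj T) (status, error)

    // run executes the steps in order. Errors are aggregated rather than
    // aborting, so every step that ran still surfaces its failure; a step
    // returning stop prevents later steps from running.
    func run[T any](obj T, steps ...step[T]) error {
    	var errs []error
    	for _, s := range steps {
    		st, err := s(obj)
    		if err != nil {
    			errs = append(errs, err)
    		}
    		if st == stop {
    			break
    		}
    	}
    	return utilerrors.NewAggregate(errs)
    }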
-type placementNamespaceReconciler struct {
-	listNamespacesWithAnnotation func(clusterName logicalcluster.Name) ([]*corev1.Namespace, error)
-}
-
-func (r *placementNamespaceReconciler) reconcile(ctx context.Context, placement *schedulingv1alpha1.Placement) (reconcileStatus, *schedulingv1alpha1.Placement, error) {
-	if placement.Status.Phase == schedulingv1alpha1.PlacementPending {
-		return reconcileStatusContinue, placement, nil
-	}
-
-	if placement.Status.SelectedLocation == nil {
-		placement.Status.Phase = schedulingv1alpha1.PlacementPending
-		return reconcileStatusContinue, placement, nil
-	}
-
-	// Find all namespaces matching the placement's namespace selector that also carry the placement annotation key.
-	nss, err := r.selectNamespaces(placement)
-	if err != nil {
-		return reconcileStatusContinue, placement, err
-	}
-
-	if len(nss) > 0 {
-		placement.Status.Phase = schedulingv1alpha1.PlacementBound
-	} else {
-		placement.Status.Phase = schedulingv1alpha1.PlacementUnbound
-	}
-
-	return reconcileStatusContinue, placement, err
-}
-
-func (r *placementNamespaceReconciler) selectNamespaces(placement *schedulingv1alpha1.Placement) ([]*corev1.Namespace, error) {
-	clusterName := logicalcluster.From(placement)
-	nss, err := r.listNamespacesWithAnnotation(clusterName)
-	if err != nil {
-		return nil, err
-	}
-
-	selector, err := metav1.LabelSelectorAsSelector(placement.Spec.NamespaceSelector)
-	if err != nil {
-		return nil, err
-	}
-
-	candidates := []*corev1.Namespace{}
-	var errs []error
-	for _, ns := range nss {
-		if !selector.Matches(labels.Set(ns.Labels)) {
-			continue
-		}
-
-		candidates = append(candidates, ns)
-	}
-
-	return candidates, utilserrors.NewAggregate(errs)
-}
diff --git a/pkg/reconciler/scheduling/placement/placement_reconcile_namespace_test.go b/pkg/reconciler/scheduling/placement/placement_reconcile_namespace_test.go
deleted file mode 100644
index 94c631d75cd..00000000000
--- a/pkg/reconciler/scheduling/placement/placement_reconcile_namespace_test.go
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
-Copyright 2022 The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/ - -package placement - -import ( - "context" - "testing" - - "github.com/kcp-dev/logicalcluster/v3" - "github.com/stretchr/testify/require" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1" -) - -func TestPlacementPhase(t *testing.T) { - testCases := []struct { - name string - ns *corev1.Namespace - phase schedulingv1alpha1.PlacementPhase - namespaceSelector *metav1.LabelSelector - selectedLocation *schedulingv1alpha1.LocationReference - expectedPhase schedulingv1alpha1.PlacementPhase - }{ - { - name: "placement is pending", - phase: schedulingv1alpha1.PlacementPending, - expectedPhase: schedulingv1alpha1.PlacementPending, - }, - { - name: "placement has no selected location", - phase: schedulingv1alpha1.PlacementUnbound, - expectedPhase: schedulingv1alpha1.PlacementPending, - }, - { - name: "namespace does not have placement annotation", - phase: schedulingv1alpha1.PlacementUnbound, - selectedLocation: &schedulingv1alpha1.LocationReference{ - Path: "root", - LocationName: "test-location", - }, - expectedPhase: schedulingv1alpha1.PlacementUnbound, - }, - { - name: "namespace does not bound to this placement", - phase: schedulingv1alpha1.PlacementBound, - ns: &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "testns"}}, - selectedLocation: &schedulingv1alpha1.LocationReference{ - Path: "root", - LocationName: "test-location", - }, - expectedPhase: schedulingv1alpha1.PlacementUnbound, - }, - { - name: "namespace bound to this placement, but not select by placement", - phase: schedulingv1alpha1.PlacementBound, - ns: &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "testns"}}, - selectedLocation: &schedulingv1alpha1.LocationReference{ - Path: "root", - LocationName: "test-location", - }, - expectedPhase: schedulingv1alpha1.PlacementUnbound, - }, - { - name: "namespace bound to this placement", - phase: schedulingv1alpha1.PlacementUnbound, - namespaceSelector: &metav1.LabelSelector{}, - ns: &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "testns"}}, - selectedLocation: &schedulingv1alpha1.LocationReference{ - Path: "root", - LocationName: "test-location", - }, - expectedPhase: schedulingv1alpha1.PlacementBound, - }, - } - - for _, testCase := range testCases { - t.Run(testCase.name, func(t *testing.T) { - testPlacement := &schedulingv1alpha1.Placement{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-placement", - }, - Spec: schedulingv1alpha1.PlacementSpec{ - NamespaceSelector: testCase.namespaceSelector, - }, - Status: schedulingv1alpha1.PlacementStatus{ - Phase: testCase.phase, - SelectedLocation: testCase.selectedLocation, - }, - } - listNamespacesWithAnnotation := func(clusterName logicalcluster.Name) ([]*corev1.Namespace, error) { - if testCase.ns == nil { - return []*corev1.Namespace{}, nil - } - return []*corev1.Namespace{testCase.ns}, nil - } - - reconciler := &placementNamespaceReconciler{listNamespacesWithAnnotation: listNamespacesWithAnnotation} - - _, updated, err := reconciler.reconcile(context.TODO(), testPlacement) - require.NoError(t, err) - require.Equal(t, updated.Status.Phase, testCase.expectedPhase) - }) - } -} diff --git a/pkg/reconciler/scheduling/placement/placement_reconcile_scheduling.go b/pkg/reconciler/scheduling/placement/placement_reconcile_scheduling.go deleted file mode 100644 index bc46fb0d91b..00000000000 --- a/pkg/reconciler/scheduling/placement/placement_reconcile_scheduling.go +++ /dev/null @@ -1,157 +0,0 @@ -/* -Copyright 2022 The KCP 
Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package placement
-
-import (
-	"context"
-	"math/rand"
-
-	"github.com/kcp-dev/logicalcluster/v3"
-
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/labels"
-	"k8s.io/apimachinery/pkg/util/sets"
-
-	schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1"
-	conditionsv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/apis/conditions/v1alpha1"
-	"github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/util/conditions"
-)
-
-// placementReconciler watches namespaces within a workspace and assigns them to a
-// location from the location domain of the workspace.
-type placementReconciler struct {
-	listLocationsByPath func(path logicalcluster.Path) ([]*schedulingv1alpha1.Location, error)
-}
-
-func (r *placementReconciler) reconcile(ctx context.Context, placement *schedulingv1alpha1.Placement) (reconcileStatus, *schedulingv1alpha1.Placement, error) {
-	// determine the location workspace first
-	var locationWorkspace logicalcluster.Path
-	if len(placement.Spec.LocationWorkspace) > 0 {
-		locationWorkspace = logicalcluster.NewPath(placement.Spec.LocationWorkspace)
-	} else {
-		locationWorkspace = logicalcluster.From(placement).Path()
-	}
-
-	locationWorkspace, validLocationNames, err := r.validLocationNames(placement, locationWorkspace)
-	if err != nil {
-		conditions.MarkFalse(placement, schedulingv1alpha1.PlacementReady, schedulingv1alpha1.LocationNotFoundReason, conditionsv1alpha1.ConditionSeverityError, err.Error())
-		return reconcileStatusContinue, placement, err
-	}
-
-	switch placement.Status.Phase {
-	case schedulingv1alpha1.PlacementBound:
-		// if the selected location becomes invalid while the placement is in bound state,
-		// set PlacementReady to false.
-		if !isValidLocationSelected(placement, locationWorkspace, validLocationNames) {
-			conditions.MarkFalse(
-				placement,
-				schedulingv1alpha1.PlacementReady,
-				schedulingv1alpha1.LocationInvalidReason,
-				conditionsv1alpha1.ConditionSeverityError,
-				"Selected location is invalid for current placement",
-			)
-			return reconcileStatusContinue, placement, nil
-		}
-
-		conditions.MarkTrue(placement, schedulingv1alpha1.PlacementReady)
-		return reconcileStatusContinue, placement, nil
-	case schedulingv1alpha1.PlacementUnbound:
-		if isValidLocationSelected(placement, locationWorkspace, validLocationNames) {
-			// if the selected location is still valid, keep it.
- conditions.MarkTrue(placement, schedulingv1alpha1.PlacementReady) - return reconcileStatusContinue, placement, nil - } - } - - // now it is pending state or in unbound state and needs a reselection - if validLocationNames.Len() == 0 { - placement.Status.Phase = schedulingv1alpha1.PlacementPending - placement.Status.SelectedLocation = nil - conditions.MarkFalse( - placement, - schedulingv1alpha1.PlacementReady, - schedulingv1alpha1.LocationNotMatchReason, - conditionsv1alpha1.ConditionSeverityError, - "No valid location is found") - return reconcileStatusContinue, placement, nil - } - - candidates := make([]string, 0, validLocationNames.Len()) - for loc := range validLocationNames { - candidates = append(candidates, loc) - } - - // TODO(qiujian16): two placements could select the same location. We should - // consider whether placements in a workspace should always select different locations. - chosenLocation := candidates[rand.Intn(len(candidates))] - placement.Status.SelectedLocation = &schedulingv1alpha1.LocationReference{ - Path: locationWorkspace.String(), - LocationName: chosenLocation, - } - placement.Status.Phase = schedulingv1alpha1.PlacementUnbound - conditions.MarkTrue(placement, schedulingv1alpha1.PlacementReady) - - return reconcileStatusContinue, placement, nil -} - -func (r *placementReconciler) validLocationNames(placement *schedulingv1alpha1.Placement, locationWorkspace logicalcluster.Path) (logicalcluster.Path, sets.Set[string], error) { - var locationCluster logicalcluster.Path - selectedLocations := sets.New[string]() - - locations, err := r.listLocationsByPath(locationWorkspace) - if err != nil { - return logicalcluster.None, selectedLocations, err - } - - for _, loc := range locations { - if loc.Spec.Resource != placement.Spec.LocationResource { - continue - } - locationCluster = logicalcluster.From(loc).Path() - - for i := range placement.Spec.LocationSelectors { - s := placement.Spec.LocationSelectors[i] - selector, err := metav1.LabelSelectorAsSelector(&s) - if err != nil { - // skip this selector - continue - } - - if selector.Matches(labels.Set(loc.Labels)) { - selectedLocations.Insert(loc.Name) - } - } - } - - return locationCluster, selectedLocations, nil -} - -func isValidLocationSelected(placement *schedulingv1alpha1.Placement, cluster logicalcluster.Path, validLocationNames sets.Set[string]) bool { - if placement.Status.SelectedLocation == nil { - return false - } - - if placement.Status.SelectedLocation.Path != cluster.String() { - return false - } - - if !validLocationNames.Has(placement.Status.SelectedLocation.LocationName) { - return false - } - - return true -} diff --git a/pkg/reconciler/scheduling/placement/placement_reconcile_scheduling_test.go b/pkg/reconciler/scheduling/placement/placement_reconcile_scheduling_test.go deleted file mode 100644 index fbf87238e76..00000000000 --- a/pkg/reconciler/scheduling/placement/placement_reconcile_scheduling_test.go +++ /dev/null @@ -1,233 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package placement - -import ( - "context" - "fmt" - "testing" - - "github.com/kcp-dev/logicalcluster/v3" - "github.com/stretchr/testify/require" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1" - "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/util/conditions" -) - -func TestPlacementScheduling(t *testing.T) { - testCases := []struct { - name string - locationSelectors []metav1.LabelSelector - locations []*schedulingv1alpha1.Location - phase schedulingv1alpha1.PlacementPhase - selectedLocation *schedulingv1alpha1.LocationReference - - listLocationsError error - - wantError bool - wantPhase schedulingv1alpha1.PlacementPhase - wantSelectLocation *schedulingv1alpha1.LocationReference - wantStatus corev1.ConditionStatus - }{ - { - name: "no locations", - phase: schedulingv1alpha1.PlacementPending, - wantPhase: schedulingv1alpha1.PlacementPending, - wantStatus: corev1.ConditionFalse, - }, - { - name: "bound location to the placement", - phase: schedulingv1alpha1.PlacementPending, - locationSelectors: []metav1.LabelSelector{ - { - MatchLabels: map[string]string{ - "cloud": "aws", - }, - }, - }, - locations: []*schedulingv1alpha1.Location{ - newLocation("aws", map[string]string{"cloud": "aws"}), - newLocation("gcp", map[string]string{"cloud": "gcp"}), - }, - wantPhase: schedulingv1alpha1.PlacementUnbound, - wantStatus: corev1.ConditionTrue, - wantSelectLocation: &schedulingv1alpha1.LocationReference{ - LocationName: "aws", - }, - }, - { - name: "update location to the placement", - phase: schedulingv1alpha1.PlacementUnbound, - locationSelectors: []metav1.LabelSelector{ - { - MatchLabels: map[string]string{ - "cloud": "gcp", - }, - }, - }, - selectedLocation: &schedulingv1alpha1.LocationReference{ - LocationName: "aws", - }, - locations: []*schedulingv1alpha1.Location{ - newLocation("aws", map[string]string{"cloud": "aws"}), - newLocation("gcp", map[string]string{"cloud": "gcp"}), - }, - wantPhase: schedulingv1alpha1.PlacementUnbound, - wantStatus: corev1.ConditionTrue, - wantSelectLocation: &schedulingv1alpha1.LocationReference{ - LocationName: "gcp", - }, - }, - { - name: "stick location when the placement is bound", - phase: schedulingv1alpha1.PlacementBound, - locationSelectors: []metav1.LabelSelector{ - { - MatchLabels: map[string]string{ - "cloud": "aws", - }, - }, - }, - selectedLocation: &schedulingv1alpha1.LocationReference{ - LocationName: "aws", - }, - locations: []*schedulingv1alpha1.Location{ - newLocation("aws", map[string]string{"cloud": "aws"}), - newLocation("aws-1", map[string]string{"cloud": "aws"}), - newLocation("aws-2", map[string]string{"cloud": "aws"}), - }, - wantPhase: schedulingv1alpha1.PlacementBound, - wantStatus: corev1.ConditionTrue, - wantSelectLocation: &schedulingv1alpha1.LocationReference{ - LocationName: "aws", - }, - }, - { - name: "change location when the placement is bound", - phase: schedulingv1alpha1.PlacementBound, - locationSelectors: []metav1.LabelSelector{ - { - MatchLabels: map[string]string{ - "cloud": "gcp", - }, - }, - }, - selectedLocation: &schedulingv1alpha1.LocationReference{ - LocationName: "aws", - }, - locations: []*schedulingv1alpha1.Location{ - newLocation("aws", map[string]string{"cloud": "aws"}), - }, - wantPhase: schedulingv1alpha1.PlacementBound, - wantStatus: corev1.ConditionFalse, - wantSelectLocation: &schedulingv1alpha1.LocationReference{ - LocationName: "aws", - }, - }, - { - name: "no valid location when the 
placement is unbound", - phase: schedulingv1alpha1.PlacementUnbound, - locationSelectors: []metav1.LabelSelector{ - { - MatchLabels: map[string]string{ - "cloud": "gcp", - }, - }, - }, - selectedLocation: &schedulingv1alpha1.LocationReference{ - LocationName: "aws", - }, - locations: []*schedulingv1alpha1.Location{ - newLocation("aws", map[string]string{"cloud": "aws"}), - }, - wantPhase: schedulingv1alpha1.PlacementPending, - wantStatus: corev1.ConditionFalse, - }, - { - name: "get location error", - phase: schedulingv1alpha1.PlacementUnbound, - locationSelectors: []metav1.LabelSelector{ - { - MatchLabels: map[string]string{ - "cloud": "aws", - }, - }, - }, - listLocationsError: fmt.Errorf("list location fails"), - selectedLocation: &schedulingv1alpha1.LocationReference{ - LocationName: "aws", - }, - locations: []*schedulingv1alpha1.Location{ - newLocation("aws", map[string]string{"cloud": "aws"}), - }, - wantPhase: schedulingv1alpha1.PlacementUnbound, - wantStatus: corev1.ConditionFalse, - wantSelectLocation: &schedulingv1alpha1.LocationReference{ - LocationName: "aws", - }, - wantError: true, - }, - } - - for _, testCase := range testCases { - t.Run(testCase.name, func(t *testing.T) { - testPlacement := &schedulingv1alpha1.Placement{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-placement", - }, - Spec: schedulingv1alpha1.PlacementSpec{ - LocationSelectors: testCase.locationSelectors, - }, - Status: schedulingv1alpha1.PlacementStatus{ - SelectedLocation: testCase.selectedLocation, - Phase: testCase.phase, - }, - } - - listLocation := func(clusterName logicalcluster.Path) ([]*schedulingv1alpha1.Location, error) { - return testCase.locations, testCase.listLocationsError - } - - reconciler := &placementReconciler{listLocationsByPath: listLocation} - _, updated, err := reconciler.reconcile(context.TODO(), testPlacement) - - if testCase.wantError { - require.Error(t, err) - } else { - require.NoError(t, err) - } - - require.Equal(t, testCase.wantPhase, updated.Status.Phase) - c := conditions.Get(updated, schedulingv1alpha1.PlacementReady) - require.NotNil(t, c) - require.Equal(t, testCase.wantStatus, c.Status) - require.Equal(t, testCase.wantSelectLocation, updated.Status.SelectedLocation) - }) - } -} - -func newLocation(name string, labels map[string]string) *schedulingv1alpha1.Location { - return &schedulingv1alpha1.Location{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Labels: labels, - }, - } -} diff --git a/pkg/reconciler/workload/apiexport/apiresourceschema.go b/pkg/reconciler/workload/apiexport/apiresourceschema.go deleted file mode 100644 index 382e0067f46..00000000000 --- a/pkg/reconciler/workload/apiexport/apiresourceschema.go +++ /dev/null @@ -1,86 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package apiexport - -import ( - "strings" - - apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - - apiresourcev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apiresource/v1alpha1" - apisv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1" -) - -func toAPIResourceSchema(r *apiresourcev1alpha1.NegotiatedAPIResource, name string) *apisv1alpha1.APIResourceSchema { - group := r.Spec.CommonAPIResourceSpec.GroupVersion.Group - if group == "core" { - group = "" - } - schema := &apisv1alpha1.APIResourceSchema{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Spec: apisv1alpha1.APIResourceSchemaSpec{ - Group: group, - Names: r.Spec.CommonAPIResourceSpec.CustomResourceDefinitionNames, - Scope: r.Spec.CommonAPIResourceSpec.Scope, - Versions: []apisv1alpha1.APIResourceVersion{ - { - Name: r.Spec.CommonAPIResourceSpec.GroupVersion.Version, - Served: true, - Storage: true, - Schema: runtime.RawExtension{ - Raw: r.Spec.CommonAPIResourceSpec.OpenAPIV3Schema.Raw, - }, - }, - }, - }, - } - for _, sr := range r.Spec.CommonAPIResourceSpec.SubResources { - switch sr.Name { - case apiresourcev1alpha1.ScaleSubResourceName: - schema.Spec.Versions[0].Subresources.Scale = &apiextensionsv1.CustomResourceSubresourceScale{ - // TODO(sttts): change NegotiatedAPIResource and APIResourceImport to preserve the paths from the CRDs in the pcluster, or have custom logic for native resources. Here, we can only guess. - SpecReplicasPath: ".spec.replicas", - StatusReplicasPath: ".status.replicas", - } - case apiresourcev1alpha1.StatusSubResourceName: - schema.Spec.Versions[0].Subresources.Status = &apiextensionsv1.CustomResourceSubresourceStatus{} - } - } - schema.Spec.Versions[0].AdditionalPrinterColumns = r.Spec.CommonAPIResourceSpec.ColumnDefinitions.ToCustomResourceColumnDefinitions() - - if value, found := r.Annotations[apiextensionsv1.KubeAPIApprovedAnnotation]; found { - schema.Annotations = map[string]string{ - apiextensionsv1.KubeAPIApprovedAnnotation: value, - } - } - - return schema -} - -// ParseAPIResourceSchemaName parses name of APIResourceSchema to a gr and schema if it is valid. -func ParseAPIResourceSchemaName(name string) (schema.GroupResource, bool) { - comps := strings.SplitN(name, ".", 3) - if len(comps) < 3 { - return schema.GroupResource{}, false - } - return schema.GroupResource{Resource: comps[1], Group: comps[2]}, true -} diff --git a/pkg/reconciler/workload/apiexport/workload_apiexport_controller.go b/pkg/reconciler/workload/apiexport/workload_apiexport_controller.go deleted file mode 100644 index c36572a5b03..00000000000 --- a/pkg/reconciler/workload/apiexport/workload_apiexport_controller.go +++ /dev/null @@ -1,254 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package apiexport - -import ( - "context" - "fmt" - "time" - - kcpcache "github.com/kcp-dev/apimachinery/v2/pkg/cache" - "github.com/kcp-dev/logicalcluster/v3" - - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/util/workqueue" - "k8s.io/klog/v2" - - "github.com/kcp-dev/kcp/pkg/logging" - apiresourcev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apiresource/v1alpha1" - apisv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" - kcpclientset "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster" - apiresourcev1alpha1informers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/apiresource/v1alpha1" - apisv1alpha1informers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/apis/v1alpha1" - workloadv1alpha1informers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/workload/v1alpha1" - apiresourcev1alpha1listers "github.com/kcp-dev/kcp/sdk/client/listers/apiresource/v1alpha1" - apisv1alpha1listers "github.com/kcp-dev/kcp/sdk/client/listers/apis/v1alpha1" - workloadv1alpha1listers "github.com/kcp-dev/kcp/sdk/client/listers/workload/v1alpha1" -) - -const ( - ControllerName = "kcp-workload-apiexport" -) - -// NewController returns a new controller instance. -func NewController( - kcpClusterClient kcpclientset.ClusterInterface, - apiExportInformer apisv1alpha1informers.APIExportClusterInformer, - apiResourceSchemaInformer apisv1alpha1informers.APIResourceSchemaClusterInformer, - negotiatedAPIResourceInformer apiresourcev1alpha1informers.NegotiatedAPIResourceClusterInformer, - syncTargetInformer workloadv1alpha1informers.SyncTargetClusterInformer, -) (*controller, error) { - queue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), ControllerName) - - c := &controller{ - queue: queue, - enqueueAfter: func(export *apisv1alpha1.APIExport, duration time.Duration) { - key, err := kcpcache.MetaClusterNamespaceKeyFunc(export) - if err != nil { - runtime.HandleError(err) - return - } - queue.AddAfter(key, duration) - }, - kcpClusterClient: kcpClusterClient, - apiExportsLister: apiExportInformer.Lister(), - apiResourceSchemaLister: apiResourceSchemaInformer.Lister(), - negotiatedAPIResourceLister: negotiatedAPIResourceInformer.Lister(), - syncTargetClusterLister: syncTargetInformer.Lister(), - } - - _, _ = apiExportInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ - FilterFunc: func(obj interface{}) bool { - switch t := obj.(type) { - case *apisv1alpha1.APIExport: - return t.Name == workloadv1alpha1.ImportedAPISExportName - } - return false - }, - Handler: cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { c.enqueueAPIExport(obj) }, - UpdateFunc: func(_, obj interface{}) { c.enqueueAPIExport(obj) }, - DeleteFunc: func(obj interface{}) { c.enqueueAPIExport(obj) }, - }, - }) - - _, _ = negotiatedAPIResourceInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { c.enqueueNegotiatedAPIResource(obj) }, - UpdateFunc: func(_, obj interface{}) { c.enqueueNegotiatedAPIResource(obj) }, - DeleteFunc: func(obj interface{}) { c.enqueueNegotiatedAPIResource(obj) }, - }) - - _, _ = syncTargetInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { c.enqueueSyncTarget(obj) }, - UpdateFunc: func(_, obj interface{}) { 
c.enqueueSyncTarget(obj) }, - }) - - return c, nil -} - -// controller reconciles APIResourceSchemas and the "workloads" APIExport in a -// API negotiation domain based on NegotiatedAPIResources: -// - it creates APIResourceSchemas for every NegotiatedAPIResource in the workspace -// - it maintains the list of latest resource schemas in the APIExport -// - it deletes APIResourceSchemas that have no NegotiatedAPIResource in the workspace anymore, but are listed in the APIExport. -// -// It does NOT create APIExport. -type controller struct { - queue workqueue.RateLimitingInterface - enqueueAfter func(*apisv1alpha1.APIExport, time.Duration) - - kcpClusterClient kcpclientset.ClusterInterface - - apiExportsLister apisv1alpha1listers.APIExportClusterLister - apiResourceSchemaLister apisv1alpha1listers.APIResourceSchemaClusterLister - negotiatedAPIResourceLister apiresourcev1alpha1listers.NegotiatedAPIResourceClusterLister - syncTargetClusterLister workloadv1alpha1listers.SyncTargetClusterLister -} - -func (c *controller) enqueueNegotiatedAPIResource(obj interface{}) { - resource, ok := obj.(*apiresourcev1alpha1.NegotiatedAPIResource) - if !ok { - runtime.HandleError(fmt.Errorf("obj is supposed to be a NegotiatedAPIResource, but is %T", obj)) - return - } - - export, err := c.apiExportsLister.Cluster(logicalcluster.From(resource)).Get(workloadv1alpha1.ImportedAPISExportName) - if errors.IsNotFound(err) { - return // it's gone - } else if err != nil { - runtime.HandleError(fmt.Errorf("failed to get APIExport %s|%s: %w", logicalcluster.From(resource), workloadv1alpha1.ImportedAPISExportName, err)) - return - } - - key, err := kcpcache.MetaClusterNamespaceKeyFunc(export) - if err != nil { - runtime.HandleError(err) - return - } - logger := logging.WithQueueKey(logging.WithReconciler(klog.Background(), ControllerName), key) - logging.WithObject(logger, resource).V(2).Info("queueing APIExport due to NegotiatedAPIResource") - c.queue.Add(key) -} - -func (c *controller) enqueueSyncTarget(obj interface{}) { - resource, ok := obj.(*workloadv1alpha1.SyncTarget) - if !ok { - runtime.HandleError(fmt.Errorf("obj is supposed to be a SyncTarget, but is %T", obj)) - return - } - - export, err := c.apiExportsLister.Cluster(logicalcluster.From(resource)).Get(workloadv1alpha1.ImportedAPISExportName) - if errors.IsNotFound(err) { - return // it's gone - } else if err != nil { - runtime.HandleError(fmt.Errorf("failed to get APIExport %s|%s: %w", logicalcluster.From(resource), workloadv1alpha1.ImportedAPISExportName, err)) - return - } - - key, err := kcpcache.MetaClusterNamespaceKeyFunc(export) - if err != nil { - runtime.HandleError(err) - return - } - logger := logging.WithQueueKey(logging.WithReconciler(klog.Background(), ControllerName), key) - logging.WithObject(logger, resource).V(2).Info("queueing APIExport due to SyncTarget") - c.queue.Add(key) -} - -func (c *controller) enqueueAPIExport(obj interface{}) { - key, err := kcpcache.DeletionHandlingMetaClusterNamespaceKeyFunc(obj) - if err != nil { - runtime.HandleError(err) - return - } - - logger := logging.WithQueueKey(logging.WithReconciler(klog.Background(), ControllerName), key) - logger.V(2).Info("queueing APIExport") - c.queue.Add(key) -} - -// Start starts the controller, which stops when ctx.Done() is closed. 
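Start and processNextWorkItem below implement the standard rate-limited workqueue contract that every controller removed in this patch shares. For reference, that contract condensed into one compilable helper; the sync parameter is a hypothetical stand-in for a controller's process method:

    package worker

    import (
    	"context"

    	"k8s.io/apimachinery/pkg/util/runtime"
    	"k8s.io/client-go/util/workqueue"
    )

    func processOne(ctx context.Context, queue workqueue.RateLimitingInterface, sync func(ctx context.Context, key string) error) bool {
    	k, quit := queue.Get()
    	if quit {
    		return false
    	}
    	key := k.(string)
    	// Done must always be called so other workers are not blocked on this key.
    	defer queue.Done(key)

    	if err := sync(ctx, key); err != nil {
    		runtime.HandleError(err)
    		queue.AddRateLimited(key) // retry with backoff
    		return true
    	}
    	queue.Forget(key) // clear rate-limiter state after success
    	return true
    }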
-func (c *controller) Start(ctx context.Context, numThreads int) { - defer runtime.HandleCrash() - defer c.queue.ShutDown() - - logger := logging.WithReconciler(klog.FromContext(ctx), ControllerName) - ctx = klog.NewContext(ctx, logger) - logger.Info("Starting controller") - defer logger.Info("Shutting down controller") - - for i := 0; i < numThreads; i++ { - go wait.UntilWithContext(ctx, c.startWorker, time.Second) - } - - <-ctx.Done() -} - -func (c *controller) startWorker(ctx context.Context) { - for c.processNextWorkItem(ctx) { - } -} - -func (c *controller) processNextWorkItem(ctx context.Context) bool { - // Wait until there is a new item in the working queue - k, quit := c.queue.Get() - if quit { - return false - } - key := k.(string) - - logger := logging.WithQueueKey(klog.FromContext(ctx), key) - ctx = klog.NewContext(ctx, logger) - logger.V(1).Info("processing key") - - // No matter what, tell the queue we're done with this key, to unblock - // other workers. - defer c.queue.Done(key) - - if err := c.process(ctx, key); err != nil { - runtime.HandleError(fmt.Errorf("%q controller failed to sync %q, err: %w", ControllerName, key, err)) - c.queue.AddRateLimited(key) - return true - } - c.queue.Forget(key) - return true -} - -func (c *controller) process(ctx context.Context, key string) error { - clusterName, _, name, err := kcpcache.SplitMetaClusterNamespaceKey(key) - if err != nil { - runtime.HandleError(err) - return nil - } - obj, err := c.apiExportsLister.Cluster(clusterName).Get(name) - if err != nil { - if errors.IsNotFound(err) { - return nil // object deleted before we handled it - } - return err - } - obj = obj.DeepCopy() - - logger := logging.WithObject(klog.FromContext(ctx), obj) - ctx = klog.NewContext(ctx, logger) - - return c.reconcile(ctx, obj) -} diff --git a/pkg/reconciler/workload/apiexport/workload_apiexport_reconcile.go b/pkg/reconciler/workload/apiexport/workload_apiexport_reconcile.go deleted file mode 100644 index 075b564b8c6..00000000000 --- a/pkg/reconciler/workload/apiexport/workload_apiexport_reconcile.go +++ /dev/null @@ -1,250 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/
-
-package apiexport
-
-import (
-	"context"
-	"fmt"
-	"reflect"
-	"time"
-
-	"github.com/kcp-dev/logicalcluster/v3"
-
-	"k8s.io/apimachinery/pkg/api/equality"
-	apierrors "k8s.io/apimachinery/pkg/api/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/labels"
-	"k8s.io/apimachinery/pkg/runtime/schema"
-	"k8s.io/apimachinery/pkg/util/errors"
-	"k8s.io/apimachinery/pkg/util/sets"
-	"k8s.io/klog/v2"
-
-	apiresourcev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apiresource/v1alpha1"
-	apisv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1"
-	workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1"
-)
-
-type reconcileStatus int
-
-const (
-	reconcileStatusStop reconcileStatus = iota
-	reconcileStatusContinue
-)
-
-type reconciler interface {
-	reconcile(ctx context.Context, export *apisv1alpha1.APIExport) (reconcileStatus, error)
-}
-
-type schemaReconciler struct {
-	listNegotiatedAPIResources func(clusterName logicalcluster.Name) ([]*apiresourcev1alpha1.NegotiatedAPIResource, error)
-	listAPIResourceSchemas     func(clusterName logicalcluster.Name) ([]*apisv1alpha1.APIResourceSchema, error)
-	listSyncTargets            func(clusterName logicalcluster.Name) ([]*workloadv1alpha1.SyncTarget, error)
-	getAPIResourceSchema       func(ctx context.Context, clusterName logicalcluster.Name, name string) (*apisv1alpha1.APIResourceSchema, error)
-	createAPIResourceSchema    func(ctx context.Context, clusterName logicalcluster.Path, schema *apisv1alpha1.APIResourceSchema) (*apisv1alpha1.APIResourceSchema, error)
-	deleteAPIResourceSchema    func(ctx context.Context, clusterName logicalcluster.Path, name string) error
-	updateAPIExport            func(ctx context.Context, clusterName logicalcluster.Path, export *apisv1alpha1.APIExport) (*apisv1alpha1.APIExport, error)
-
-	enqueueAfter func(*apisv1alpha1.APIExport, time.Duration)
-}
-
-func (r *schemaReconciler) reconcile(ctx context.Context, export *apisv1alpha1.APIExport) (reconcileStatus, error) {
-	logger := klog.FromContext(ctx)
-	clusterName := logicalcluster.From(export)
-
-	if export.Name != workloadv1alpha1.ImportedAPISExportName {
-		return reconcileStatusStop, nil
-	}
-
-	resources, err := r.listNegotiatedAPIResources(clusterName)
-	if err != nil {
-		return reconcileStatusStop, err
-	}
-	if len(resources) == 0 {
-		// Ignore this export; compare with the TODO above about identification.
-		return reconcileStatusStop, nil
-	}
-
-	resourcesByResourceGroup := map[schema.GroupResource]*apiresourcev1alpha1.NegotiatedAPIResource{}
-	for _, r := range resources {
-		// TODO(sttts): what about multiple versions of the same resource? Something is missing in the apiresource APIs and controllers to support that.
-		gr := schema.GroupResource{
-			Group:    r.Spec.GroupVersion.Group,
-			Resource: r.Spec.Plural,
-		}
-		if gr.Group == "" {
-			gr.Group = "core"
-		}
-		resourcesByResourceGroup[gr] = r
-	}
-
-	// Reconcile the schemas in the export: check every schema referenced in the
-	// APIExport. If one is missing, create it; if it is outdated, create a
-	// replacement schema and delete the outdated one.
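The schema names reconciled below follow the convention rev-<resourceVersion>.<plural>.<group>, produced with fmt.Sprintf further down and parsed by ParseAPIResourceSchemaName in apiresourceschema.go above. A tiny, self-contained round trip of that convention; parse duplicates the production logic purely for illustration:

    package main

    import (
    	"fmt"
    	"strings"

    	"k8s.io/apimachinery/pkg/runtime/schema"
    )

    // parse mirrors ParseAPIResourceSchemaName: split off the "rev-<rv>" prefix,
    // then treat the remainder as <plural>.<group>.
    func parse(name string) (schema.GroupResource, bool) {
    	comps := strings.SplitN(name, ".", 3) // e.g. "rev-52", "deployments", "apps"
    	if len(comps) < 3 {
    		return schema.GroupResource{}, false
    	}
    	return schema.GroupResource{Resource: comps[1], Group: comps[2]}, true
    }

    func main() {
    	name := fmt.Sprintf("rev-%s.%s.%s", "52", "deployments", "apps")
    	gr, ok := parse(name)
    	fmt.Println(name, gr, ok) // rev-52.deployments.apps deployments.apps true
    }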
- upToDateResourceGroups := sets.New[string]() - expectedResourceGroups := sets.New[string]() - - // schemaNamesByResourceGroup records the up-to-date APIResourceSchemaName for the resourceGroup, and the - // APIExport will be updated accordingly - schemaNamesByResourceGroup := map[schema.GroupResource]string{} - for _, schemaName := range export.Spec.LatestResourceSchemas { - gr, ok := ParseAPIResourceSchemaName(schemaName) - if !ok { - continue - } - expectedResourceGroups.Insert(gr.String()) - schemaNamesByResourceGroup[gr] = schemaName - - existingSchema, err := r.getAPIResourceSchema(ctx, clusterName, schemaName) - if apierrors.IsNotFound(err) { - // not found, will be recreated - continue - } else if err != nil { - return reconcileStatusStop, err - } - - // negotiated schema gone? - negotiated, ok := resourcesByResourceGroup[gr] - if !ok { - continue - } - - // negotiated schema still matches APIResourceSchema? - newSchema := toAPIResourceSchema(negotiated, "") - if equality.Semantic.DeepEqual(&existingSchema.Spec, &newSchema.Spec) { - // nothing to do - upToDateResourceGroups.Insert(gr.String()) - } - } - - // create missing or outdated schemas - outdatedOrMissing := expectedResourceGroups.Difference(upToDateResourceGroups) - outDatedSchemaNames := sets.New[string]() - for _, resourceGroup := range sets.List[string](outdatedOrMissing) { - logger.WithValues("schema", resourceGroup).V(2).Info("missing or outdated schema on APIExport, adding") - gr := schema.ParseGroupResource(resourceGroup) - resource, ok := resourcesByResourceGroup[gr] - if !ok { - // no negotiated schema, keep the current schema name. - continue - } - - group := resource.Spec.GroupVersion.Group - if group == "" { - group = "core" - } - schemaName := fmt.Sprintf("rev-%s.%s.%s", resource.ResourceVersion, resource.Spec.Plural, group) - schema := toAPIResourceSchema(resource, schemaName) - schema.OwnerReferences = []metav1.OwnerReference{ - *metav1.NewControllerRef(export, apisv1alpha1.SchemeGroupVersion.WithKind("APIExport")), - } - schema, err = r.createAPIResourceSchema(ctx, clusterName.Path(), schema) - if apierrors.IsAlreadyExists(err) { - schema, err = r.getAPIResourceSchema(ctx, clusterName, schemaName) - } - if err != nil { - return reconcileStatusStop, err - } - - if _, ok := schemaNamesByResourceGroup[gr]; ok { - outDatedSchemaNames.Insert(schemaNamesByResourceGroup[gr]) - } - schemaNamesByResourceGroup[gr] = schema.Name - } - - // update schema list in export - old := export.DeepCopy() - export.Spec.LatestResourceSchemas = []string{} - for _, schemaName := range schemaNamesByResourceGroup { - export.Spec.LatestResourceSchemas = append(export.Spec.LatestResourceSchemas, schemaName) - } - if !reflect.DeepEqual(old.Spec.LatestResourceSchemas, export.Spec.LatestResourceSchemas) { - if _, err := r.updateAPIExport(ctx, clusterName.Path(), export); err != nil { - return reconcileStatusStop, err - } - } - - // delete schemas that are no longer needed - for schemaName := range outDatedSchemaNames { - logger.V(2).Info("deleting schema of APIExport", "APIResourceSchema", schemaName) - if err := r.deleteAPIResourceSchema(ctx, clusterName.Path(), schemaName); err != nil && !apierrors.IsNotFound(err) { - return reconcileStatusStop, err - } - } - - return reconcileStatusContinue, nil -} - -func (c *controller) reconcile(ctx context.Context, export *apisv1alpha1.APIExport) error { - reconcilers := []reconciler{ - &schemaReconciler{ - listNegotiatedAPIResources: c.listNegotiatedAPIResources, - listAPIResourceSchemas: 
c.listAPIResourceSchemas, - listSyncTargets: c.listSyncTarget, - getAPIResourceSchema: c.getAPIResourceSchema, - createAPIResourceSchema: c.createAPIResourceSchema, - deleteAPIResourceSchema: c.deleteAPIResourceSchema, - updateAPIExport: c.updateAPIExport, - enqueueAfter: c.enqueueAfter, - }, - } - - var errs []error - - for _, r := range reconcilers { - status, err := r.reconcile(ctx, export) - if err != nil { - errs = append(errs, err) - } - if status == reconcileStatusStop { - break - } - } - - return errors.NewAggregate(errs) -} - -func (c *controller) listNegotiatedAPIResources(clusterName logicalcluster.Name) ([]*apiresourcev1alpha1.NegotiatedAPIResource, error) { - return c.negotiatedAPIResourceLister.Cluster(clusterName).List(labels.Everything()) -} - -func (c *controller) listAPIResourceSchemas(clusterName logicalcluster.Name) ([]*apisv1alpha1.APIResourceSchema, error) { - return c.apiResourceSchemaLister.Cluster(clusterName).List(labels.Everything()) -} - -func (c *controller) listSyncTarget(clusterName logicalcluster.Name) ([]*workloadv1alpha1.SyncTarget, error) { - return c.syncTargetClusterLister.Cluster(clusterName).List(labels.Everything()) -} - -func (c *controller) getAPIResourceSchema(ctx context.Context, clusterName logicalcluster.Name, name string) (*apisv1alpha1.APIResourceSchema, error) { - schema, err := c.apiResourceSchemaLister.Cluster(clusterName).Get(name) - if apierrors.IsNotFound(err) { - return c.kcpClusterClient.Cluster(clusterName.Path()).ApisV1alpha1().APIResourceSchemas().Get(ctx, name, metav1.GetOptions{}) - } - return schema, err -} - -func (c *controller) createAPIResourceSchema(ctx context.Context, clusterName logicalcluster.Path, schema *apisv1alpha1.APIResourceSchema) (*apisv1alpha1.APIResourceSchema, error) { - return c.kcpClusterClient.Cluster(clusterName).ApisV1alpha1().APIResourceSchemas().Create(ctx, schema, metav1.CreateOptions{}) -} - -func (c *controller) updateAPIExport(ctx context.Context, clusterName logicalcluster.Path, export *apisv1alpha1.APIExport) (*apisv1alpha1.APIExport, error) { - return c.kcpClusterClient.Cluster(clusterName).ApisV1alpha1().APIExports().Update(ctx, export, metav1.UpdateOptions{}) -} - -func (c *controller) deleteAPIResourceSchema(ctx context.Context, clusterName logicalcluster.Path, name string) error { - return c.kcpClusterClient.Cluster(clusterName).ApisV1alpha1().APIResourceSchemas().Delete(ctx, name, metav1.DeleteOptions{}) -} diff --git a/pkg/reconciler/workload/apiexport/workload_apiexport_reconcile_test.go b/pkg/reconciler/workload/apiexport/workload_apiexport_reconcile_test.go deleted file mode 100644 index 9c01a01daec..00000000000 --- a/pkg/reconciler/workload/apiexport/workload_apiexport_reconcile_test.go +++ /dev/null @@ -1,528 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package apiexport - -import ( - "context" - "fmt" - "strings" - "testing" - "time" - - "github.com/kcp-dev/logicalcluster/v3" - "github.com/stretchr/testify/require" - - apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/utils/pointer" - "sigs.k8s.io/yaml" - - apiresourcev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apiresource/v1alpha1" - apisv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" -) - -type SchemaCheck func(t *testing.T, s *apisv1alpha1.APIResourceSchema) - -func someSchemaWeDontCareAboutInDetail(t *testing.T, got *apisv1alpha1.APIResourceSchema) { - t.Helper() -} - -func equals(expected *apisv1alpha1.APIResourceSchema) func(*testing.T, *apisv1alpha1.APIResourceSchema) { - return func(t *testing.T, got *apisv1alpha1.APIResourceSchema) { - t.Helper() - require.Equal(t, expected, got) - } -} - -type ExportCheck func(t *testing.T, s *apisv1alpha1.APIExport) - -func hasSchemas(expected ...string) func(*testing.T, *apisv1alpha1.APIExport) { - return func(t *testing.T, got *apisv1alpha1.APIExport) { - t.Helper() - require.Equal(t, expected, got.Spec.LatestResourceSchemas) - } -} - -func TestSchemaReconciler(t *testing.T) { - tests := map[string]struct { - negotiatedResources map[logicalcluster.Name][]*apiresourcev1alpha1.NegotiatedAPIResource - schemas map[logicalcluster.Name][]*apisv1alpha1.APIResourceSchema - syncTargets map[logicalcluster.Name][]*workloadv1alpha1.SyncTarget - export *apisv1alpha1.APIExport - - listNegotiatedAPIResourcesError error - listAPIResourceSchemaError error - getAPIResourceSchemaError error - createAPIResourceSchemaError error - deleteAPIResourceSchemaError error - updateAPIExportError error - - wantSchemaCreates map[string]SchemaCheck - wantExportUpdates map[string]ExportCheck - wantSchemaDeletes map[string]struct{} - - wantReconcileStatus reconcileStatus - wantRequeue time.Duration - wantError bool - }{ - "some other export": { - export: export(logicalcluster.NewPath("root:org:ws"), "test"), - wantReconcileStatus: reconcileStatusStop, - }, - "no negotiated API resources": { - export: export(logicalcluster.NewPath("root:org:ws"), "kubernetes"), - wantReconcileStatus: reconcileStatusStop, - }, - "dangling schema, but no negotiated API resources": { - export: export(logicalcluster.NewPath("root:org:ws"), "kubernetes", "rev-43.deployments.apps"), - wantReconcileStatus: reconcileStatusStop, - }, - "negotiated API resource, but some other export": { - export: export(logicalcluster.NewPath("root:org:ws"), "something", "rev-10.services.core"), - negotiatedResources: map[logicalcluster.Name][]*apiresourcev1alpha1.NegotiatedAPIResource{ - "root:org:ws": { - negotiatedAPIResource(logicalcluster.NewPath("root:org:ws"), "core", "v1", "Service"), - }, - }, - schemas: map[logicalcluster.Name][]*apisv1alpha1.APIResourceSchema{ - "root:org:ws": { - withExportOwner(apiResourceSchema(logicalcluster.NewPath("root:org:ws"), "rev-10", "", "v1", "Service"), "kubernetes"), // older RV - }, - }, - wantReconcileStatus: reconcileStatusStop, - }, - "full api resource": { - export: export(logicalcluster.NewPath("root:org:ws"), workloadv1alpha1.ImportedAPISExportName, "rev-0.deployments.apps"), - negotiatedResources: map[logicalcluster.Name][]*apiresourcev1alpha1.NegotiatedAPIResource{ - 
"root:org:ws": { - { - ObjectMeta: metav1.ObjectMeta{ - Name: "deployments.v1.apps", - ResourceVersion: "52", - }, - Spec: apiresourcev1alpha1.NegotiatedAPIResourceSpec{ - CommonAPIResourceSpec: apiresourcev1alpha1.CommonAPIResourceSpec{ - GroupVersion: apiresourcev1alpha1.GroupVersion{Group: "apps", Version: "v1"}, - Scope: "Namespaced", - CustomResourceDefinitionNames: apiextensionsv1.CustomResourceDefinitionNames{ - Plural: "deployments", - Singular: "deployment", - Kind: "Deployment", - ListKind: "DeploymentList", - }, - OpenAPIV3Schema: runtime.RawExtension{ - Raw: []byte(`{ - "type": "object", - "properties": { - "spec": { - "type": "object" - } - } - }`), - }, - SubResources: apiresourcev1alpha1.SubResources{ - {Name: "scale"}, - {Name: "status"}, - }, - ColumnDefinitions: apiresourcev1alpha1.ColumnDefinitions{ - { - TableColumnDefinition: metav1.TableColumnDefinition{ - Name: "replicas", - Type: "number", - Description: "Number of replicas", - Priority: 0, - }, - JSONPath: pointer.String(".status.replicas"), - }, - { - TableColumnDefinition: metav1.TableColumnDefinition{ - Name: "available", - Type: "number", - Description: "Number of available replicas", - Priority: 0, - }, - JSONPath: pointer.String(".status.availableReplicas"), - }, - }, - }, - Publish: false, - }, - }, - }, - }, - wantSchemaCreates: map[string]SchemaCheck{ - "rev-52.deployments.apps": equals(&apisv1alpha1.APIResourceSchema{ - ObjectMeta: metav1.ObjectMeta{ - Name: "rev-52.deployments.apps", - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: "apis.kcp.io/v1alpha1", - Kind: "APIExport", - Name: workloadv1alpha1.ImportedAPISExportName, - Controller: pointer.Bool(true), - BlockOwnerDeletion: pointer.Bool(true), - }, - }, - }, - Spec: apisv1alpha1.APIResourceSchemaSpec{ - Group: "apps", - Names: apiextensionsv1.CustomResourceDefinitionNames{ - Plural: "deployments", - Singular: "deployment", - Kind: "Deployment", - ListKind: "DeploymentList", - }, - Scope: "Namespaced", - Versions: []apisv1alpha1.APIResourceVersion{ - { - Name: "v1", - Served: true, - Storage: true, - Schema: runtime.RawExtension{ - Raw: []byte(`{ - "type": "object", - "properties": { - "spec": { - "type": "object" - } - } - }`), - }, - Subresources: apiextensionsv1.CustomResourceSubresources{ - Scale: &apiextensionsv1.CustomResourceSubresourceScale{ - SpecReplicasPath: ".spec.replicas", - StatusReplicasPath: ".status.replicas", - }, - Status: &apiextensionsv1.CustomResourceSubresourceStatus{}, - }, - AdditionalPrinterColumns: []apiextensionsv1.CustomResourceColumnDefinition{ - { - Name: "replicas", - Type: "number", - Description: "Number of replicas", - Priority: 0, - JSONPath: ".status.replicas", - }, - { - Name: "available", - Type: "number", - Description: "Number of available replicas", - Priority: 0, - JSONPath: ".status.availableReplicas", - }, - }, - }, - }, - }, - }), - }, - wantExportUpdates: map[string]ExportCheck{ - workloadv1alpha1.ImportedAPISExportName: hasSchemas("rev-52.deployments.apps"), - }, - wantSchemaDeletes: map[string]struct{}{"rev-0.deployments.apps": {}}, - wantReconcileStatus: reconcileStatusContinue, - }, - "non-triple schema name": { - export: export(logicalcluster.NewPath("root:org:ws"), workloadv1alpha1.ImportedAPISExportName, "rev-0.services.core"), - negotiatedResources: map[logicalcluster.Name][]*apiresourcev1alpha1.NegotiatedAPIResource{ - "root:org:ws": { - negotiatedAPIResource(logicalcluster.NewPath("root:org:ws"), "core", "v1", "Service"), - }, - }, - wantSchemaCreates: map[string]SchemaCheck{ - 
"rev-15.services.core": someSchemaWeDontCareAboutInDetail, - }, - wantExportUpdates: map[string]ExportCheck{ - workloadv1alpha1.ImportedAPISExportName: hasSchemas("rev-15.services.core"), - }, - wantSchemaDeletes: map[string]struct{}{"rev-0.services.core": {}}, - wantReconcileStatus: reconcileStatusContinue, - }, - "dangling schema in export": { - export: export(logicalcluster.NewPath("root:org:ws"), workloadv1alpha1.ImportedAPISExportName, "rev-0.services.core"), - negotiatedResources: map[logicalcluster.Name][]*apiresourcev1alpha1.NegotiatedAPIResource{ - "root:org:ws": { - negotiatedAPIResource(logicalcluster.NewPath("root:org:ws"), "core", "v1", "Service"), - }, - }, - schemas: map[logicalcluster.Name][]*apisv1alpha1.APIResourceSchema{ - "root:org:ws": { - withExportOwner(apiResourceSchema(logicalcluster.NewPath("root:org:ws"), "rev-0", "", "v1alpha1", "Service"), workloadv1alpha1.ImportedAPISExportName), - }, - }, - wantSchemaCreates: map[string]SchemaCheck{ - "rev-15.services.core": someSchemaWeDontCareAboutInDetail, - }, - wantExportUpdates: map[string]ExportCheck{ - workloadv1alpha1.ImportedAPISExportName: hasSchemas("rev-15.services.core"), - }, - wantSchemaDeletes: map[string]struct{}{"rev-0.services.core": {}}, - wantReconcileStatus: reconcileStatusContinue, - }, - "up-to-date schema": { - export: export(logicalcluster.NewPath("root:org:ws"), workloadv1alpha1.ImportedAPISExportName, "rev-10.services.core"), - negotiatedResources: map[logicalcluster.Name][]*apiresourcev1alpha1.NegotiatedAPIResource{ - "root:org:ws": { - negotiatedAPIResource(logicalcluster.NewPath("root:org:ws"), "core", "v1", "Service"), - }, - }, - schemas: map[logicalcluster.Name][]*apisv1alpha1.APIResourceSchema{ - "root:org:ws": { - withExportOwner(apiResourceSchema(logicalcluster.NewPath("root:org:ws"), "rev-10", "", "v1", "Service"), workloadv1alpha1.ImportedAPISExportName), // older RV - }, - }, - wantReconcileStatus: reconcileStatusContinue, - }, - "outdated schema": { - export: export(logicalcluster.NewPath("root:org:ws"), workloadv1alpha1.ImportedAPISExportName, "rev-10.services.core"), - negotiatedResources: map[logicalcluster.Name][]*apiresourcev1alpha1.NegotiatedAPIResource{ - "root:org:ws": { - negotiatedAPIResource(logicalcluster.NewPath("root:org:ws"), "core", "v1", "Service"), - }, - }, - schemas: map[logicalcluster.Name][]*apisv1alpha1.APIResourceSchema{ - "root:org:ws": { - withDifferentOpenAPI( - withExportOwner(apiResourceSchema(logicalcluster.NewPath("root:org:ws"), "rev-10", "", "v1", "Service"), workloadv1alpha1.ImportedAPISExportName), - `{"type":"object"}`, - ), - }, - }, - wantSchemaCreates: map[string]SchemaCheck{ - "rev-15.services.core": someSchemaWeDontCareAboutInDetail, - }, - wantExportUpdates: map[string]ExportCheck{ - workloadv1alpha1.ImportedAPISExportName: hasSchemas("rev-15.services.core"), - }, - wantSchemaDeletes: map[string]struct{}{"rev-10.services.core": {}}, - wantReconcileStatus: reconcileStatusContinue, - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - var requeuedAfter time.Duration - schemaCreates := map[string]*apisv1alpha1.APIResourceSchema{} - exportUpdates := map[string]*apisv1alpha1.APIExport{} - schemeDeletes := map[string]struct{}{} - r := &schemaReconciler{ - listNegotiatedAPIResources: func(clusterName logicalcluster.Name) ([]*apiresourcev1alpha1.NegotiatedAPIResource, error) { - if tc.listNegotiatedAPIResourcesError != nil { - return nil, tc.listNegotiatedAPIResourcesError - } - return tc.negotiatedResources[clusterName], nil 
- }, - listSyncTargets: func(clusterName logicalcluster.Name) ([]*workloadv1alpha1.SyncTarget, error) { - return tc.syncTargets[clusterName], nil - }, - listAPIResourceSchemas: func(clusterName logicalcluster.Name) ([]*apisv1alpha1.APIResourceSchema, error) { - if tc.listAPIResourceSchemaError != nil { - return nil, tc.listAPIResourceSchemaError - } - return tc.schemas[clusterName], nil - }, - getAPIResourceSchema: func(ctx context.Context, clusterName logicalcluster.Name, name string) (*apisv1alpha1.APIResourceSchema, error) { - if tc.getAPIResourceSchemaError != nil { - return nil, tc.getAPIResourceSchemaError - } - for _, s := range tc.schemas[clusterName] { - if s.Name == name { - return s, nil - } - } - return nil, apierrors.NewNotFound(schema.GroupResource{Group: "apis.kcp.io", Resource: "apiresourceschemas"}, name) - }, - createAPIResourceSchema: func(ctx context.Context, clusterName logicalcluster.Path, schema *apisv1alpha1.APIResourceSchema) (*apisv1alpha1.APIResourceSchema, error) { - if tc.createAPIResourceSchemaError != nil { - return nil, tc.createAPIResourceSchemaError - } - schemaCreates[schema.Name] = schema.DeepCopy() - return schema, nil - }, - updateAPIExport: func(ctx context.Context, clusterName logicalcluster.Path, export *apisv1alpha1.APIExport) (*apisv1alpha1.APIExport, error) { - if tc.updateAPIExportError != nil { - return nil, tc.updateAPIExportError - } - exportUpdates[export.Name] = export.DeepCopy() - return export, nil - }, - deleteAPIResourceSchema: func(ctx context.Context, clusterName logicalcluster.Path, name string) error { - if tc.deleteAPIResourceSchemaError != nil { - return tc.deleteAPIResourceSchemaError - } - schemeDeletes[name] = struct{}{} - return nil - }, - enqueueAfter: func(export *apisv1alpha1.APIExport, duration time.Duration) { - requeuedAfter = duration - }, - } - - status, err := r.reconcile(context.Background(), tc.export) - if tc.wantError { - require.Error(t, err) - } else { - require.NoError(t, err) - } - - require.Equal(t, status, tc.wantReconcileStatus) - require.Equal(t, tc.wantRequeue, requeuedAfter) - - // check creates - for _, s := range schemaCreates { - require.Contains(t, tc.wantSchemaCreates, s.Name, "got unexpected create:\n%s", toYaml(s)) - tc.wantSchemaCreates[s.Name](t, s) - } - for name := range tc.wantSchemaCreates { - require.Contains(t, schemaCreates, name, "missing create of %s", name) - } - - // check updates - for _, e := range exportUpdates { - require.Contains(t, tc.wantExportUpdates, e.Name, "got unexpected update:\n%s", toYaml(e)) - tc.wantExportUpdates[e.Name](t, e) - } - for name := range tc.wantExportUpdates { - require.Contains(t, exportUpdates, name, "missing update for %s", name) - } - - // check deletes - for name := range schemeDeletes { - require.Contains(t, tc.wantSchemaDeletes, name, "got unexpected delete of %q", name) - } - for name := range tc.wantSchemaDeletes { - require.Contains(t, schemeDeletes, name, "missing delete of %q", name) - } - }) - } -} - -func export(clusterName logicalcluster.Path, name string, exports ...string) *apisv1alpha1.APIExport { - return &apisv1alpha1.APIExport{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Annotations: map[string]string{ - logicalcluster.AnnotationKey: clusterName.String(), - }, - }, - Spec: apisv1alpha1.APIExportSpec{ - LatestResourceSchemas: exports, - }, - } -} - -func apiResourceSchema(clusterName logicalcluster.Path, prefix string, group string, version string, kind string) *apisv1alpha1.APIResourceSchema { - nameGroup := group - if nameGroup 
== "" { - nameGroup = "core" - } - return &apisv1alpha1.APIResourceSchema{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s.%ss.%s", prefix, strings.ToLower(kind), nameGroup), - Annotations: map[string]string{ - logicalcluster.AnnotationKey: clusterName.String(), - }, - }, - Spec: apisv1alpha1.APIResourceSchemaSpec{ - Group: group, - Names: apiextensionsv1.CustomResourceDefinitionNames{ - Plural: strings.ToLower(kind) + "s", - Singular: strings.ToLower(kind), - Kind: kind, - ListKind: kind + "List", - }, - Scope: "Namespaced", - Versions: []apisv1alpha1.APIResourceVersion{ - { - Name: version, - Served: true, - Storage: true, - Schema: runtime.RawExtension{ - Raw: []byte(`{ - "type": "object", - "properties": { - "spec": { - "type": "object" - } - } - }`), - }, - }, - }, - }, - } -} - -func withExportOwner(schema *apisv1alpha1.APIResourceSchema, exportName string) *apisv1alpha1.APIResourceSchema { - schema.OwnerReferences = append(schema.OwnerReferences, metav1.OwnerReference{ - APIVersion: apisv1alpha1.SchemeGroupVersion.String(), - Kind: "APIExport", - Name: exportName, - Controller: pointer.Bool(true), - BlockOwnerDeletion: pointer.Bool(true), - }) - return schema -} - -func withDifferentOpenAPI(schema *apisv1alpha1.APIResourceSchema, openAPISchema string) *apisv1alpha1.APIResourceSchema { - schema.Spec.Versions[0].Schema.Raw = []byte(openAPISchema) - return schema -} - -func negotiatedAPIResource(clusterName logicalcluster.Path, group string, version string, kind string) *apiresourcev1alpha1.NegotiatedAPIResource { - return &apiresourcev1alpha1.NegotiatedAPIResource{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%ss.%s.%s", strings.ToLower(kind), version, group), - ResourceVersion: "15", - }, - Spec: apiresourcev1alpha1.NegotiatedAPIResourceSpec{ - CommonAPIResourceSpec: apiresourcev1alpha1.CommonAPIResourceSpec{ - GroupVersion: apiresourcev1alpha1.GroupVersion{Group: group, Version: version}, - Scope: "Namespaced", - CustomResourceDefinitionNames: apiextensionsv1.CustomResourceDefinitionNames{ - Plural: strings.ToLower(kind) + "s", - Singular: strings.ToLower(kind), - Kind: kind, - ListKind: kind + "List", - }, - OpenAPIV3Schema: runtime.RawExtension{ - Raw: []byte(`{ - "type": "object", - "properties": { - "spec": { - "type": "object" - } - } - }`), - }, - }, - Publish: false, - }, - } -} - -func toYaml(obj interface{}) string { - bytes, err := yaml.Marshal(obj) - if err != nil { - panic(err) - } - return string(bytes) -} diff --git a/pkg/reconciler/workload/defaultlocation/defaultlocation_controller.go b/pkg/reconciler/workload/defaultlocation/defaultlocation_controller.go deleted file mode 100644 index 83aebdde033..00000000000 --- a/pkg/reconciler/workload/defaultlocation/defaultlocation_controller.go +++ /dev/null @@ -1,221 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/
-
-package defaultlocation
-
-import (
-	"context"
-	"fmt"
-	"time"
-
-	kcpcache "github.com/kcp-dev/apimachinery/v2/pkg/cache"
-	"github.com/kcp-dev/logicalcluster/v3"
-
-	apierrors "k8s.io/apimachinery/pkg/api/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/labels"
-	"k8s.io/apimachinery/pkg/util/runtime"
-	"k8s.io/apimachinery/pkg/util/wait"
-	"k8s.io/client-go/tools/cache"
-	"k8s.io/client-go/util/workqueue"
-	"k8s.io/klog/v2"
-
-	"github.com/kcp-dev/kcp/pkg/logging"
-	apisv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1"
-	schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1"
-	kcpclientset "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster"
-	schedulingv1alpha1informers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/scheduling/v1alpha1"
-	workloadv1alpha1informers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/workload/v1alpha1"
-	schedulingv1alpha1listers "github.com/kcp-dev/kcp/sdk/client/listers/scheduling/v1alpha1"
-	workloadv1alpha1listers "github.com/kcp-dev/kcp/sdk/client/listers/workload/v1alpha1"
-)
-
-const (
-	ControllerName = "kcp-workload-default-location"
-
-	DefaultLocationName = "default"
-)
-
-// NewController returns a new controller instance.
-func NewController(
-	kcpClusterClient kcpclientset.ClusterInterface,
-	syncTargetInformer workloadv1alpha1informers.SyncTargetClusterInformer,
-	locationInformer schedulingv1alpha1informers.LocationClusterInformer,
-) (*controller, error) {
-	queue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), ControllerName)
-
-	c := &controller{
-		queue: queue,
-		enqueueAfter: func(export *apisv1alpha1.APIExport, duration time.Duration) {
-			key, err := kcpcache.MetaClusterNamespaceKeyFunc(export)
-			if err != nil {
-				runtime.HandleError(err)
-				return
-			}
-			queue.AddAfter(key, duration)
-		},
-
-		kcpClusterClient: kcpClusterClient,
-		syncTargetLister: syncTargetInformer.Lister(),
-		locationLister:   locationInformer.Lister(),
-	}
-
-	_, _ = syncTargetInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
-		AddFunc:    func(obj interface{}) { c.enqueue(obj) },
-		DeleteFunc: func(obj interface{}) { c.enqueue(obj) },
-	})
-
-	_, _ = locationInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{
-		FilterFunc: func(obj interface{}) bool {
-			switch t := obj.(type) {
-			case *schedulingv1alpha1.Location:
-				return t.Name == DefaultLocationName
-			}
-			return false
-		},
-		Handler: cache.ResourceEventHandlerFuncs{
-			DeleteFunc: func(obj interface{}) { c.enqueue(obj) },
-		},
-	})
-
-	return c, nil
-}
-
-// controller watches SyncTargets and ensures a default Location exists as
-// soon as there is at least one SyncTarget in a workspace.
-type controller struct {
-	queue        workqueue.RateLimitingInterface
-	enqueueAfter func(*apisv1alpha1.APIExport, time.Duration)
-
-	kcpClusterClient kcpclientset.ClusterInterface
-
-	syncTargetLister workloadv1alpha1listers.SyncTargetClusterLister
-	locationLister   schedulingv1alpha1listers.LocationClusterLister
-}
-
-// enqueue adds the logical cluster to the queue.
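-// The key is the bare logical cluster name rather than the usual
-// cluster|namespace/name form, since this controller reconciles whole
-// workspaces. For illustration, an object annotated with logical cluster
-// "root:org:ws" is enqueued as:
-//
-//	key := clusterName.String() // "root:org:ws"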
-func (c *controller) enqueue(obj interface{}) { - key, err := kcpcache.DeletionHandlingMetaClusterNamespaceKeyFunc(obj) - if err != nil { - runtime.HandleError(err) - return - } - clusterName, _, _, err := kcpcache.SplitMetaClusterNamespaceKey(key) - if err != nil { - runtime.HandleError(err) - return - } - - key = clusterName.String() - logger := logging.WithQueueKey(logging.WithReconciler(klog.Background(), ControllerName), key) - if logObj, ok := obj.(logging.Object); ok { - logger = logging.WithObject(logger, logObj) - } - logger.V(2).Info(fmt.Sprintf("queueing Workspace because of %T", obj)) - c.queue.Add(key) -} - -// Start starts the controller, which stops when ctx.Done() is closed. -func (c *controller) Start(ctx context.Context, numThreads int) { - defer runtime.HandleCrash() - defer c.queue.ShutDown() - - logger := logging.WithReconciler(klog.FromContext(ctx), ControllerName) - ctx = klog.NewContext(ctx, logger) - logger.Info("Starting controller") - defer logger.Info("Shutting down controller") - - for i := 0; i < numThreads; i++ { - go wait.UntilWithContext(ctx, c.startWorker, time.Second) - } - - <-ctx.Done() -} - -func (c *controller) startWorker(ctx context.Context) { - for c.processNextWorkItem(ctx) { - } -} - -func (c *controller) processNextWorkItem(ctx context.Context) bool { - // Wait until there is a new item in the working queue - k, quit := c.queue.Get() - if quit { - return false - } - key := k.(string) - - logger := logging.WithQueueKey(klog.FromContext(ctx), key) - ctx = klog.NewContext(ctx, logger) - logger.V(1).Info("processing key") - - // No matter what, tell the queue we're done with this key, to unblock - // other workers. - defer c.queue.Done(key) - - if err := c.process(ctx, key); err != nil { - runtime.HandleError(fmt.Errorf("%q controller failed to sync %q, err: %w", ControllerName, key, err)) - c.queue.AddRateLimited(key) - return true - } - c.queue.Forget(key) - return true -} - -func (c *controller) process(ctx context.Context, key string) error { - logger := klog.FromContext(ctx) - clusterName := logicalcluster.Name(key) - - syncTargets, err := c.syncTargetLister.Cluster(clusterName).List(labels.Everything()) - if err != nil { - logger.Error(err, "failed to list clusters for workspace") - return err - } - if len(syncTargets) == 0 { - logger.V(3).Info("no clusters found for workspace. 
Not creating APIExport and APIBinding") - return nil - } - - // check that location exists, and create it if not - _, err = c.locationLister.Cluster(clusterName).Get(DefaultLocationName) - if err != nil && !apierrors.IsNotFound(err) { - return err - } else if apierrors.IsNotFound(err) { - location := &schedulingv1alpha1.Location{ - ObjectMeta: metav1.ObjectMeta{ - Name: DefaultLocationName, - Annotations: map[string]string{logicalcluster.AnnotationKey: clusterName.String()}, - }, - Spec: schedulingv1alpha1.LocationSpec{ - Resource: schedulingv1alpha1.GroupVersionResource{ - Group: "workload.kcp.io", - Version: "v1alpha1", - Resource: "synctargets", - }, - InstanceSelector: &metav1.LabelSelector{}, - }, - } - logger = logging.WithObject(logger, location) - logger.Info("creating Location") - _, err = c.kcpClusterClient.Cluster(clusterName.Path()).SchedulingV1alpha1().Locations().Create(ctx, location, metav1.CreateOptions{}) - if err != nil && !apierrors.IsAlreadyExists(err) { - logger.Error(err, "failed to create Location") - return err - } - } - - return nil -} diff --git a/pkg/reconciler/workload/heartbeat/heartbeat_controller.go b/pkg/reconciler/workload/heartbeat/heartbeat_controller.go deleted file mode 100644 index ea053937617..00000000000 --- a/pkg/reconciler/workload/heartbeat/heartbeat_controller.go +++ /dev/null @@ -1,176 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package heartbeat - -import ( - "context" - "fmt" - "time" - - kcpcache "github.com/kcp-dev/apimachinery/v2/pkg/cache" - "github.com/kcp-dev/logicalcluster/v3" - - apierrors "k8s.io/apimachinery/pkg/api/errors" - utilerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/util/workqueue" - "k8s.io/klog/v2" - - "github.com/kcp-dev/kcp/pkg/logging" - "github.com/kcp-dev/kcp/pkg/reconciler/committer" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" - kcpclientset "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster" - workloadv1alpha1client "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/typed/workload/v1alpha1" - workloadv1alpha1informers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/workload/v1alpha1" -) - -const ControllerName = "kcp-synctarget-heartbeat" - -type Controller struct { - queue workqueue.RateLimitingInterface - kcpClusterClient kcpclientset.ClusterInterface - heartbeatThreshold time.Duration - commit CommitFunc - getSyncTarget func(clusterName logicalcluster.Name, name string) (*workloadv1alpha1.SyncTarget, error) -} - -func NewController( - kcpClusterClient kcpclientset.ClusterInterface, - syncTargetInformer workloadv1alpha1informers.SyncTargetClusterInformer, - heartbeatThreshold time.Duration, -) (*Controller, error) { - queue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), ControllerName) - - c := &Controller{ - queue: queue, - kcpClusterClient: kcpClusterClient, - heartbeatThreshold: heartbeatThreshold, - commit: committer.NewCommitter[*SyncTarget, Patcher, *SyncTargetSpec, *SyncTargetStatus](kcpClusterClient.WorkloadV1alpha1().SyncTargets()), - getSyncTarget: func(clusterName logicalcluster.Name, name string) (*workloadv1alpha1.SyncTarget, error) { - return syncTargetInformer.Cluster(clusterName).Lister().Get(name) - }, - } - - _, _ = syncTargetInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { c.enqueue(obj) }, - UpdateFunc: func(_, obj interface{}) { c.enqueue(obj) }, - }) - - return c, nil -} - -type SyncTarget = workloadv1alpha1.SyncTarget -type SyncTargetSpec = workloadv1alpha1.SyncTargetSpec -type SyncTargetStatus = workloadv1alpha1.SyncTargetStatus -type Patcher = workloadv1alpha1client.SyncTargetInterface -type Resource = committer.Resource[*SyncTargetSpec, *SyncTargetStatus] -type CommitFunc = func(context.Context, *Resource, *Resource) error - -func (c *Controller) enqueue(obj interface{}) { - key, err := kcpcache.MetaClusterNamespaceKeyFunc(obj) - if err != nil { - runtime.HandleError(err) - return - } - - logger := logging.WithQueueKey(logging.WithReconciler(klog.Background(), ControllerName), key) - logger.V(2).Info("queueing SyncTarget") - c.queue.Add(key) -} - -func (c *Controller) Start(ctx context.Context) { - defer runtime.HandleCrash() - defer c.queue.ShutDown() - - logger := logging.WithReconciler(klog.FromContext(ctx), ControllerName) - ctx = klog.NewContext(ctx, logger) - logger.Info("Starting controller") - defer logger.Info("Shutting down controller") - - go wait.UntilWithContext(ctx, c.startWorker, time.Second) - - <-ctx.Done() -} - -func (c *Controller) startWorker(ctx context.Context) { - for c.processNextWorkItem(ctx) { - } -} - -func (c *Controller) processNextWorkItem(ctx context.Context) bool { - // Wait until there is a new item in the working queue - k, quit := c.queue.Get() 
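-	// Get blocks until an item is available; quit is true once ShutDown
-	// has been called and the queue is drained, ending the worker loop.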
-	if quit {
-		return false
-	}
-	key := k.(string)
-
-	logger := logging.WithQueueKey(klog.FromContext(ctx), key)
-	ctx = klog.NewContext(ctx, logger)
-	logger.V(1).Info("processing key")
-
-	// No matter what, tell the queue we're done with this key, to unblock
-	// other workers.
-	defer c.queue.Done(key)
-
-	if err := c.process(ctx, key); err != nil {
-		runtime.HandleError(fmt.Errorf("%s: failed to sync %q, err: %w", ControllerName, key, err))
-		c.queue.AddRateLimited(key)
-		return true
-	}
-	c.queue.Forget(key)
-	return true
-}
-
-func (c *Controller) process(ctx context.Context, key string) error {
-	logger := klog.FromContext(ctx)
-	clusterName, _, name, err := kcpcache.SplitMetaClusterNamespaceKey(key)
-	if err != nil {
-		logger.Error(err, "error parsing key")
-		return nil
-	}
-
-	current, err := c.getSyncTarget(clusterName, name)
-	if err != nil {
-		if !apierrors.IsNotFound(err) {
-			logger.Error(err, "failed to get SyncTarget from lister", "cluster", clusterName, "name", name)
-		}
-
-		return nil
-	}
-
-	previous := current
-	current = current.DeepCopy()
-
-	logger = logging.WithObject(logger, previous)
-	ctx = klog.NewContext(ctx, logger)
-
-	var errs []error
-	if err := c.reconcile(ctx, key, current); err != nil {
-		errs = append(errs, err)
-	}
-
-	oldResource := &Resource{ObjectMeta: previous.ObjectMeta, Spec: &previous.Spec, Status: &previous.Status}
-	newResource := &Resource{ObjectMeta: current.ObjectMeta, Spec: &current.Spec, Status: &current.Status}
-	if err := c.commit(ctx, oldResource, newResource); err != nil {
-		errs = append(errs, err)
-	}
-
-	return utilerrors.NewAggregate(errs)
-}
diff --git a/pkg/reconciler/workload/heartbeat/heartbeat_reconciler.go b/pkg/reconciler/workload/heartbeat/heartbeat_reconciler.go
deleted file mode 100644
index ed4577ba3a7..00000000000
--- a/pkg/reconciler/workload/heartbeat/heartbeat_reconciler.go
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
-Copyright 2022 The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/ - -package heartbeat - -import ( - "context" - "time" - - "k8s.io/klog/v2" - - conditionsv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/apis/conditions/v1alpha1" - "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/util/conditions" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" -) - -func (c *Controller) reconcile(ctx context.Context, key string, cluster *workloadv1alpha1.SyncTarget) error { - logger := klog.FromContext(ctx) - defer conditions.SetSummary( - cluster, - conditions.WithConditions( - workloadv1alpha1.SyncerReady, - workloadv1alpha1.APIImporterReady, - workloadv1alpha1.HeartbeatHealthy, - ), - ) - - latestHeartbeat := time.Time{} - if cluster.Status.LastSyncerHeartbeatTime != nil { - latestHeartbeat = cluster.Status.LastSyncerHeartbeatTime.Time - } - if latestHeartbeat.IsZero() { - logger.V(5).Info("marking HeartbeatHealthy false for SyncTarget due to no heartbeat") - conditions.MarkFalse(cluster, - workloadv1alpha1.HeartbeatHealthy, - workloadv1alpha1.ErrorHeartbeatMissedReason, - conditionsv1alpha1.ConditionSeverityWarning, - "No heartbeat yet seen") - } else if time.Since(latestHeartbeat) > c.heartbeatThreshold { - logger.V(5).Info("marking HeartbeatHealthy false for SyncTarget due to a stale heartbeat") - conditions.MarkFalse(cluster, - workloadv1alpha1.HeartbeatHealthy, - workloadv1alpha1.ErrorHeartbeatMissedReason, - conditionsv1alpha1.ConditionSeverityWarning, - "No heartbeat since %s", latestHeartbeat) - } else { - logger.V(5).Info("marking Heartbeat healthy true for SyncTarget") - conditions.MarkTrue(cluster, workloadv1alpha1.HeartbeatHealthy) - - // Enqueue another check after which the heartbeat should have been updated again. - dur := time.Until(latestHeartbeat.Add(c.heartbeatThreshold)) - c.queue.AddAfter(key, dur) - } - - return nil -} diff --git a/pkg/reconciler/workload/heartbeat/heartbeat_reconciler_test.go b/pkg/reconciler/workload/heartbeat/heartbeat_reconciler_test.go deleted file mode 100644 index c59f18443e6..00000000000 --- a/pkg/reconciler/workload/heartbeat/heartbeat_reconciler_test.go +++ /dev/null @@ -1,97 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package heartbeat - -import ( - "context" - "testing" - "time" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/util/workqueue" - - conditionsv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/apis/conditions/v1alpha1" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" -) - -type fakeDelayingQueue struct { - workqueue.RateLimitingInterface - duration time.Duration -} - -var _ workqueue.DelayingInterface = (*fakeDelayingQueue)(nil) - -func (f *fakeDelayingQueue) AddAfter(obj interface{}, duration time.Duration) { - f.duration = duration -} - -func TestReconcile(t *testing.T) { - for _, tc := range []struct { - desc string - lastHeartbeatTime time.Time - wantDur time.Duration - wantReady bool - }{{ - desc: "no last heartbeat", - wantReady: false, - }, { - desc: "recent enough heartbeat", - lastHeartbeatTime: time.Now().Add(-10 * time.Second), - wantDur: 50 * time.Second, - wantReady: true, - }, { - desc: "not recent enough heartbeat", - lastHeartbeatTime: time.Now().Add(-90 * time.Second), - wantReady: false, - }} { - t.Run(tc.desc, func(t *testing.T) { - queue := &fakeDelayingQueue{ - RateLimitingInterface: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "testing"), - } - c := &Controller{ - queue: queue, - heartbeatThreshold: time.Minute, - } - ctx := context.Background() - heartbeat := metav1.NewTime(tc.lastHeartbeatTime) - syncTarget := &workloadv1alpha1.SyncTarget{ - Status: workloadv1alpha1.SyncTargetStatus{ - Conditions: []conditionsv1alpha1.Condition{{ - Type: workloadv1alpha1.HeartbeatHealthy, - Status: corev1.ConditionTrue, - }}, - LastSyncerHeartbeatTime: &heartbeat, - }, - } - if err := c.reconcile(ctx, "somekey", syncTarget); err != nil { - t.Fatalf("reconcile: %v", err) - } - - // actual enqueued time must not be more than 30ms off from desired enqueue time. - delta := 30 * time.Millisecond - if tc.wantDur-delta > queue.duration { - t.Errorf("next enqueue time; got %s, want %s", queue.duration, tc.wantDur) - } - isReady := syncTarget.GetConditions()[0].Status == corev1.ConditionTrue - if isReady != tc.wantReady { - t.Errorf("SyncTarget Ready; got %t, want %t", isReady, tc.wantReady) - } - // TODO: check wantReady. - }) - } -} diff --git a/pkg/reconciler/workload/heartbeat/options/options.go b/pkg/reconciler/workload/heartbeat/options/options.go deleted file mode 100644 index 5e84a930058..00000000000 --- a/pkg/reconciler/workload/heartbeat/options/options.go +++ /dev/null @@ -1,54 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package options - -import ( - "fmt" - "time" - - "github.com/spf13/pflag" -) - -func NewOptions() *Options { - return &Options{ - HeartbeatThreshold: time.Minute, - } -} - -func (o *Options) AddFlags(fs *pflag.FlagSet) { - if o == nil { - return - } - - fs.DurationVar(&o.HeartbeatThreshold, "sync-target-heartbeat-threshold", o.HeartbeatThreshold, "Amount of time to wait for a successful heartbeat before marking the cluster as not ready") -} - -type Options struct { - HeartbeatThreshold time.Duration -} - -func (o *Options) Validate() error { - if o == nil { - return nil - } - - if o.HeartbeatThreshold <= 0 { - return fmt.Errorf("--sync-target-heartbeat-threshold must be >0 (%s)", o.HeartbeatThreshold) - } - - return nil -} diff --git a/pkg/reconciler/workload/namespace/namespace_controller.go b/pkg/reconciler/workload/namespace/namespace_controller.go deleted file mode 100644 index e48ecdf77c4..00000000000 --- a/pkg/reconciler/workload/namespace/namespace_controller.go +++ /dev/null @@ -1,252 +0,0 @@ -/* -Copyright 2021 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package namespace - -import ( - "context" - "fmt" - "time" - - kcpcache "github.com/kcp-dev/apimachinery/v2/pkg/cache" - kcpcorev1informers "github.com/kcp-dev/client-go/informers/core/v1" - kcpkubernetesclientset "github.com/kcp-dev/client-go/kubernetes" - "github.com/kcp-dev/logicalcluster/v3" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - utilerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/wait" - corev1client "k8s.io/client-go/kubernetes/typed/core/v1" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/util/workqueue" - "k8s.io/klog/v2" - - "github.com/kcp-dev/kcp/pkg/logging" - "github.com/kcp-dev/kcp/pkg/reconciler/apis/apiexport" - "github.com/kcp-dev/kcp/pkg/reconciler/committer" - schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1" - schedulingv1alpha1informers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/scheduling/v1alpha1" -) - -const ( - ControllerName = "kcp-namespace-scheduling-placement" -) - -// NewController returns a new controller starting the process of placing namespaces onto locations by creating -// a placement annotation. 
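-// A rough sketch of the annotation's shape; the literal key is assumed
-// here from the scheduling.kcp.io API group, and the value form is taken
-// from the test fixtures elsewhere in this patch:
-//
-//	metadata:
-//	  annotations:
-//	    scheduling.kcp.io/placement: '{"test-placement":"Bound"}'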
-func NewController( - kubeClusterClient kcpkubernetesclientset.ClusterInterface, - namespaceInformer kcpcorev1informers.NamespaceClusterInformer, - placementInformer schedulingv1alpha1informers.PlacementClusterInformer, -) (*controller, error) { - queue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), ControllerName) - - c := &controller{ - queue: queue, - - kubeClusterClient: kubeClusterClient, - - listNamespaces: func(clusterName logicalcluster.Name) ([]*corev1.Namespace, error) { - return namespaceInformer.Cluster(clusterName).Lister().List(labels.Everything()) - }, - getNamespace: func(clusterName logicalcluster.Name, name string) (*corev1.Namespace, error) { - return namespaceInformer.Cluster(clusterName).Lister().Get(name) - }, - listPlacements: func(clusterName logicalcluster.Name) ([]*schedulingv1alpha1.Placement, error) { - return placementInformer.Cluster(clusterName).Lister().List(labels.Everything()) - }, - commit: committer.NewCommitter[*Namespace, Patcher, *NamespaceSpec, *NamespaceStatus](kubeClusterClient.CoreV1().Namespaces()), - now: time.Now, - } - - // namespaceBlocklist holds a set of namespaces that should never be synced from kcp to physical clusters. - var namespaceBlocklist = sets.New[string]("kube-system", "kube-public", "kube-node-lease", apiexport.DefaultIdentitySecretNamespace) - _, _ = namespaceInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ - FilterFunc: func(obj interface{}) bool { - switch ns := obj.(type) { - case *corev1.Namespace: - return !namespaceBlocklist.Has(ns.Name) - case cache.DeletedFinalStateUnknown: - return true - default: - return false - } - }, - Handler: cache.ResourceEventHandlerFuncs{ - AddFunc: c.enqueueNamespace, - UpdateFunc: func(_, obj interface{}) { c.enqueueNamespace(obj) }, - DeleteFunc: c.enqueueNamespace, - }, - }) - - _, _ = placementInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { c.enqueuePlacement(obj) }, - UpdateFunc: func(_, obj interface{}) { c.enqueuePlacement(obj) }, - DeleteFunc: func(obj interface{}) { c.enqueuePlacement(obj) }, - }) - - return c, nil -} - -// controller. 
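-// It depends on plain list/get functions and a commit helper rather than
-// concrete clients, which is what lets the reconcile tests in this package
-// stub them out with fixtures.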
-type controller struct { - queue workqueue.RateLimitingInterface - - kubeClusterClient kcpkubernetesclientset.ClusterInterface - - listNamespaces func(clusterName logicalcluster.Name) ([]*corev1.Namespace, error) - getNamespace func(clusterName logicalcluster.Name, name string) (*corev1.Namespace, error) - listPlacements func(clusterName logicalcluster.Name) ([]*schedulingv1alpha1.Placement, error) - commit CommitFunc - now func() time.Time -} - -type Namespace = corev1.Namespace -type NamespaceSpec = corev1.NamespaceSpec -type NamespaceStatus = corev1.NamespaceStatus -type Patcher = corev1client.NamespaceInterface -type Resource = committer.Resource[*NamespaceSpec, *NamespaceStatus] -type CommitFunc = func(ctx context.Context, original, updated *Resource) error - -func (c *controller) enqueueNamespace(obj interface{}) { - key, err := kcpcache.DeletionHandlingMetaClusterNamespaceKeyFunc(obj) - if err != nil { - runtime.HandleError(err) - return - } - - logger := logging.WithQueueKey(logging.WithReconciler(klog.Background(), ControllerName), key) - logger.V(2).Info("queueing Namespace") - c.queue.Add(key) -} - -func (c *controller) enqueuePlacement(obj interface{}) { - key, err := kcpcache.DeletionHandlingMetaClusterNamespaceKeyFunc(obj) - if err != nil { - runtime.HandleError(err) - return - } - clusterName, _, _, err := kcpcache.SplitMetaClusterNamespaceKey(key) - if err != nil { - runtime.HandleError(err) - return - } - - namespaces, err := c.listNamespaces(clusterName) - if err != nil { - runtime.HandleError(err) - return - } - - logger := logging.WithObject(logging.WithReconciler(klog.Background(), ControllerName), obj.(*schedulingv1alpha1.Placement)) - for _, ns := range namespaces { - logger = logging.WithObject(logger, ns) - - nsKey, err := kcpcache.MetaClusterNamespaceKeyFunc(ns) - if err != nil { - runtime.HandleError(err) - continue - } - logging.WithQueueKey(logger, nsKey).V(2).Info("queueing Namespace because of Placement") - c.queue.Add(nsKey) - } -} - -// Start starts the controller, which stops when ctx.Done() is closed. -func (c *controller) Start(ctx context.Context, numThreads int) { - defer runtime.HandleCrash() - defer c.queue.ShutDown() - - logger := logging.WithReconciler(klog.FromContext(ctx), ControllerName) - ctx = klog.NewContext(ctx, logger) - logger.Info("Starting controller") - defer logger.Info("Shutting down controller") - - for i := 0; i < numThreads; i++ { - go wait.UntilWithContext(ctx, c.startWorker, time.Second) - } - - <-ctx.Done() -} - -func (c *controller) startWorker(ctx context.Context) { - for c.processNextWorkItem(ctx) { - } -} - -func (c *controller) processNextWorkItem(ctx context.Context) bool { - // Wait until there is a new item in the working queue - k, quit := c.queue.Get() - if quit { - return false - } - key := k.(string) - - logger := logging.WithQueueKey(klog.FromContext(ctx), key) - ctx = klog.NewContext(ctx, logger) - logger.V(1).Info("processing key") - - // No matter what, tell the queue we're done with this key, to unblock - // other workers. 
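-	// Done (deferred below) marks the key as no longer in flight; on
-	// success Forget clears its rate-limiting history, while on error
-	// AddRateLimited re-enqueues it with backoff.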
- defer c.queue.Done(key) - - if err := c.process(ctx, key); err != nil { - runtime.HandleError(fmt.Errorf("%q controller failed to sync %q, err: %w", ControllerName, key, err)) - c.queue.AddRateLimited(key) - return true - } - c.queue.Forget(key) - return true -} - -func (c *controller) process(ctx context.Context, key string) error { - logger := klog.FromContext(ctx) - clusterName, _, name, err := kcpcache.SplitMetaClusterNamespaceKey(key) - if err != nil { - logger.Error(err, "invalid key") - return nil - } - - ns, err := c.getNamespace(clusterName, name) - if err != nil { - if errors.IsNotFound(err) { - return nil // object deleted before we handled it - } - return err - } - old := ns - ns = ns.DeepCopy() - - logger = logging.WithObject(logger, ns) - ctx = klog.NewContext(ctx, logger) - - var errs []error - if err := c.reconcile(ctx, key, ns); err != nil { - errs = append(errs, err) - } - - oldResource := &Resource{ObjectMeta: old.ObjectMeta, Spec: &old.Spec, Status: &old.Status} - newResource := &Resource{ObjectMeta: ns.ObjectMeta, Spec: &ns.Spec, Status: &ns.Status} - if err := c.commit(ctx, oldResource, newResource); err != nil { - errs = append(errs, err) - } - - return utilerrors.NewAggregate(errs) -} diff --git a/pkg/reconciler/workload/namespace/namespace_reconcile.go b/pkg/reconciler/workload/namespace/namespace_reconcile.go deleted file mode 100644 index 44d63cfa6d0..00000000000 --- a/pkg/reconciler/workload/namespace/namespace_reconcile.go +++ /dev/null @@ -1,56 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package namespace - -import ( - "context" - "time" - - corev1 "k8s.io/api/core/v1" -) - -type reconcileResult struct { - stop bool - requeueAfter time.Duration -} - -type reconcileFunc func(ctx context.Context, key string, ns *corev1.Namespace) (reconcileResult, error) - -func (c *controller) reconcile(ctx context.Context, key string, ns *corev1.Namespace) error { - reconcilers := []reconcileFunc{ - c.reconcilePlacementBind, - c.reconcileScheduling, - c.reconcileStatus, - } - - for _, r := range reconcilers { - result, err := r(ctx, key, ns) - if err != nil { - return err - } - - if result.stop { - break - } - - if result.requeueAfter > 0 { - c.queue.AddAfter(key, result.requeueAfter) - } - } - - return nil -} diff --git a/pkg/reconciler/workload/namespace/namespace_reconcile_placementbind.go b/pkg/reconciler/workload/namespace/namespace_reconcile_placementbind.go deleted file mode 100644 index d1e945764dd..00000000000 --- a/pkg/reconciler/workload/namespace/namespace_reconcile_placementbind.go +++ /dev/null @@ -1,101 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package namespace - -import ( - "context" - - "github.com/kcp-dev/logicalcluster/v3" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - - schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1" - "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/util/conditions" -) - -// reconcilePlacementBind updates the existing scheduling.kcp.io/placement annotation and creates an -// empty one if at least one placement matches and there is no annotation. It deletes the annotation -// if there is no matched placement. -// -// TODO this should be reconsidered when we want lazy binding. -func (c *controller) reconcilePlacementBind( - _ context.Context, - _ string, - ns *corev1.Namespace, -) (reconcileResult, error) { - clusterName := logicalcluster.From(ns) - - validPlacements, err := c.validPlacements(clusterName, ns) - if err != nil { - return reconcileResult{stop: true}, err - } - - _, hasPlacement := ns.Annotations[schedulingv1alpha1.PlacementAnnotationKey] - shouldHavePlacement := len(validPlacements) > 0 - - switch { - case shouldHavePlacement && hasPlacement, !shouldHavePlacement && !hasPlacement: - return reconcileResult{}, nil - case shouldHavePlacement && !hasPlacement: - if ns.Annotations == nil { - ns.Annotations = make(map[string]string) - } - ns.Annotations[schedulingv1alpha1.PlacementAnnotationKey] = "" - case !shouldHavePlacement && hasPlacement: - delete(ns.Annotations, schedulingv1alpha1.PlacementAnnotationKey) - } - - return reconcileResult{stop: true}, nil -} - -func (c *controller) validPlacements(clusterName logicalcluster.Name, ns *corev1.Namespace) ([]*schedulingv1alpha1.Placement, error) { - placements, err := c.listPlacements(clusterName) - - if err != nil { - return nil, err - } - - return filterValidPlacements(ns, placements), nil -} - -func filterValidPlacements(ns *corev1.Namespace, placements []*schedulingv1alpha1.Placement) []*schedulingv1alpha1.Placement { - var candidates []*schedulingv1alpha1.Placement - for _, placement := range placements { - if placement.Status.Phase == schedulingv1alpha1.PlacementPending { - continue - } - if conditions.IsFalse(placement, schedulingv1alpha1.PlacementReady) { - continue - } - if isPlacementValidForNS(ns, placement) { - candidates = append(candidates, placement) - } - } - - return candidates -} - -func isPlacementValidForNS(ns *corev1.Namespace, placement *schedulingv1alpha1.Placement) bool { - selector, err := metav1.LabelSelectorAsSelector(placement.Spec.NamespaceSelector) - if err != nil { - return false - } - - return selector.Matches(labels.Set(ns.Labels)) -} diff --git a/pkg/reconciler/workload/namespace/namespace_reconcile_placementbind_test.go b/pkg/reconciler/workload/namespace/namespace_reconcile_placementbind_test.go deleted file mode 100644 index dad6cbf2d99..00000000000 --- a/pkg/reconciler/workload/namespace/namespace_reconcile_placementbind_test.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright 2022 The KCP Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package namespace - -import ( - "context" - "testing" - - "github.com/kcp-dev/logicalcluster/v3" - "github.com/stretchr/testify/require" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1" - conditionsv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/apis/conditions/v1alpha1" - "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/util/conditions" -) - -func TestBindPlacement(t *testing.T) { - testCases := []struct { - name string - placementPhase schedulingv1alpha1.PlacementPhase - isReady bool - labels map[string]string - annotations map[string]string - namespaceSelector *metav1.LabelSelector - - expectedAnnotation map[string]string - expectStop bool - }{ - { - name: "placement is pending", - placementPhase: schedulingv1alpha1.PlacementPending, - isReady: true, - }, - { - name: "placement is not ready", - placementPhase: schedulingv1alpha1.PlacementBound, - isReady: false, - }, - { - name: "placement does not select the namespace", - placementPhase: schedulingv1alpha1.PlacementBound, - isReady: true, - labels: map[string]string{ - "foor": "bar", - }, - namespaceSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"foo1": "bar1"}, - }, - }, - { - name: "choose a placement", - placementPhase: schedulingv1alpha1.PlacementBound, - isReady: true, - namespaceSelector: &metav1.LabelSelector{}, - expectStop: true, - expectedAnnotation: map[string]string{ - schedulingv1alpha1.PlacementAnnotationKey: "", - }, - }, - { - name: "do not patch if there is existing placement annotation", - placementPhase: schedulingv1alpha1.PlacementBound, - annotations: map[string]string{ - schedulingv1alpha1.PlacementAnnotationKey: "", - }, - isReady: true, - namespaceSelector: &metav1.LabelSelector{}, - expectedAnnotation: map[string]string{ - schedulingv1alpha1.PlacementAnnotationKey: "", - }, - }, - { - name: "update if existing placement is not ready", - placementPhase: schedulingv1alpha1.PlacementBound, - annotations: map[string]string{ - schedulingv1alpha1.PlacementAnnotationKey: `{"test-placement":"Bound"}`, - }, - isReady: false, - namespaceSelector: &metav1.LabelSelector{}, - expectStop: true, - expectedAnnotation: map[string]string{}, - }, - } - - for _, testCase := range testCases { - t.Run(testCase.name, func(t *testing.T) { - ns := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Labels: testCase.labels, - Annotations: testCase.annotations, - }, - } - - testPlacement := &schedulingv1alpha1.Placement{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-placement", - }, - Spec: schedulingv1alpha1.PlacementSpec{ - NamespaceSelector: testCase.namespaceSelector, - }, - Status: schedulingv1alpha1.PlacementStatus{ - Phase: testCase.placementPhase, - }, - } - - if testCase.isReady { - conditions.MarkTrue(testPlacement, schedulingv1alpha1.PlacementReady) - } else { - conditions.MarkFalse( - testPlacement, schedulingv1alpha1.PlacementReady, 
"TestNotReady", conditionsv1alpha1.ConditionSeverityError, "") - } - - listPlacement := func(clusterName logicalcluster.Name) ([]*schedulingv1alpha1.Placement, error) { - return []*schedulingv1alpha1.Placement{testPlacement}, nil - } - - c := &controller{ - listPlacements: listPlacement, - } - - result, err := c.reconcilePlacementBind(context.Background(), "key", ns) - require.NoError(t, err) - require.Equal(t, testCase.expectedAnnotation, ns.Annotations) - require.Equal(t, testCase.expectStop, result.stop) - require.Zero(t, result.requeueAfter) - }) - } -} diff --git a/pkg/reconciler/workload/namespace/namespace_reconcile_scheduling.go b/pkg/reconciler/workload/namespace/namespace_reconcile_scheduling.go deleted file mode 100644 index bac53080608..00000000000 --- a/pkg/reconciler/workload/namespace/namespace_reconcile_scheduling.go +++ /dev/null @@ -1,161 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package namespace - -import ( - "context" - "strings" - "time" - - "github.com/kcp-dev/logicalcluster/v3" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/klog/v2" - - schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" -) - -const removingGracePeriod = 5 * time.Second - -// reconcileScheduling reconciles the state.workload.kcp.io/ labels according the -// selected synctarget stored in the internal.workload.kcp.io/synctarget annotation -// on each placement. -func (c *controller) reconcileScheduling( - ctx context.Context, - _ string, - ns *corev1.Namespace, -) (reconcileResult, error) { - logger := klog.FromContext(ctx) - clusterName := logicalcluster.From(ns) - - validPlacements := []*schedulingv1alpha1.Placement{} - _, foundPlacement := ns.Annotations[schedulingv1alpha1.PlacementAnnotationKey] - - if foundPlacement { - placements, err := c.listPlacements(clusterName) - if err != nil { - return reconcileResult{}, err - } - - validPlacements = filterValidPlacements(ns, placements) - } - - // 1. pick all synctargets in all bound placements - scheduledSyncTargets := sets.New[string]() - for _, placement := range validPlacements { - currentScheduled, foundScheduled := placement.Annotations[workloadv1alpha1.InternalSyncTargetPlacementAnnotationKey] - if !foundScheduled { - continue - } - scheduledSyncTargets.Insert(currentScheduled) - } - - // 2. find the scheduled synctarget to the ns, including synced, removing - syncStatus := syncStatusFor(ns) - - // 3. if the synced synctarget is not in the scheduled synctargets, mark it as removing. - changed := false - annotations := ns.Annotations - labels := ns.Labels - - for syncTarget := range syncStatus.active { - if !scheduledSyncTargets.Has(syncTarget) { - // it is no longer a synced synctarget, mark it as removing. 
- now := c.now().UTC().Format(time.RFC3339) - annotations[workloadv1alpha1.InternalClusterDeletionTimestampAnnotationPrefix+syncTarget] = now - changed = true - logger.WithValues("syncTarget", syncTarget).V(4).Info("setting SyncTarget as removing for Namespace since it is not a valid syncTarget anymore") - } - } - - // 4. remove the synctarget after grace period - minEnqueueDuration := removingGracePeriod + 1 - for cluster, removingTime := range syncStatus.pendingRemoval { - if removingTime.Add(removingGracePeriod).Before(c.now()) { - delete(labels, workloadv1alpha1.ClusterResourceStateLabelPrefix+cluster) - delete(annotations, workloadv1alpha1.InternalClusterDeletionTimestampAnnotationPrefix+cluster) - changed = true - logger.WithValues("syncTarget", cluster).V(4).Info("removing SyncTarget for Namespace") - } else { - enqueueDuration := time.Until(removingTime.Add(removingGracePeriod)) - if enqueueDuration < minEnqueueDuration { - minEnqueueDuration = enqueueDuration - } - } - } - - // 5. if a scheduled synctarget is not in synced and removing, add it in to the label - for scheduledSyncTarget := range scheduledSyncTargets { - if syncStatus.active.Has(scheduledSyncTarget) { - continue - } - if _, ok := syncStatus.pendingRemoval[scheduledSyncTarget]; ok { - continue - } - - if labels == nil { - labels = make(map[string]string) - ns.Labels = labels - } - labels[workloadv1alpha1.ClusterResourceStateLabelPrefix+scheduledSyncTarget] = string(workloadv1alpha1.ResourceStateSync) - changed = true - logger.WithValues("syncTarget", scheduledSyncTarget).V(4).Info("setting syncTarget as sync for Namespace") - } - - // 6. Requeue at last to check if removing syncTarget should be removed later. - var requeueAfter time.Duration - if minEnqueueDuration <= removingGracePeriod { - logger.WithValues("after", minEnqueueDuration).V(2).Info("enqueue Namespace later") - requeueAfter = minEnqueueDuration - } - - return reconcileResult{stop: changed, requeueAfter: requeueAfter}, nil -} - -func syncStatusFor(ns *corev1.Namespace) namespaceSyncStatus { - status := namespaceSyncStatus{ - active: sets.New[string](), - pendingRemoval: make(map[string]time.Time), - } - - for k := range ns.Labels { - if !strings.HasPrefix(k, workloadv1alpha1.ClusterResourceStateLabelPrefix) { - continue - } - - syncTarget := strings.TrimPrefix(k, workloadv1alpha1.ClusterResourceStateLabelPrefix) - - deletionAnnotationKey := workloadv1alpha1.InternalClusterDeletionTimestampAnnotationPrefix + syncTarget - - if value, ok := ns.Annotations[deletionAnnotationKey]; ok { - removingTime, _ := time.Parse(time.RFC3339, value) - status.pendingRemoval[syncTarget] = removingTime - continue - } - - status.active.Insert(syncTarget) - } - - return status -} - -type namespaceSyncStatus struct { - active sets.Set[string] - pendingRemoval map[string]time.Time -} diff --git a/pkg/reconciler/workload/namespace/namespace_reconcile_scheduling_test.go b/pkg/reconciler/workload/namespace/namespace_reconcile_scheduling_test.go deleted file mode 100644 index c6d5f232918..00000000000 --- a/pkg/reconciler/workload/namespace/namespace_reconcile_scheduling_test.go +++ /dev/null @@ -1,365 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package namespace
-
-import (
-	"context"
-	"testing"
-	"time"
-
-	"github.com/kcp-dev/logicalcluster/v3"
-	"github.com/stretchr/testify/require"
-
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/client-go/util/workqueue"
-
-	schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1"
-	workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1"
-)
-
-func TestScheduling(t *testing.T) {
-	now := time.Now()
-	now3339 := now.UTC().Format(time.RFC3339)
-
-	testCases := []struct {
-		name string
-
-		noPlacements bool
-		placement    *schedulingv1alpha1.Placement
-
-		labels      map[string]string
-		annotations map[string]string
-
-		expectStop          bool
-		expectedLabels      map[string]string
-		expectedAnnotations map[string]string
-		expectRequeue       bool
-	}{
-		{
-			name: "placement not found",
-			labels: map[string]string{
-				workloadv1alpha1.ClusterResourceStateLabelPrefix + "cluster1": string(workloadv1alpha1.ResourceStateSync),
-			},
-			annotations: map[string]string{
-				schedulingv1alpha1.PlacementAnnotationKey: "",
-			},
-			noPlacements: true,
-			expectStop:   true,
-			expectedAnnotations: map[string]string{
-				schedulingv1alpha1.PlacementAnnotationKey: "",
-				workloadv1alpha1.InternalClusterDeletionTimestampAnnotationPrefix + "cluster1": now3339,
-			},
-			expectedLabels: map[string]string{
-				workloadv1alpha1.ClusterResourceStateLabelPrefix + "cluster1": string(workloadv1alpha1.ResourceStateSync),
-			},
-		},
-		{
-			name: "schedule a synctarget",
-			annotations: map[string]string{
-				schedulingv1alpha1.PlacementAnnotationKey: "",
-			},
-			placement:  newPlacement("test-placement", "test-location", "test-cluster"),
-			expectStop: true,
-			expectedAnnotations: map[string]string{
-				schedulingv1alpha1.PlacementAnnotationKey: "",
-			},
-			expectedLabels: map[string]string{
-				workloadv1alpha1.ClusterResourceStateLabelPrefix + "34sZi3721YwBLDHUuNVIOLxuYp5nEZBpsTQyDq": string(workloadv1alpha1.ResourceStateSync),
-			},
-		},
-		{
-			name: "no update when synctarget is scheduled",
-			annotations: map[string]string{
-				schedulingv1alpha1.PlacementAnnotationKey: "",
-			},
-			labels: map[string]string{
-				workloadv1alpha1.ClusterResourceStateLabelPrefix + "34sZi3721YwBLDHUuNVIOLxuYp5nEZBpsTQyDq": string(workloadv1alpha1.ResourceStateSync),
-			},
-			placement: newPlacement("test-placement", "test-location", "test-cluster"),
-			expectedAnnotations: map[string]string{
-				schedulingv1alpha1.PlacementAnnotationKey: "",
-			},
-			expectedLabels: map[string]string{
-				workloadv1alpha1.ClusterResourceStateLabelPrefix + "34sZi3721YwBLDHUuNVIOLxuYp5nEZBpsTQyDq": string(workloadv1alpha1.ResourceStateSync),
-			},
-		},
-		{
-			name: "synctarget becomes not ready",
-			annotations: map[string]string{
-				schedulingv1alpha1.PlacementAnnotationKey: "",
-			},
-			labels: map[string]string{
-				workloadv1alpha1.ClusterResourceStateLabelPrefix + "34sZi3721YwBLDHUuNVIOLxuYp5nEZBpsTQyDq": string(workloadv1alpha1.ResourceStateSync),
-			},
-			placement:  newPlacement("test-placement", "test-location", ""),
-			expectStop: true,
-			expectedAnnotations: map[string]string{
-				schedulingv1alpha1.PlacementAnnotationKey: "",
-				workloadv1alpha1.InternalClusterDeletionTimestampAnnotationPrefix + "34sZi3721YwBLDHUuNVIOLxuYp5nEZBpsTQyDq": now3339,
-			},
-			expectedLabels: map[string]string{
-				workloadv1alpha1.ClusterResourceStateLabelPrefix + "34sZi3721YwBLDHUuNVIOLxuYp5nEZBpsTQyDq": string(workloadv1alpha1.ResourceStateSync),
-			},
-		},
-		{
-			name: "select a new synctarget",
-			annotations: map[string]string{
-				schedulingv1alpha1.PlacementAnnotationKey: "",
-			},
-			labels: map[string]string{
-				workloadv1alpha1.ClusterResourceStateLabelPrefix + "34sZi3721YwBLDHUuNVIOLxuYp5nEZBpsTQyDq": string(workloadv1alpha1.ResourceStateSync),
-			},
-			placement:  newPlacement("test-placement", "test-location", "test-cluster-2"),
-			expectStop: true,
-			expectedAnnotations: map[string]string{
-				schedulingv1alpha1.PlacementAnnotationKey: "",
-				workloadv1alpha1.InternalClusterDeletionTimestampAnnotationPrefix + "34sZi3721YwBLDHUuNVIOLxuYp5nEZBpsTQyDq": now3339,
-			},
-			expectedLabels: map[string]string{
-				workloadv1alpha1.ClusterResourceStateLabelPrefix + "34sZi3721YwBLDHUuNVIOLxuYp5nEZBpsTQyDq": string(workloadv1alpha1.ResourceStateSync),
-				workloadv1alpha1.ClusterResourceStateLabelPrefix + "aQA9mRmZ5RuT9vKRZokxZTm1Yk9SqKyfOMoTEr": string(workloadv1alpha1.ResourceStateSync),
-			},
-		},
-		{
-			name: "scheduled cluster is removing",
-			annotations: map[string]string{
-				schedulingv1alpha1.PlacementAnnotationKey: "",
-				workloadv1alpha1.InternalClusterDeletionTimestampAnnotationPrefix + "34sZi3721YwBLDHUuNVIOLxuYp5nEZBpsTQyDq": now3339,
-			},
-			labels: map[string]string{
-				workloadv1alpha1.ClusterResourceStateLabelPrefix + "34sZi3721YwBLDHUuNVIOLxuYp5nEZBpsTQyDq": string(workloadv1alpha1.ResourceStateSync),
-			},
-			placement: newPlacement("test-placement", "test-location", "test-cluster"),
-			expectedAnnotations: map[string]string{
-				schedulingv1alpha1.PlacementAnnotationKey: "",
-				workloadv1alpha1.InternalClusterDeletionTimestampAnnotationPrefix + "34sZi3721YwBLDHUuNVIOLxuYp5nEZBpsTQyDq": now3339,
-			},
-			expectedLabels: map[string]string{
-				workloadv1alpha1.ClusterResourceStateLabelPrefix + "34sZi3721YwBLDHUuNVIOLxuYp5nEZBpsTQyDq": string(workloadv1alpha1.ResourceStateSync),
-			},
-			expectRequeue: true,
-		},
-		{
-			name: "remove clusters which are removing after the grace period",
-			annotations: map[string]string{
-				schedulingv1alpha1.PlacementAnnotationKey: "",
-				workloadv1alpha1.InternalClusterDeletionTimestampAnnotationPrefix + "34sZi3721YwBLDHUuNVIOLxuYp5nEZBpsTQyDq": time.Now().Add(-1 * (removingGracePeriod + 1)).UTC().Format(time.RFC3339),
-			},
-			labels: map[string]string{
-				workloadv1alpha1.ClusterResourceStateLabelPrefix + "34sZi3721YwBLDHUuNVIOLxuYp5nEZBpsTQyDq": string(workloadv1alpha1.ResourceStateSync),
-			},
-			placement:  newPlacement("test-placement", "test-location", ""),
-			expectStop: true,
-			expectedAnnotations: map[string]string{
-				schedulingv1alpha1.PlacementAnnotationKey: "",
-			},
-			expectedLabels: map[string]string{},
-		},
-	}
-
-	for _, testCase := range testCases {
-		t.Run(testCase.name, func(t *testing.T) {
-			ns := &corev1.Namespace{
-				ObjectMeta: metav1.ObjectMeta{
-					Labels:      testCase.labels,
-					Annotations: testCase.annotations,
-				},
-			}
-
-			listPlacement := func(clusterName logicalcluster.Name) ([]*schedulingv1alpha1.Placement, error) {
-				if testCase.noPlacements {
-					return []*schedulingv1alpha1.Placement{}, nil
-				}
-
-				return []*schedulingv1alpha1.Placement{testCase.placement}, nil
-			}
-
-			c := &controller{
-				queue:          workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
-				listPlacements: listPlacement,
-				now:            func() time.Time { return now },
-			}
-
-			result, err := c.reconcileScheduling(context.Background(), "key", ns)
-			require.NoError(t, err)
-			require.Equal(t, testCase.expectedAnnotations, ns.Annotations)
-			require.Equal(t, testCase.expectedLabels, ns.Labels)
-			require.Equal(t, testCase.expectStop, result.stop)
-			if testCase.expectRequeue {
-				require.NotZero(t, result.requeueAfter)
-			} else {
-				require.Zero(t, result.requeueAfter)
-			}
-		})
-	}
-}
-
-func TestMultiplePlacements(t *testing.T) {
-	now := time.Now()
-	now3339 := now.UTC().Format(time.RFC3339)
-
-	testCases := []struct {
-		name       string
-		placements []*schedulingv1alpha1.Placement
-
-		labels      map[string]string
-		annotations map[string]string
-
-		expectStop          bool
-		expectedLabels      map[string]string
-		expectedAnnotations map[string]string
-	}{
-		{
-			name: "schedule to two locations",
-			placements: []*schedulingv1alpha1.Placement{
-				newPlacement("p1", "loc1", "c1"),
-				newPlacement("p2", "loc2", "c2"),
-			},
-			annotations: map[string]string{
-				schedulingv1alpha1.PlacementAnnotationKey: "",
-			},
-			expectStop: true,
-			expectedAnnotations: map[string]string{
-				schedulingv1alpha1.PlacementAnnotationKey: "",
-			},
-			expectedLabels: map[string]string{
-				workloadv1alpha1.ClusterResourceStateLabelPrefix + "aPkhvUbGK0xoZIjMnM2pA0AuV1g7i4tBwxu5m4": string(workloadv1alpha1.ResourceStateSync),
-				workloadv1alpha1.ClusterResourceStateLabelPrefix + "aQtdeEWVcqU7h7AKnYMm3KRQ96U4oU2W04yeOa": string(workloadv1alpha1.ResourceStateSync),
-			},
-		},
-		{
-			name: "placements select the same location",
-			placements: []*schedulingv1alpha1.Placement{
-				newPlacement("p1", "loc1", "c1"),
-				newPlacement("p2", "loc1", "c1"),
-			},
-			annotations: map[string]string{
-				schedulingv1alpha1.PlacementAnnotationKey: "",
-			},
-			expectStop: true,
-			expectedAnnotations: map[string]string{
-				schedulingv1alpha1.PlacementAnnotationKey: "",
-			},
-			expectedLabels: map[string]string{
-				workloadv1alpha1.ClusterResourceStateLabelPrefix + "aQtdeEWVcqU7h7AKnYMm3KRQ96U4oU2W04yeOa": string(workloadv1alpha1.ResourceStateSync),
-			},
-		},
-		{
-			name: "clusters are scheduled already",
-			placements: []*schedulingv1alpha1.Placement{
-				newPlacement("p1", "loc1", "c1"),
-				newPlacement("p2", "loc2", "c2"),
-			},
-			annotations: map[string]string{
-				schedulingv1alpha1.PlacementAnnotationKey: "",
-			},
-			labels: map[string]string{
-				workloadv1alpha1.ClusterResourceStateLabelPrefix + "aQtdeEWVcqU7h7AKnYMm3KRQ96U4oU2W04yeOa": string(workloadv1alpha1.ResourceStateSync),
-				workloadv1alpha1.ClusterResourceStateLabelPrefix + "aPkhvUbGK0xoZIjMnM2pA0AuV1g7i4tBwxu5m4": string(workloadv1alpha1.ResourceStateSync),
-			},
-			expectedAnnotations: map[string]string{
-				schedulingv1alpha1.PlacementAnnotationKey: "",
-			},
-			expectedLabels: map[string]string{
-				workloadv1alpha1.ClusterResourceStateLabelPrefix + "aQtdeEWVcqU7h7AKnYMm3KRQ96U4oU2W04yeOa": string(workloadv1alpha1.ResourceStateSync),
-				workloadv1alpha1.ClusterResourceStateLabelPrefix + "aPkhvUbGK0xoZIjMnM2pA0AuV1g7i4tBwxu5m4": string(workloadv1alpha1.ResourceStateSync),
-			},
-		},
-		{
-			name: "clusters are rescheduled when removing",
-			placements: []*schedulingv1alpha1.Placement{
-				newPlacement("p1", "loc1", "c3"),
-				newPlacement("p2", "loc2", "c4"),
-			},
-			annotations: map[string]string{
-				schedulingv1alpha1.PlacementAnnotationKey: "",
-				workloadv1alpha1.InternalClusterDeletionTimestampAnnotationPrefix + "aQtdeEWVcqU7h7AKnYMm3KRQ96U4oU2W04yeOa": now3339,
-				workloadv1alpha1.InternalClusterDeletionTimestampAnnotationPrefix + "aPkhvUbGK0xoZIjMnM2pA0AuV1g7i4tBwxu5m4": now3339,
-			},
-			labels: map[string]string{
-				workloadv1alpha1.ClusterResourceStateLabelPrefix + "aQtdeEWVcqU7h7AKnYMm3KRQ96U4oU2W04yeOa": string(workloadv1alpha1.ResourceStateSync),
-				workloadv1alpha1.ClusterResourceStateLabelPrefix + "aPkhvUbGK0xoZIjMnM2pA0AuV1g7i4tBwxu5m4": string(workloadv1alpha1.ResourceStateSync),
-			},
-			expectStop: true,
-			expectedAnnotations: map[string]string{
-				schedulingv1alpha1.PlacementAnnotationKey: "",
-				workloadv1alpha1.InternalClusterDeletionTimestampAnnotationPrefix + "aQtdeEWVcqU7h7AKnYMm3KRQ96U4oU2W04yeOa": now3339,
-				workloadv1alpha1.InternalClusterDeletionTimestampAnnotationPrefix + "aPkhvUbGK0xoZIjMnM2pA0AuV1g7i4tBwxu5m4": now3339,
-			},
-			expectedLabels: map[string]string{
-				workloadv1alpha1.ClusterResourceStateLabelPrefix + "aQtdeEWVcqU7h7AKnYMm3KRQ96U4oU2W04yeOa": string(workloadv1alpha1.ResourceStateSync),
-				workloadv1alpha1.ClusterResourceStateLabelPrefix + "aPkhvUbGK0xoZIjMnM2pA0AuV1g7i4tBwxu5m4": string(workloadv1alpha1.ResourceStateSync),
-				workloadv1alpha1.ClusterResourceStateLabelPrefix + "5iSfzYTm7pPirj6HKlmfvXMb6AuqSBxNB7vkVP": string(workloadv1alpha1.ResourceStateSync),
-				workloadv1alpha1.ClusterResourceStateLabelPrefix + "8s5f69zIcmjG486nG2jBF8BdYtgwPS7PVP1bTL": string(workloadv1alpha1.ResourceStateSync),
-			},
-		},
-	}
-
-	for _, testCase := range testCases {
-		t.Run(testCase.name, func(t *testing.T) {
-			ns := &corev1.Namespace{
-				ObjectMeta: metav1.ObjectMeta{
-					Labels:      testCase.labels,
-					Annotations: testCase.annotations,
-				},
-			}
-
-			listPlacement := func(clusterName logicalcluster.Name) ([]*schedulingv1alpha1.Placement, error) {
-				return testCase.placements, nil
-			}
-
-			c := &controller{
-				queue:          workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
-				listPlacements: listPlacement,
-				now:            func() time.Time { return now },
-			}
-
-			result, err := c.reconcileScheduling(context.Background(), "key", ns)
-			require.NoError(t, err)
-			require.Equal(t, testCase.expectStop, result.stop)
-			require.Equal(t, testCase.expectedAnnotations, ns.Annotations)
-			require.Equal(t, testCase.expectedLabels, ns.Labels)
-		})
-	}
-}
-
-func newPlacement(name, location, synctarget string) *schedulingv1alpha1.Placement {
-	placement := &schedulingv1alpha1.Placement{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: name,
-		},
-		Spec: schedulingv1alpha1.PlacementSpec{
-			NamespaceSelector: &metav1.LabelSelector{},
-		},
-		Status: schedulingv1alpha1.PlacementStatus{
-			SelectedLocation: &schedulingv1alpha1.LocationReference{
-				LocationName: location,
-			},
-		},
-	}
-
-	if len(synctarget) > 0 {
-		placement.Annotations = map[string]string{
-			workloadv1alpha1.InternalSyncTargetPlacementAnnotationKey: workloadv1alpha1.ToSyncTargetKey("", synctarget),
-		}
-	}
-
-	return placement
-}
diff --git a/pkg/reconciler/workload/namespace/namespace_reconcile_status.go b/pkg/reconciler/workload/namespace/namespace_reconcile_status.go
deleted file mode 100644
index 21e3f730320..00000000000
--- a/pkg/reconciler/workload/namespace/namespace_reconcile_status.go
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
-Copyright 2022 The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
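Editorial note on the test fixtures above: the opaque strings such as `34sZi3721YwBLDHUuNVIOLxuYp5nEZBpsTQyDq` are hashed SyncTarget keys produced by `workloadv1alpha1.ToSyncTargetKey`, exactly as `newPlacement` does when it fills the placement annotation. A minimal, hedged sketch of deriving such a key instead of hard-coding it (the `main` wrapper and printing are illustrative only; the helper and constants are from the workload SDK used throughout this patch):

```go
package main

import (
	"fmt"

	"github.com/kcp-dev/logicalcluster/v3"

	workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1"
)

func main() {
	// The tests above pass an empty cluster name, mirroring
	// newPlacement's call ToSyncTargetKey("", synctarget).
	key := workloadv1alpha1.ToSyncTargetKey(logicalcluster.Name(""), "test-cluster")

	// A namespace scheduled to that SyncTarget carries this label.
	label := workloadv1alpha1.ClusterResourceStateLabelPrefix + key
	fmt.Println(label, "=", string(workloadv1alpha1.ResourceStateSync))
}
```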
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package namespace - -import ( - "context" - - corev1 "k8s.io/api/core/v1" - - schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1" - conditionsv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/apis/conditions/v1alpha1" - "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/util/conditions" -) - -const ( - // NamespaceScheduled represents status of the scheduling process for this namespace. - NamespaceScheduled conditionsv1alpha1.ConditionType = "NamespaceScheduled" - // NamespaceReasonUnschedulable reason in NamespaceScheduled Namespace Condition - // means that the scheduler can't schedule the namespace right now, e.g. due to a - // lack of ready clusters being available. - NamespaceReasonUnschedulable = "Unschedulable" - // NamespaceReasonSchedulingDisabled reason in NamespaceScheduled Namespace Condition - // means that the automated scheduling for this namespace is disabled, e.g., when it's - // labelled with ScheduleDisabledLabel. - NamespaceReasonSchedulingDisabled = "SchedulingDisabled" - // NamespaceReasonPlacementInvalid reason in NamespaceScheduled Namespace Condition - // means the placement annotation has invalid value. - NamespaceReasonPlacementInvalid = "PlacementInvalid" -) - -// reconcileStatus ensures the status of the given namespace reflects the -// namespace's scheduled state. -func (c *controller) reconcileStatus(_ context.Context, _ string, ns *corev1.Namespace) (reconcileResult, error) { - conditionsAdapter := &NamespaceConditionsAdapter{ns} - - _, found := ns.Annotations[schedulingv1alpha1.PlacementAnnotationKey] - if !found { - conditions.MarkFalse(conditionsAdapter, NamespaceScheduled, NamespaceReasonUnschedulable, - conditionsv1alpha1.ConditionSeverityNone, // NamespaceCondition doesn't support severity - "No available placements") - return reconcileResult{}, nil - } - - syncStatus := syncStatusFor(ns) - if len(syncStatus.active) == 0 { - conditions.MarkFalse(conditionsAdapter, NamespaceScheduled, NamespaceReasonUnschedulable, - conditionsv1alpha1.ConditionSeverityNone, // NamespaceCondition doesn't support severity - "No available sync targets") - return reconcileResult{}, nil - } - - conditions.MarkTrue(conditionsAdapter, NamespaceScheduled) - return reconcileResult{}, nil -} - -// NamespaceConditionsAdapter enables the use of the conditions helper -// library with Namespaces. 
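A short usage sketch may help here: the adapter defined next lets the generic conditions helpers read and write plain `corev1.NamespaceCondition`s. This is a hedged illustration reusing only names visible in this file (the wrapper function is mine, not part of the source):

```go
package namespace

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"

	"github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/util/conditions"
)

func exampleAdapterUsage() {
	ns := &corev1.Namespace{}
	adapter := &NamespaceConditionsAdapter{ns}

	// Written through the adapter, the condition round-trips into
	// ns.Status.Conditions as an ordinary corev1.NamespaceCondition.
	conditions.MarkTrue(adapter, NamespaceScheduled)

	if c := conditions.Get(adapter, NamespaceScheduled); c != nil && c.Status == corev1.ConditionTrue {
		fmt.Println("namespace is scheduled")
	}
}
```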
-type NamespaceConditionsAdapter struct { - *corev1.Namespace -} - -func (ca *NamespaceConditionsAdapter) GetConditions() conditionsv1alpha1.Conditions { - conditions := conditionsv1alpha1.Conditions{} - for _, c := range ca.Status.Conditions { - conditions = append(conditions, conditionsv1alpha1.Condition{ - Type: conditionsv1alpha1.ConditionType(c.Type), - Status: c.Status, - // Default to None because NamespaceCondition lacks a Severity field - Severity: conditionsv1alpha1.ConditionSeverityNone, - LastTransitionTime: c.LastTransitionTime, - Reason: c.Reason, - Message: c.Message, - }) - } - return conditions -} - -func (ca *NamespaceConditionsAdapter) SetConditions(conditions conditionsv1alpha1.Conditions) { - nsConditions := []corev1.NamespaceCondition{} - for _, c := range conditions { - nsConditions = append(nsConditions, corev1.NamespaceCondition{ - Type: corev1.NamespaceConditionType(c.Type), - Status: c.Status, - // Severity is ignored - LastTransitionTime: c.LastTransitionTime, - Reason: c.Reason, - Message: c.Message, - }) - } - ca.Status.Conditions = nsConditions -} diff --git a/pkg/reconciler/workload/namespace/namespace_reconcile_status_test.go b/pkg/reconciler/workload/namespace/namespace_reconcile_status_test.go deleted file mode 100644 index b06de43996d..00000000000 --- a/pkg/reconciler/workload/namespace/namespace_reconcile_status_test.go +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package namespace - -import ( - "context" - "testing" - - "github.com/stretchr/testify/require" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1" - conditionsapi "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/apis/conditions/v1alpha1" - "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/util/conditions" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" -) - -func TestSetScheduledCondition(t *testing.T) { - testCases := map[string]struct { - labels map[string]string - annotations map[string]string - scheduled bool - reason conditionsapi.ConditionType - }{ - "scheduled": { - annotations: map[string]string{ - schedulingv1alpha1.PlacementAnnotationKey: "", - }, - labels: map[string]string{ - workloadv1alpha1.ClusterResourceStateLabelPrefix + "cluster1": string(workloadv1alpha1.ResourceStateSync), - }, - scheduled: true, - }, - "unschedulable": { - reason: NamespaceReasonUnschedulable, - }, - "no clusters": { - annotations: map[string]string{ - schedulingv1alpha1.PlacementAnnotationKey: "", - }, - reason: NamespaceReasonUnschedulable, - }, - } - for testName, testCase := range testCases { - t.Run(testName, func(t *testing.T) { - ns := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Labels: testCase.labels, - Annotations: testCase.annotations, - }, - } - - c := &controller{} - result, err := c.reconcileStatus(context.Background(), "key", ns) - require.NoError(t, err) - require.False(t, result.stop) - require.Zero(t, result.requeueAfter) - - if !testCase.scheduled && testCase.reason == "" { - c := conditions.Get(&NamespaceConditionsAdapter{ns}, NamespaceScheduled) - require.Nil(t, c) - } else { - c := conditions.Get(&NamespaceConditionsAdapter{ns}, NamespaceScheduled) - require.NotNil(t, c) - scheduled := c.Status == corev1.ConditionTrue - require.Equal(t, testCase.scheduled, scheduled, "unexpected value for scheduled") - if len(testCase.reason) > 0 { - require.Equal(t, string(testCase.reason), c.Reason, "unexpected reason") - } - } - }) - } -} diff --git a/pkg/reconciler/workload/placement/placement_controller.go b/pkg/reconciler/workload/placement/placement_controller.go deleted file mode 100644 index 0624614d4fc..00000000000 --- a/pkg/reconciler/workload/placement/placement_controller.go +++ /dev/null @@ -1,398 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/
-
-package placement
-
-import (
-	"context"
-	"fmt"
-	"reflect"
-	"time"
-
-	"github.com/go-logr/logr"
-	kcpcache "github.com/kcp-dev/apimachinery/v2/pkg/cache"
-	"github.com/kcp-dev/logicalcluster/v3"
-
-	"k8s.io/apimachinery/pkg/api/errors"
-	"k8s.io/apimachinery/pkg/labels"
-	utilerrors "k8s.io/apimachinery/pkg/util/errors"
-	"k8s.io/apimachinery/pkg/util/runtime"
-	"k8s.io/apimachinery/pkg/util/wait"
-	"k8s.io/client-go/tools/cache"
-	"k8s.io/client-go/util/workqueue"
-	"k8s.io/klog/v2"
-
-	"github.com/kcp-dev/kcp/pkg/indexers"
-	"github.com/kcp-dev/kcp/pkg/logging"
-	"github.com/kcp-dev/kcp/pkg/reconciler/committer"
-	apisv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1"
-	"github.com/kcp-dev/kcp/sdk/apis/core"
-	schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1"
-	workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1"
-	kcpclientset "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster"
-	schedulingv1alpha1client "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/typed/scheduling/v1alpha1"
-	apisinformers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/apis/v1alpha1"
-	corev1alpha1informers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/core/v1alpha1"
-	schedulinginformers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/scheduling/v1alpha1"
-	workloadinformers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/workload/v1alpha1"
-	apislisters "github.com/kcp-dev/kcp/sdk/client/listers/apis/v1alpha1"
-	corev1alpha1listers "github.com/kcp-dev/kcp/sdk/client/listers/core/v1alpha1"
-	schedulingv1alpha1listers "github.com/kcp-dev/kcp/sdk/client/listers/scheduling/v1alpha1"
-	workloadv1alpha1listers "github.com/kcp-dev/kcp/sdk/client/listers/workload/v1alpha1"
-)
-
-const (
-	ControllerName         = "kcp-workload-placement"
-	bySelectedLocationPath = ControllerName + "-bySelectedLocationPath"
-)
-
-// NewController returns a new controller that starts the process of selecting a synctarget for a placement.
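Before the constructor that follows: it wires each `list*` function with a local-then-global informer fallback, preferring the local cache and falling back to the global copy when the local cache is empty or errors. A generic distillation of that pattern, offered as a sketch (the helper name and shape are mine, not kcp API):

```go
package placement

// listWithGlobalFallback distills the lookup pattern used by the
// constructor below: try the local informer's cache first, and fall
// back to the global informer when the local cache has nothing.
func listWithGlobalFallback[T any](local, global func() ([]T, error)) ([]T, error) {
	items, err := local()
	if err != nil || len(items) == 0 {
		return global()
	}
	return items, nil
}
```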
-func NewController( - kcpClusterClient kcpclientset.ClusterInterface, - logicalClusterInformer corev1alpha1informers.LogicalClusterClusterInformer, - locationInformer, globalLocationInformer schedulinginformers.LocationClusterInformer, - syncTargetInformer, globalSyncTargetInformer workloadinformers.SyncTargetClusterInformer, - placementInformer schedulinginformers.PlacementClusterInformer, - apiBindingInformer apisinformers.APIBindingClusterInformer, -) (*controller, error) { - queue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), ControllerName) - - c := &controller{ - queue: queue, - - kcpClusterClient: kcpClusterClient, - - logicalClusterLister: logicalClusterInformer.Lister(), - - locationLister: locationInformer.Lister(), - locationIndexer: locationInformer.Informer().GetIndexer(), - - listLocations: func(clusterName logicalcluster.Name) ([]*schedulingv1alpha1.Location, error) { - locations, err := locationInformer.Lister().Cluster(clusterName).List(labels.Everything()) - if err != nil || len(locations) == 0 { - return globalLocationInformer.Lister().Cluster(clusterName).List(labels.Everything()) - } - - return locations, nil - }, - - syncTargetLister: syncTargetInformer.Lister(), - - listSyncTargets: func(clusterName logicalcluster.Name) ([]*workloadv1alpha1.SyncTarget, error) { - targets, err := syncTargetInformer.Lister().Cluster(clusterName).List(labels.Everything()) - if err != nil || len(targets) == 0 { - return globalSyncTargetInformer.Lister().Cluster(clusterName).List(labels.Everything()) - } - return targets, nil - }, - - getLocation: func(path logicalcluster.Path, name string) (*schedulingv1alpha1.Location, error) { - return indexers.ByPathAndNameWithFallback[*schedulingv1alpha1.Location](schedulingv1alpha1.Resource("locations"), locationInformer.Informer().GetIndexer(), globalLocationInformer.Informer().GetIndexer(), path, name) - }, - - placementLister: placementInformer.Lister(), - placementIndexer: placementInformer.Informer().GetIndexer(), - - apiBindingLister: apiBindingInformer.Lister(), - - commit: committer.NewCommitter[*Placement, Patcher, *PlacementSpec, *PlacementStatus](kcpClusterClient.SchedulingV1alpha1().Placements()), - } - - if err := placementInformer.Informer().AddIndexers(cache.Indexers{ - bySelectedLocationPath: indexBySelectedLocationPath, - }); err != nil { - return nil, err - } - - indexers.AddIfNotPresentOrDie(locationInformer.Informer().GetIndexer(), cache.Indexers{ - indexers.ByLogicalClusterPathAndName: indexers.IndexByLogicalClusterPathAndName, - }) - - indexers.AddIfNotPresentOrDie(globalLocationInformer.Informer().GetIndexer(), cache.Indexers{ - indexers.ByLogicalClusterPathAndName: indexers.IndexByLogicalClusterPathAndName, - }) - - logger := logging.WithReconciler(klog.Background(), ControllerName) - - _, _ = locationInformer.Informer().AddEventHandler( - cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { c.enqueueLocation(obj, logger) }, - UpdateFunc: func(old, obj interface{}) { - oldLoc := old.(*schedulingv1alpha1.Location) - newLoc := obj.(*schedulingv1alpha1.Location) - if !reflect.DeepEqual(oldLoc.Spec, newLoc.Spec) || !reflect.DeepEqual(oldLoc.Labels, newLoc.Labels) { - c.enqueueLocation(obj, logger) - } - }, - DeleteFunc: func(obj interface{}) { c.enqueueLocation(obj, logger) }, - }, - ) - - _, _ = syncTargetInformer.Informer().AddEventHandler( - cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { c.enqueueSyncTarget(obj, logger) }, - UpdateFunc: func(old, obj 
interface{}) {
-				oldCluster := old.(*workloadv1alpha1.SyncTarget)
-				oldClusterCopy := *oldCluster
-
-				// ignore fields that the scheduler does not care about
-				oldClusterCopy.ResourceVersion = "0"
-				oldClusterCopy.Status.LastSyncerHeartbeatTime = nil
-				oldClusterCopy.Status.VirtualWorkspaces = nil
-				oldClusterCopy.Status.Capacity = nil
-
-				newCluster := obj.(*workloadv1alpha1.SyncTarget)
-				newClusterCopy := *newCluster
-				newClusterCopy.ResourceVersion = "0"
-				newClusterCopy.Status.LastSyncerHeartbeatTime = nil
-				newClusterCopy.Status.VirtualWorkspaces = nil
-				newClusterCopy.Status.Capacity = nil
-
-				// compare, ignoring the heartbeat
-				if !reflect.DeepEqual(oldClusterCopy, newClusterCopy) {
-					c.enqueueSyncTarget(obj, logger)
-				}
-			},
-			DeleteFunc: func(obj interface{}) { c.enqueueSyncTarget(obj, logger) },
-		},
-	)
-
-	_, _ = placementInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
-		AddFunc:    func(obj interface{}) { c.enqueuePlacement(obj, logger) },
-		UpdateFunc: func(_, obj interface{}) { c.enqueuePlacement(obj, logger) },
-		DeleteFunc: func(obj interface{}) { c.enqueuePlacement(obj, logger) },
-	})
-
-	_, _ = apiBindingInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
-		AddFunc:    func(obj interface{}) { c.enqueueAPIBinding(obj, logger) },
-		UpdateFunc: func(_, obj interface{}) { c.enqueueAPIBinding(obj, logger) },
-		DeleteFunc: func(obj interface{}) { c.enqueueAPIBinding(obj, logger) },
-	})
-
-	return c, nil
-}
-
-type Placement = schedulingv1alpha1.Placement
-type PlacementSpec = schedulingv1alpha1.PlacementSpec
-type PlacementStatus = schedulingv1alpha1.PlacementStatus
-type Patcher = schedulingv1alpha1client.PlacementInterface
-type Resource = committer.Resource[*PlacementSpec, *PlacementStatus]
-type CommitFunc = func(context.Context, *Resource, *Resource) error
-
-// controller.
-type controller struct {
-	queue workqueue.RateLimitingInterface
-
-	kcpClusterClient kcpclientset.ClusterInterface
-
-	logicalClusterLister corev1alpha1listers.LogicalClusterClusterLister
-
-	locationLister  schedulingv1alpha1listers.LocationClusterLister
-	locationIndexer cache.Indexer
-
-	listLocations func(clusterName logicalcluster.Name) ([]*schedulingv1alpha1.Location, error)
-	getLocation   func(path logicalcluster.Path, name string) (*schedulingv1alpha1.Location, error)
-
-	syncTargetLister workloadv1alpha1listers.SyncTargetClusterLister
-
-	listSyncTargets func(clusterName logicalcluster.Name) ([]*workloadv1alpha1.SyncTarget, error)
-
-	placementLister  schedulingv1alpha1listers.PlacementClusterLister
-	placementIndexer cache.Indexer
-
-	apiBindingLister apislisters.APIBindingClusterLister
-	commit           CommitFunc
-}
-
-// enqueueLocation finds placements referencing this location, by cluster name or by path, and enqueues them.
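The `UpdateFunc` earlier in this constructor blanks out heartbeat and other scheduler-irrelevant fields on copies of both objects before comparing, so heartbeat-only updates never cause requeues. Factored out, the idea looks roughly like this (a sketch; the helper does not exist in the source, and it deep-copies where the original makes shallow struct copies):

```go
package placement

import (
	"reflect"

	workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1"
)

// syncTargetChangedForScheduling is an illustrative helper: it reports
// whether an update is interesting to the scheduler, ignoring the same
// fields the event handler above normalizes away.
func syncTargetChangedForScheduling(oldST, newST *workloadv1alpha1.SyncTarget) bool {
	a, b := oldST.DeepCopy(), newST.DeepCopy()
	for _, st := range []*workloadv1alpha1.SyncTarget{a, b} {
		st.ResourceVersion = "0"
		st.Status.LastSyncerHeartbeatTime = nil
		st.Status.VirtualWorkspaces = nil
		st.Status.Capacity = nil
	}
	return !reflect.DeepEqual(a, b)
}
```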
-func (c *controller) enqueueLocation(obj interface{}, logger logr.Logger) { - if tombstone, ok := obj.(cache.DeletedFinalStateUnknown); ok { - obj = tombstone.Obj - } - - location, ok := obj.(*schedulingv1alpha1.Location) - if !ok { - runtime.HandleError(fmt.Errorf("unexpected object type: %T", obj)) - return - } - - // placements referencing by cluster name - placements, err := c.placementIndexer.ByIndex(bySelectedLocationPath, logicalcluster.From(location).String()) - if err != nil { - runtime.HandleError(err) - return - } - if path := location.Annotations[core.LogicalClusterPathAnnotationKey]; path != "" { - // placements referencing by path - placementsByPath, err := c.placementIndexer.ByIndex(bySelectedLocationPath, path) - if err != nil { - runtime.HandleError(err) - return - } - placements = append(placements, placementsByPath...) - } - - logger = logger.WithValues(logging.FromPrefix("locationReason", location)...) - for _, placement := range placements { - c.enqueuePlacement(placement, logger) - } -} - -func (c *controller) enqueuePlacement(obj interface{}, logger logr.Logger) { - key, err := kcpcache.DeletionHandlingMetaClusterNamespaceKeyFunc(obj) - if err != nil { - runtime.HandleError(err) - return - } - - logging.WithQueueKey(logger, key).V(2).Info("queueing Placement") - c.queue.Add(key) -} - -func (c *controller) enqueueAPIBinding(obj interface{}, logger logr.Logger) { - key, err := kcpcache.DeletionHandlingMetaClusterNamespaceKeyFunc(obj) - if err != nil { - runtime.HandleError(err) - return - } - clusterName, _, _, err := kcpcache.SplitMetaClusterNamespaceKey(key) - if err != nil { - runtime.HandleError(err) - return - } - - placements, err := c.placementLister.Cluster(clusterName).List(labels.Everything()) - if err != nil { - runtime.HandleError(err) - return - } - - logger = logger.WithValues(logging.FromPrefix("apiBindingReason", obj.(*apisv1alpha1.APIBinding))...) - - for _, placement := range placements { - c.enqueuePlacement(placement, logger) - } -} - -func (c *controller) enqueueSyncTarget(obj interface{}, logger logr.Logger) { - if tombstone, ok := obj.(cache.DeletedFinalStateUnknown); ok { - obj = tombstone.Obj - } - - syncTarget, ok := obj.(*workloadv1alpha1.SyncTarget) - if !ok { - runtime.HandleError(fmt.Errorf("unexpected object type: %T", obj)) - return - } - - // Get all locations in the same cluster and enqueue locations. - locations, err := c.listLocations(logicalcluster.From(syncTarget)) - if err != nil { - runtime.HandleError(err) - return - } - - logger = logger.WithValues(logging.FromPrefix("syncTargetReason", syncTarget)...) - - for _, location := range locations { - c.enqueueLocation(location, logger) - } -} - -// Start starts the controller, which stops when ctx.Done() is closed. 
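The worker loop that follows (`Start`/`startWorker`/`processNextWorkItem`) is the canonical client-go workqueue drain, distinguishing three outcomes per key. A condensed sketch of that contract (everything except the workqueue API itself is my naming, not the source's):

```go
package placement

import (
	"context"

	"k8s.io/client-go/util/workqueue"
)

// drainOne is an illustrative condensation of processNextWorkItem below.
func drainOne(ctx context.Context, queue workqueue.RateLimitingInterface,
	sync func(ctx context.Context, key string) (requeue bool, err error)) bool {
	k, quit := queue.Get()
	if quit {
		return false
	}
	key := k.(string)
	defer queue.Done(key) // always unblock other workers for this key

	switch requeue, err := sync(ctx, key); {
	case err != nil:
		queue.AddRateLimited(key) // retry with backoff on error
	case requeue:
		queue.Add(key) // immediate, non-rate-limited retry
	default:
		queue.Forget(key) // success: reset the rate limiter for this key
	}
	return true
}
```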
-func (c *controller) Start(ctx context.Context, numThreads int) { - defer runtime.HandleCrash() - defer c.queue.ShutDown() - - logger := logging.WithReconciler(klog.FromContext(ctx), ControllerName) - ctx = klog.NewContext(ctx, logger) - logger.Info("Starting controller") - defer logger.Info("Shutting down controller") - - for i := 0; i < numThreads; i++ { - go wait.UntilWithContext(ctx, c.startWorker, time.Second) - } - - <-ctx.Done() -} - -func (c *controller) startWorker(ctx context.Context) { - for c.processNextWorkItem(ctx) { - } -} - -func (c *controller) processNextWorkItem(ctx context.Context) bool { - // Wait until there is a new item in the working queue - k, quit := c.queue.Get() - if quit { - return false - } - key := k.(string) - - logger := logging.WithQueueKey(klog.FromContext(ctx), key) - ctx = klog.NewContext(ctx, logger) - logger.V(1).Info("processing key") - - // No matter what, tell the queue we're done with this key, to unblock - // other workers. - defer c.queue.Done(key) - - if requeue, err := c.process(ctx, key); err != nil { - runtime.HandleError(fmt.Errorf("%q controller failed to sync %q, err: %w", ControllerName, key, err)) - c.queue.AddRateLimited(key) - return true - } else if requeue { - // only requeue if we didn't error, but we still want to requeue - c.queue.Add(key) - return true - } - c.queue.Forget(key) - return true -} - -func (c *controller) process(ctx context.Context, key string) (bool, error) { - clusterName, _, name, err := kcpcache.SplitMetaClusterNamespaceKey(key) - if err != nil { - runtime.HandleError(err) - return false, nil - } - obj, err := c.placementLister.Cluster(clusterName).Get(name) - if err != nil { - if errors.IsNotFound(err) { - return false, nil // object deleted before we handled it - } - return false, err - } - old := obj - obj = obj.DeepCopy() - - logger := logging.WithObject(klog.FromContext(ctx), obj) - ctx = klog.NewContext(ctx, logger) - - var errs []error - requeue, err := c.reconcile(ctx, obj) - if err != nil { - errs = append(errs, err) - } - - oldResource := &Resource{ObjectMeta: old.ObjectMeta, Spec: &old.Spec, Status: &old.Status} - newResource := &Resource{ObjectMeta: obj.ObjectMeta, Spec: &obj.Spec, Status: &obj.Status} - if err := c.commit(ctx, oldResource, newResource); err != nil { - errs = append(errs, err) - } - - return requeue, utilerrors.NewAggregate(errs) -} diff --git a/pkg/reconciler/workload/placement/placement_indexes.go b/pkg/reconciler/workload/placement/placement_indexes.go deleted file mode 100644 index 3398c644655..00000000000 --- a/pkg/reconciler/workload/placement/placement_indexes.go +++ /dev/null @@ -1,36 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/
-
-package placement
-
-import (
-	"fmt"
-
-	schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1"
-)
-
-func indexBySelectedLocationPath(obj interface{}) ([]string, error) {
-	placement, ok := obj.(*schedulingv1alpha1.Placement)
-	if !ok {
-		return []string{}, fmt.Errorf("obj is supposed to be a Placement, but is %T", obj)
-	}
-
-	if placement.Status.SelectedLocation == nil {
-		return []string{}, nil
-	}
-
-	return []string{placement.Status.SelectedLocation.Path}, nil
-}
diff --git a/pkg/reconciler/workload/placement/placement_reconcile.go b/pkg/reconciler/workload/placement/placement_reconcile.go
deleted file mode 100644
index 42758ad9901..00000000000
--- a/pkg/reconciler/workload/placement/placement_reconcile.go
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
-Copyright 2022 The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package placement
-
-import (
-	"context"
-
-	"github.com/kcp-dev/logicalcluster/v3"
-
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/labels"
-	"k8s.io/apimachinery/pkg/types"
-	utilserrors "k8s.io/apimachinery/pkg/util/errors"
-	"k8s.io/klog/v2"
-
-	apisv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1"
-	schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1"
-	workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1"
-)
-
-type reconcileStatus int
-
-const (
-	reconcileStatusStopAndRequeue reconcileStatus = iota
-	reconcileStatusContinue
-)
-
-type reconciler interface {
-	reconcile(ctx context.Context, placement *schedulingv1alpha1.Placement) (reconcileStatus, *schedulingv1alpha1.Placement, error)
-}
-
-func (c *controller) reconcile(ctx context.Context, placement *schedulingv1alpha1.Placement) (bool, error) {
-	reconcilers := []reconciler{
-		&placementSchedulingReconciler{
-			listSyncTargets:         c.listSyncTargets,
-			getLocation:             c.getLocation,
-			patchPlacement:          c.patchPlacement,
-			listWorkloadAPIBindings: c.listWorkloadAPIBindings,
-		},
-	}
-
-	var errs []error
-
-	requeue := false
-	for _, r := range reconcilers {
-		var err error
-		var status reconcileStatus
-		status, placement, err = r.reconcile(ctx, placement)
-		if err != nil {
-			errs = append(errs, err)
-		}
-		if status == reconcileStatusStopAndRequeue {
-			requeue = true
-			break
-		}
-	}
-
-	return requeue, utilserrors.NewAggregate(errs)
-}
-
-func (c *controller) patchPlacement(ctx context.Context, clusterName logicalcluster.Path, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (*schedulingv1alpha1.Placement, error) {
-	logger := klog.FromContext(ctx)
-	logger.WithValues("patch", string(data)).V(2).Info("patching Placement")
-	return c.kcpClusterClient.Cluster(clusterName).SchedulingV1alpha1().Placements().Patch(ctx, name, pt, data, opts, subresources...)
-}
-
-// listWorkloadAPIBindings lists all compute APIBindings.
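The scheduling reconciler defined in the next file updates placement annotations through JSON merge patches, where, per RFC 7386, a null value deletes a key; that is what the later comment "nil means to remove the key" relies on, and what the tests below simulate with `jsonpatch.MergePatch`. A standalone, hedged sketch of that semantics (the annotation key literals are illustrative, taken from the doc comments in this patch, not from the constants' definitions):

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	original := []byte(`{"metadata":{"annotations":{"internal.workload.kcp.io/synctarget":"abc","keep":"me"}}}`)

	// In an RFC 7386 merge patch, assigning null deletes the key.
	patch := []byte(`{"metadata":{"annotations":{"internal.workload.kcp.io/synctarget":null}}}`)

	merged, err := jsonpatch.MergePatch(original, patch)
	if err != nil {
		panic(err)
	}
	// The synctarget annotation is gone; "keep" survives.
	fmt.Println(string(merged))
}
```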
-func (c *controller) listWorkloadAPIBindings(clusterName logicalcluster.Name) ([]*apisv1alpha1.APIBinding, error) {
-	apiBindings, err := c.apiBindingLister.Cluster(clusterName).List(labels.Everything())
-	if err != nil {
-		return nil, err
-	}
-
-	var filteredAPIBinding []*apisv1alpha1.APIBinding
-	for _, apiBinding := range apiBindings {
-		if _, ok := apiBinding.Annotations[workloadv1alpha1.ComputeAPIExportAnnotationKey]; ok {
-			filteredAPIBinding = append(filteredAPIBinding, apiBinding)
-		}
-	}
-	return filteredAPIBinding, nil
-}
diff --git a/pkg/reconciler/workload/placement/placement_reconcile_scheduling.go b/pkg/reconciler/workload/placement/placement_reconcile_scheduling.go
deleted file mode 100644
index c6ebb4ef7f0..00000000000
--- a/pkg/reconciler/workload/placement/placement_reconcile_scheduling.go
+++ /dev/null
@@ -1,201 +0,0 @@
-/*
-Copyright 2022 The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package placement
-
-import (
-	"context"
-	"encoding/json"
-	"fmt"
-	"math/rand"
-	"strings"
-
-	"github.com/kcp-dev/logicalcluster/v3"
-
-	"k8s.io/apimachinery/pkg/api/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
-	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/klog/v2"
-
-	locationreconciler "github.com/kcp-dev/kcp/pkg/reconciler/scheduling/location"
-	apisv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1"
-	schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1"
-	conditionsv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/apis/conditions/v1alpha1"
-	"github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/util/conditions"
-	workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1"
-)
-
-// placementSchedulingReconciler schedules placements according to the selected locations.
-// It considers only valid SyncTargets and updates the internal.workload.kcp.io/synctarget
-// annotation with the selected one on the placement object.
-type placementSchedulingReconciler struct {
-	listSyncTargets         func(clusterName logicalcluster.Name) ([]*workloadv1alpha1.SyncTarget, error)
-	listWorkloadAPIBindings func(clusterName logicalcluster.Name) ([]*apisv1alpha1.APIBinding, error)
-	getLocation             func(path logicalcluster.Path, name string) (*schedulingv1alpha1.Location, error)
-	patchPlacement          func(ctx context.Context, clusterName logicalcluster.Path, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (*schedulingv1alpha1.Placement, error)
-}
-
-func (r *placementSchedulingReconciler) reconcile(ctx context.Context, placement *schedulingv1alpha1.Placement) (reconcileStatus, *schedulingv1alpha1.Placement, error) {
-	clusterName := logicalcluster.From(placement)
-
-	// 1. get the currently scheduled synctarget
-	expectedAnnotations := map[string]interface{}{} // nil means to remove the key
-	currentScheduled, foundScheduled := placement.Annotations[workloadv1alpha1.InternalSyncTargetPlacementAnnotationKey]
-
-	// 2. pick all valid synctargets for this placement
-	validSyncTargets, reason, message, err := r.getAllValidSyncTargetsForPlacement(ctx, placement)
-	if err != nil {
-		return reconcileStatusStopAndRequeue, placement, err
-	}
-
-	// no valid synctarget, clean the annotation.
-	if len(validSyncTargets) == 0 {
-		if foundScheduled {
-			expectedAnnotations[workloadv1alpha1.InternalSyncTargetPlacementAnnotationKey] = nil
-			updated, err := r.patchPlacementAnnotation(ctx, clusterName.Path(), placement, expectedAnnotations)
-			return reconcileStatusContinue, updated, err
-		}
-		conditions.MarkFalse(placement, schedulingv1alpha1.PlacementScheduled, reason, conditionsv1alpha1.ConditionSeverityWarning, message)
-		return reconcileStatusContinue, placement, nil
-	}
-
-	// 3. do nothing if the scheduled cluster is among the valid clusters
-	if foundScheduled {
-		for _, syncTarget := range validSyncTargets {
-			syncTargetKey := workloadv1alpha1.ToSyncTargetKey(logicalcluster.From(syncTarget), syncTarget.Name)
-			if syncTargetKey != currentScheduled {
-				continue
-			}
-			conditions.MarkTrue(placement, schedulingv1alpha1.PlacementScheduled)
-			return reconcileStatusContinue, placement, nil
-		}
-	}
-
-	// 4. randomly select one as the scheduled cluster
-	// TODO(qiujian16): we currently schedule each location independently, which cannot guarantee
-	// that 1 cluster is scheduled per location when the same synctargets are in multiple locations.
-	// We need to rethink whether we need a better algorithm or need locations to be exclusive.
-	scheduledSyncTarget := validSyncTargets[rand.Intn(len(validSyncTargets))]
-	expectedAnnotations[workloadv1alpha1.InternalSyncTargetPlacementAnnotationKey] = workloadv1alpha1.ToSyncTargetKey(logicalcluster.From(scheduledSyncTarget), scheduledSyncTarget.Name)
-	updated, err := r.patchPlacementAnnotation(ctx, clusterName.Path(), placement, expectedAnnotations)
-	return reconcileStatusStopAndRequeue, updated, err
-}
-
-func (r *placementSchedulingReconciler) getAllValidSyncTargetsForPlacement(ctx context.Context, placement *schedulingv1alpha1.Placement) ([]*workloadv1alpha1.SyncTarget, string, string, error) {
-	if placement.Status.Phase == schedulingv1alpha1.PlacementPending || placement.Status.SelectedLocation == nil {
-		return nil, schedulingv1alpha1.ScheduleLocationNotFound, "No selected location is scheduled", nil
-	}
-
-	locationWorkspace := logicalcluster.NewPath(placement.Status.SelectedLocation.Path)
-	location, err := r.getLocation(
-		locationWorkspace,
-		placement.Status.SelectedLocation.LocationName)
-	switch {
-	case errors.IsNotFound(err):
-		return nil, schedulingv1alpha1.ScheduleLocationNotFound, "Selected location is not found", nil
-	case err != nil:
-		return nil, "", "", err
-	}
-
-	// find all synctargets in the location workspace
-	syncTargets, err := r.listSyncTargets(logicalcluster.From(location))
-	if err != nil {
-		return nil, "", "", err
-	}
-
-	// filter the SyncTargets by location
-	locationSyncTargets, err := locationreconciler.LocationSyncTargets(syncTargets, location)
-	if len(locationSyncTargets) == 0 || err != nil {
-		return nil, schedulingv1alpha1.ScheduleNoValidTargetReason, "No SyncTarget in the selected Location", err
-	}
-
-	// filter the SyncTargets by APIs
-	validSyncTargets, message, err := r.filterAPICompatible(ctx, placement, locationSyncTargets)
-	if len(validSyncTargets) == 0 || err != nil {
-		return nil, schedulingv1alpha1.ScheduleNoValidTargetReason, message, err
-	}
-
-	// filter the SyncTargets by status.
-	validSyncTargets = locationreconciler.FilterNonEvicting(locationreconciler.FilterReady(validSyncTargets))
-	if len(validSyncTargets) == 0 {
-		return validSyncTargets, schedulingv1alpha1.ScheduleNoValidTargetReason, "No SyncTarget is ready or non evicting", nil
-	}
-
-	return validSyncTargets, "", "", nil
-}
-
-func (r *placementSchedulingReconciler) filterAPICompatible(ctx context.Context, placement *schedulingv1alpha1.Placement, syncTargets []*workloadv1alpha1.SyncTarget) ([]*workloadv1alpha1.SyncTarget, string, error) {
-	logger := klog.FromContext(ctx)
-	var filteredSyncTargets []*workloadv1alpha1.SyncTarget
-
-	apiBindings, err := r.listWorkloadAPIBindings(logicalcluster.From(placement))
-	if err != nil {
-		return filteredSyncTargets, "", err
-	}
-
-	var messages []string
-	for _, syncTarget := range syncTargets {
-		supportedAPIMap := map[apisv1alpha1.GroupResource]workloadv1alpha1.ResourceToSync{}
-		for _, resource := range syncTarget.Status.SyncedResources {
-			if resource.State == workloadv1alpha1.ResourceSchemaAcceptedState {
-				supportedAPIMap[resource.GroupResource] = resource
-			}
-		}
-
-		supported := true
-		for _, binding := range apiBindings {
-			for _, desiredAPI := range binding.Status.BoundResources {
-				supportedAPI, ok := supportedAPIMap[apisv1alpha1.GroupResource{
-					Group:    desiredAPI.Group,
-					Resource: desiredAPI.Resource,
-				}]
-				if !ok || supportedAPI.IdentityHash != desiredAPI.Schema.IdentityHash {
-					supported = false
-					messages = append(messages, fmt.Sprintf("SyncTarget %s does not support APIBinding %s", syncTarget.Name, binding.Name))
-					logger.V(4).Info("Does not support APIBindings", "workspace", logicalcluster.From(placement), "APIBinding", binding.Name, "syncTarget", syncTarget.Name)
-					break
-				}
-			}
-		}
-
-		if supported {
-			filteredSyncTargets = append(filteredSyncTargets, syncTarget)
-		}
-	}
-
-	return filteredSyncTargets, strings.Join(messages, ", "), nil
-}
-
-func (r *placementSchedulingReconciler) patchPlacementAnnotation(ctx context.Context, clusterName logicalcluster.Path, placement *schedulingv1alpha1.Placement, annotations map[string]interface{}) (*schedulingv1alpha1.Placement, error) {
-	logger := klog.FromContext(ctx)
-	patch := map[string]interface{}{}
-	if len(annotations) > 0 {
-		if err := unstructured.SetNestedField(patch, annotations, "metadata", "annotations"); err != nil {
-			return placement, err
-		}
-	}
-	patchBytes, err := json.Marshal(patch)
-	if err != nil {
-		return placement, err
-	}
-	logger.WithValues("patch", string(patchBytes)).V(3).Info("patching Placement to update SyncTarget information")
-	updated, err := r.patchPlacement(ctx, clusterName, placement.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{})
-	if err != nil {
-		return placement, err
-	}
-	return updated, nil
-}
diff --git a/pkg/reconciler/workload/placement/placement_reconcile_scheduling_test.go b/pkg/reconciler/workload/placement/placement_reconcile_scheduling_test.go
deleted file mode 100644
index 96116d7fe15..00000000000
--- a/pkg/reconciler/workload/placement/placement_reconcile_scheduling_test.go
+++ /dev/null
@@ -1,343 +0,0 @@
-/*
-Copyright 2022 The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package placement - -import ( - "context" - "encoding/json" - "testing" - - jsonpatch "github.com/evanphx/json-patch" - "github.com/kcp-dev/logicalcluster/v3" - "github.com/stretchr/testify/require" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - - apisv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1" - schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1" - conditionsapi "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/apis/conditions/v1alpha1" - "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/util/conditions" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" -) - -func TestSchedulingReconcile(t *testing.T) { - testCases := []struct { - name string - - placement *schedulingv1alpha1.Placement - location *schedulingv1alpha1.Location - syncTargets []*workloadv1alpha1.SyncTarget - apiBindings []*apisv1alpha1.APIBinding - - wantPatch bool - expectedAnnotations map[string]string - }{ - { - name: "no location", - placement: newPlacement("test", "test-location", ""), - }, - { - name: "no synctarget", - placement: newPlacement("test", "test-location", ""), - location: newLocation("test-location"), - }, - { - name: "schedule one synctarget", - placement: newPlacement("test", "test-location", ""), - location: newLocation("test-location"), - syncTargets: []*workloadv1alpha1.SyncTarget{newSyncTarget("c1", true)}, - wantPatch: true, - expectedAnnotations: map[string]string{ - workloadv1alpha1.InternalSyncTargetPlacementAnnotationKey: "aQtdeEWVcqU7h7AKnYMm3KRQ96U4oU2W04yeOa", - }, - }, - { - name: "synctarget scheduled", - placement: newPlacement("test", "test-location", "c1"), - location: newLocation("test-location"), - syncTargets: []*workloadv1alpha1.SyncTarget{newSyncTarget("c1", true)}, - expectedAnnotations: map[string]string{ - workloadv1alpha1.InternalSyncTargetPlacementAnnotationKey: "aQtdeEWVcqU7h7AKnYMm3KRQ96U4oU2W04yeOa", - }, - }, - { - name: "unschedule synctarget", - placement: newPlacement("test", "test-location", "c1"), - location: newLocation("test-location"), - syncTargets: []*workloadv1alpha1.SyncTarget{newSyncTarget("c1", false)}, - wantPatch: true, - expectedAnnotations: map[string]string{}, - }, - { - name: "reschedule synctarget", - placement: newPlacement("test", "test-location", "c1"), - location: newLocation("test-location"), - syncTargets: []*workloadv1alpha1.SyncTarget{newSyncTarget("c1", false), newSyncTarget("c2", true)}, - wantPatch: true, - expectedAnnotations: map[string]string{ - workloadv1alpha1.InternalSyncTargetPlacementAnnotationKey: "aPkhvUbGK0xoZIjMnM2pA0AuV1g7i4tBwxu5m4", - }, - }, - { - name: "schedule to syncTarget with compatible APIs", - placement: newPlacement("test", "test-location", ""), - location: newLocation("test-location"), - syncTargets: []*workloadv1alpha1.SyncTarget{ - newSyncTarget("c1", true, workloadv1alpha1.ResourceToSync{GroupResource: apisv1alpha1.GroupResource{Resource: "services"}, State: 
workloadv1alpha1.ResourceSchemaIncompatibleState}),
-				newSyncTarget("c2", true, workloadv1alpha1.ResourceToSync{GroupResource: apisv1alpha1.GroupResource{Resource: "services"}, State: workloadv1alpha1.ResourceSchemaAcceptedState}),
-			},
-			apiBindings: []*apisv1alpha1.APIBinding{
-				newAPIBinding("kubernetes", apisv1alpha1.BoundAPIResource{Resource: "services"}),
-			},
-			wantPatch: true,
-			expectedAnnotations: map[string]string{
-				workloadv1alpha1.InternalSyncTargetPlacementAnnotationKey: "aPkhvUbGK0xoZIjMnM2pA0AuV1g7i4tBwxu5m4",
-			},
-		},
-		{
-			name:      "no syncTarget has compatible APIs",
-			placement: newPlacement("test", "test-location", ""),
-			location:  newLocation("test-location"),
-			syncTargets: []*workloadv1alpha1.SyncTarget{
-				newSyncTarget("c1", true, workloadv1alpha1.ResourceToSync{GroupResource: apisv1alpha1.GroupResource{Resource: "services"}, State: workloadv1alpha1.ResourceSchemaIncompatibleState}),
-				newSyncTarget("c2", true),
-			},
-			apiBindings: []*apisv1alpha1.APIBinding{
-				newAPIBinding("kubernetes", apisv1alpha1.BoundAPIResource{Resource: "services"}),
-			},
-			wantPatch: false,
-		},
-	}
-
-	for _, testCase := range testCases {
-		t.Run(testCase.name, func(t *testing.T) {
-			listSyncTargets := func(clusterName logicalcluster.Name) ([]*workloadv1alpha1.SyncTarget, error) {
-				return testCase.syncTargets, nil
-			}
-			getLocation := func(clusterName logicalcluster.Path, name string) (*schedulingv1alpha1.Location, error) {
-				if testCase.location == nil {
-					return nil, errors.NewNotFound(schema.GroupResource{}, name)
-				}
-				return testCase.location, nil
-			}
-			var patched bool
-			patchPlacement := func(ctx context.Context, clusterName logicalcluster.Path, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (*schedulingv1alpha1.Placement, error) {
-				patched = true
-				nsData, _ := json.Marshal(testCase.placement)
-				updatedData, err := jsonpatch.MergePatch(nsData, data)
-				if err != nil {
-					return nil, err
-				}
-
-				var patchedPlacement schedulingv1alpha1.Placement
-				err = json.Unmarshal(updatedData, &patchedPlacement)
-				if err != nil {
-					return testCase.placement, err
-				}
-				return &patchedPlacement, err
-			}
-			listWorkloadAPIBindings := func(clusterName logicalcluster.Name) ([]*apisv1alpha1.APIBinding, error) {
-				return testCase.apiBindings, nil
-			}
-			reconciler := &placementSchedulingReconciler{
-				listSyncTargets:         listSyncTargets,
-				getLocation:             getLocation,
-				patchPlacement:          patchPlacement,
-				listWorkloadAPIBindings: listWorkloadAPIBindings,
-			}
-
-			_, updated, err := reconciler.reconcile(context.TODO(), testCase.placement)
-			require.NoError(t, err)
-			require.Equal(t, testCase.wantPatch, patched)
-			require.Equal(t, testCase.expectedAnnotations, updated.Annotations)
-		})
-	}
-}
-
-func TestReconcileStatusConditions(t *testing.T) {
-	testCases := []struct {
-		name string
-
-		placement   *schedulingv1alpha1.Placement
-		location    *schedulingv1alpha1.Location
-		syncTargets []*workloadv1alpha1.SyncTarget
-		apiBindings []*apisv1alpha1.APIBinding
-
-		wantStatus       corev1.ConditionStatus
-		wantStatusReason string
-		wantMessage      string
-	}{
-		{
-			name:             "no location",
-			placement:        newPlacement("test", "test-location", ""),
-			wantStatus:       corev1.ConditionFalse,
-			wantStatusReason: schedulingv1alpha1.ScheduleLocationNotFound,
-			wantMessage:      "Selected location is not found",
-		},
-		{
-			name:             "no synctarget",
-			placement:        newPlacement("test", "test-location", ""),
-			location:         newLocation("test-location"),
-			wantStatus:       corev1.ConditionFalse,
-			wantStatusReason: schedulingv1alpha1.ScheduleNoValidTargetReason,
-			wantMessage:      "No SyncTarget in the selected Location",
-		},
-		{
-			name:        "synctarget scheduled",
-			placement:   newPlacement("test", "test-location", "c1"),
-			location:    newLocation("test-location"),
-			syncTargets: []*workloadv1alpha1.SyncTarget{newSyncTarget("c1", true)},
-			wantStatus:  corev1.ConditionTrue,
-		},
-		{
-			name:             "synctarget is not ready",
-			placement:        newPlacement("test", "test-location", ""),
-			location:         newLocation("test-location"),
-			syncTargets:      []*workloadv1alpha1.SyncTarget{newSyncTarget("c1", false)},
-			wantStatus:       corev1.ConditionFalse,
-			wantStatusReason: schedulingv1alpha1.ScheduleNoValidTargetReason,
-			wantMessage:      "No SyncTarget is ready or non evicting",
-		},
-		{
-			name:      "no syncTarget has compatible APIs",
-			placement: newPlacement("test", "test-location", ""),
-			location:  newLocation("test-location"),
-			syncTargets: []*workloadv1alpha1.SyncTarget{
-				newSyncTarget("c1", true, workloadv1alpha1.ResourceToSync{GroupResource: apisv1alpha1.GroupResource{Resource: "services"}, State: workloadv1alpha1.ResourceSchemaIncompatibleState}),
-				newSyncTarget("c2", true),
-			},
-			apiBindings: []*apisv1alpha1.APIBinding{
-				newAPIBinding("kubernetes", apisv1alpha1.BoundAPIResource{Resource: "services"}),
-			},
-			wantStatus:       corev1.ConditionFalse,
-			wantStatusReason: schedulingv1alpha1.ScheduleNoValidTargetReason,
-			wantMessage:      "SyncTarget c1 does not support APIBinding kubernetes, SyncTarget c2 does not support APIBinding kubernetes",
-		},
-	}
-
-	for _, testCase := range testCases {
-		t.Run(testCase.name, func(t *testing.T) {
-			listSyncTargets := func(clusterName logicalcluster.Name) ([]*workloadv1alpha1.SyncTarget, error) {
-				return testCase.syncTargets, nil
-			}
-			getLocation := func(clusterName logicalcluster.Path, name string) (*schedulingv1alpha1.Location, error) {
-				if testCase.location == nil {
-					return nil, errors.NewNotFound(schema.GroupResource{}, name)
-				}
-				return testCase.location, nil
-			}
-			patchPlacement := func(ctx context.Context, clusterName logicalcluster.Path, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (*schedulingv1alpha1.Placement, error) {
-				nsData, _ := json.Marshal(testCase.placement)
-				updatedData, err := jsonpatch.MergePatch(nsData, data)
-				if err != nil {
-					return nil, err
-				}
-
-				var patchedPlacement schedulingv1alpha1.Placement
-				err = json.Unmarshal(updatedData, &patchedPlacement)
-				if err != nil {
-					return testCase.placement, err
-				}
-				return &patchedPlacement, err
-			}
-			listWorkloadAPIBindings := func(clusterName logicalcluster.Name) ([]*apisv1alpha1.APIBinding, error) {
-				return testCase.apiBindings, nil
-			}
-			reconciler := &placementSchedulingReconciler{
-				listSyncTargets:         listSyncTargets,
-				getLocation:             getLocation,
-				patchPlacement:          patchPlacement,
-				listWorkloadAPIBindings: listWorkloadAPIBindings,
-			}
-
-			_, updated, err := reconciler.reconcile(context.TODO(), testCase.placement)
-			require.NoError(t, err)
-			c := conditions.Get(updated, schedulingv1alpha1.PlacementScheduled)
-			require.NotNil(t, c)
-			require.Equal(t, testCase.wantStatus, c.Status)
-			require.Equal(t, testCase.wantStatusReason, c.Reason)
-			require.Equal(t, testCase.wantMessage, c.Message)
-		})
-	}
-}
-
-func newPlacement(name, location, synctarget string) *schedulingv1alpha1.Placement {
-	placement := &schedulingv1alpha1.Placement{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: name,
-		},
-		Spec: schedulingv1alpha1.PlacementSpec{
-			NamespaceSelector: &metav1.LabelSelector{},
-		},
-		Status: 
schedulingv1alpha1.PlacementStatus{ - SelectedLocation: &schedulingv1alpha1.LocationReference{ - LocationName: location, - }, - }, - } - - if len(synctarget) > 0 { - placement.Annotations = map[string]string{ - workloadv1alpha1.InternalSyncTargetPlacementAnnotationKey: workloadv1alpha1.ToSyncTargetKey("", synctarget), - } - } - - return placement -} - -func newLocation(name string) *schedulingv1alpha1.Location { - return &schedulingv1alpha1.Location{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Spec: schedulingv1alpha1.LocationSpec{ - InstanceSelector: &metav1.LabelSelector{}, - }, - } -} - -func newSyncTarget(name string, ready bool, resources ...workloadv1alpha1.ResourceToSync) *workloadv1alpha1.SyncTarget { - syncTarget := &workloadv1alpha1.SyncTarget{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Status: workloadv1alpha1.SyncTargetStatus{ - SyncedResources: resources, - }, - } - - if ready { - conditions.MarkTrue(syncTarget, conditionsapi.ReadyCondition) - } - - return syncTarget -} - -func newAPIBinding(name string, resources ...apisv1alpha1.BoundAPIResource) *apisv1alpha1.APIBinding { - return &apisv1alpha1.APIBinding{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Status: apisv1alpha1.APIBindingStatus{ - BoundResources: resources, - }, - } -} diff --git a/pkg/reconciler/workload/replicateclusterrole/replicateclusterrole_controller.go b/pkg/reconciler/workload/replicateclusterrole/replicateclusterrole_controller.go deleted file mode 100644 index ddef5687c34..00000000000 --- a/pkg/reconciler/workload/replicateclusterrole/replicateclusterrole_controller.go +++ /dev/null @@ -1,65 +0,0 @@ -/* -Copyright 2023 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package replicateclusterrole - -import ( - kcprbacinformers "github.com/kcp-dev/client-go/informers/rbac/v1" - kcpkubernetesclientset "github.com/kcp-dev/client-go/kubernetes" - "github.com/kcp-dev/logicalcluster/v3" - - rbacv1 "k8s.io/api/rbac/v1" - "k8s.io/apimachinery/pkg/util/sets" - - "github.com/kcp-dev/kcp/pkg/reconciler/cache/labelclusterroles" - "github.com/kcp-dev/kcp/sdk/apis/workload" -) - -const ( - ControllerName = "kcp-workloads-replicate-clusterrole" -) - -// NewController returns a new controller for labelling ClusterRole that should be replicated. -func NewController( - kubeClusterClient kcpkubernetesclientset.ClusterInterface, - clusterRoleInformer kcprbacinformers.ClusterRoleClusterInformer, - clusterRoleBindingInformer kcprbacinformers.ClusterRoleBindingClusterInformer, -) labelclusterroles.Controller { - return labelclusterroles.NewController( - ControllerName, - workload.GroupName, - HasSyncRule, - func(clusterName logicalcluster.Name, crb *rbacv1.ClusterRoleBinding) bool { return false }, - kubeClusterClient, - clusterRoleInformer, - clusterRoleBindingInformer, - ) -} - -func HasSyncRule(clusterName logicalcluster.Name, cr *rbacv1.ClusterRole) bool { - for _, rule := range cr.Rules { - apiGroups := sets.New[string](rule.APIGroups...) 
- if !apiGroups.Has(workload.GroupName) && !apiGroups.Has("*") { - continue - } - resources := sets.New[string](rule.Resources...) - verbs := sets.New[string](rule.Verbs...) - if (resources.Has("synctargets") || resources.Has("*")) && (verbs.Has("sync") || verbs.Has("*")) { - return true - } - } - return false -} diff --git a/pkg/reconciler/workload/replicateclusterrolebinding/replicateclusterrolebinding_controller.go b/pkg/reconciler/workload/replicateclusterrolebinding/replicateclusterrolebinding_controller.go deleted file mode 100644 index 4ce3942f1a9..00000000000 --- a/pkg/reconciler/workload/replicateclusterrolebinding/replicateclusterrolebinding_controller.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright 2023 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package replicateclusterrolebinding - -import ( - kcprbacinformers "github.com/kcp-dev/client-go/informers/rbac/v1" - kcpkubernetesclientset "github.com/kcp-dev/client-go/kubernetes" - "github.com/kcp-dev/logicalcluster/v3" - - rbacv1 "k8s.io/api/rbac/v1" - - "github.com/kcp-dev/kcp/pkg/reconciler/cache/labelclusterrolebindings" - "github.com/kcp-dev/kcp/pkg/reconciler/workload/replicateclusterrole" - "github.com/kcp-dev/kcp/sdk/apis/workload" -) - -const ( - ControllerName = "kcp-workloads-replicate-clusterrolebinding" -) - -// NewController returns a new controller for labelling ClusterRoleBinding that should be replicated. -func NewController( - kubeClusterClient kcpkubernetesclientset.ClusterInterface, - clusterRoleBindingInformer kcprbacinformers.ClusterRoleBindingClusterInformer, - clusterRoleInformer kcprbacinformers.ClusterRoleClusterInformer, -) labelclusterrolebindings.Controller { - return labelclusterrolebindings.NewController( - ControllerName, - workload.GroupName, - replicateclusterrole.HasSyncRule, - func(clusterName logicalcluster.Name, crb *rbacv1.ClusterRoleBinding) bool { return false }, - kubeClusterClient, - clusterRoleBindingInformer, - clusterRoleInformer, - ) -} diff --git a/pkg/reconciler/workload/replicatelogicalcluster/replicatelogicalcluster_controller.go b/pkg/reconciler/workload/replicatelogicalcluster/replicatelogicalcluster_controller.go deleted file mode 100644 index 1e1704fe710..00000000000 --- a/pkg/reconciler/workload/replicatelogicalcluster/replicatelogicalcluster_controller.go +++ /dev/null @@ -1,105 +0,0 @@ -/* -Copyright 2023 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/
-
-package replicatelogicalcluster
-
-import (
- "fmt"
-
- kcpcache "github.com/kcp-dev/apimachinery/v2/pkg/cache"
- "github.com/kcp-dev/logicalcluster/v3"
-
- apierrors "k8s.io/apimachinery/pkg/api/errors"
- "k8s.io/apimachinery/pkg/util/runtime"
- "k8s.io/client-go/tools/cache"
-
- "github.com/kcp-dev/kcp/pkg/reconciler/cache/labellogicalcluster"
- "github.com/kcp-dev/kcp/pkg/reconciler/cache/replication"
- corev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1"
- "github.com/kcp-dev/kcp/sdk/apis/workload"
- workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1"
- kcpclientset "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster"
- corev1alpha1informers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/core/v1alpha1"
- workloadv1alpha1informers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/workload/v1alpha1"
-)
-
-const (
- ControllerName = "kcp-workload-replicate-logicalcluster"
-)
-
-// NewController returns a new controller for labelling LogicalClusters that should be replicated.
-
-func NewController(
- kcpClusterClient kcpclientset.ClusterInterface,
- logicalClusterInformer corev1alpha1informers.LogicalClusterClusterInformer,
- syncTargetInformer workloadv1alpha1informers.SyncTargetClusterInformer,
-) labellogicalcluster.Controller {
- logicalClusterLister := logicalClusterInformer.Lister()
- syncTargetIndexer := syncTargetInformer.Informer().GetIndexer()
-
- c := labellogicalcluster.NewController(
- ControllerName,
- workload.GroupName,
- func(cluster *corev1alpha1.LogicalCluster) bool {
- // If there are any SyncTargets for this logical cluster, then the LogicalCluster object should be replicated.
- keys, err := syncTargetIndexer.IndexKeys(kcpcache.ClusterIndexName, kcpcache.ClusterIndexKey(logicalcluster.From(cluster)))
- if err != nil {
- runtime.HandleError(fmt.Errorf("failed to list SyncTargets: %v", err))
- return false
- }
- return len(keys) > 0
- },
- kcpClusterClient,
- logicalClusterInformer,
- )
-
- // enqueue the logical cluster every time a SyncTarget changes
- enqueueSyncTarget := func(obj interface{}) {
- if tombstone, ok := obj.(cache.DeletedFinalStateUnknown); ok {
- obj = tombstone.Obj
- }
-
- syncTarget, ok := obj.(*workloadv1alpha1.SyncTarget)
- if !ok {
- runtime.HandleError(fmt.Errorf("unexpected object type: %T", obj))
- return
- }
-
- cluster, err := logicalClusterLister.Cluster(logicalcluster.From(syncTarget)).Get(corev1alpha1.LogicalClusterName)
- if err != nil && !apierrors.IsNotFound(err) {
- runtime.HandleError(fmt.Errorf("failed to get logical cluster: %v", err))
- return
- } else if apierrors.IsNotFound(err) {
- return
- }
-
- c.EnqueueLogicalCluster(cluster, "reason", "SyncTarget changed", "synctarget", syncTarget.Name)
- }
-
- _, _ = syncTargetInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{
- FilterFunc: replication.IsNoSystemClusterName,
- Handler: cache.ResourceEventHandlerFuncs{
- AddFunc: func(obj interface{}) {
- enqueueSyncTarget(obj)
- },
- DeleteFunc: func(obj interface{}) {
- enqueueSyncTarget(obj)
- },
- },
- })
-
- return c
-}
diff --git a/pkg/reconciler/workload/resource/resource_controller.go b/pkg/reconciler/workload/resource/resource_controller.go
deleted file mode 100644
index 7cfb1dfc4af..00000000000
--- a/pkg/reconciler/workload/resource/resource_controller.go
+++ /dev/null
@@ -1,567 +0,0 @@
-/*
-Copyright 2021 The KCP Authors.
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resource - -import ( - "context" - "fmt" - "reflect" - "strings" - "time" - - kcpcache "github.com/kcp-dev/apimachinery/v2/pkg/cache" - kcpdynamic "github.com/kcp-dev/client-go/dynamic" - kcpcorev1informers "github.com/kcp-dev/client-go/informers/core/v1" - "github.com/kcp-dev/logicalcluster/v3" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - utilerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/apiserver/pkg/endpoints/handlers" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/util/workqueue" - "k8s.io/klog/v2" - - "github.com/kcp-dev/kcp/pkg/indexers" - "github.com/kcp-dev/kcp/pkg/informer" - "github.com/kcp-dev/kcp/pkg/logging" - "github.com/kcp-dev/kcp/pkg/reconciler/apis/apiexport" - "github.com/kcp-dev/kcp/pkg/syncer/shared" - schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" - schedulingv1alpha1informers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/scheduling/v1alpha1" - workloadv1alpha1informers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/workload/v1alpha1" -) - -const ( - ControllerName = "kcp-workload-resource-scheduler" - bySyncTargetKey = ControllerName + "bySyncTargetKey" -) - -// NewController returns a new Controller which schedules resources in scheduled namespaces. 
-func NewController( - dynamicClusterClient kcpdynamic.ClusterInterface, - ddsif *informer.DiscoveringDynamicSharedInformerFactory, - syncTargetInformer, globalSyncTargetInformer workloadv1alpha1informers.SyncTargetClusterInformer, - namespaceInformer kcpcorev1informers.NamespaceClusterInformer, - placementInformer schedulingv1alpha1informers.PlacementClusterInformer, -) (*Controller, error) { - resourceQueue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "kcp-namespace-resource") - gvrQueue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "kcp-namespace-gvr") - - c := &Controller{ - resourceQueue: resourceQueue, - gvrQueue: gvrQueue, - - dynClusterClient: dynamicClusterClient, - - getNamespace: func(clusterName logicalcluster.Name, namespaceName string) (*corev1.Namespace, error) { - return namespaceInformer.Lister().Cluster(clusterName).Get(namespaceName) - }, - - getSyncTargetPlacementAnnotations: func(clusterName logicalcluster.Name) (sets.Set[string], error) { - placements, err := placementInformer.Lister().Cluster(clusterName).List(labels.Everything()) - if err != nil { - return nil, err - } - - expectedSyncTargetKeys := sets.New[string]() - for _, placement := range placements { - if val := placement.Annotations[workloadv1alpha1.InternalSyncTargetPlacementAnnotationKey]; val != "" { - expectedSyncTargetKeys.Insert(val) - } - } - return expectedSyncTargetKeys, err - }, - - getSyncTargetFromKey: func(syncTargetKey string) (*workloadv1alpha1.SyncTarget, bool, error) { - syncTargets, err := indexers.ByIndexWithFallback[*workloadv1alpha1.SyncTarget](syncTargetInformer.Informer().GetIndexer(), - globalSyncTargetInformer.Informer().GetIndexer(), bySyncTargetKey, syncTargetKey) - if err != nil { - return nil, false, err - } - if len(syncTargets) == 0 { - return nil, false, nil - } - if len(syncTargets) > 1 { - return nil, false, fmt.Errorf("possible collision: multiple sync targets found for key %q", syncTargetKey) - } - return syncTargets[0], true, nil - }, - - ddsif: ddsif, - } - - _, _ = namespaceInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ - FilterFunc: filterNamespace, - Handler: cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { c.enqueueNamespace(obj) }, - UpdateFunc: func(old, obj interface{}) { - oldNS := old.(*corev1.Namespace) - newNS := obj.(*corev1.Namespace) - if !reflect.DeepEqual(scheduleStateLabels(oldNS.Labels), scheduleStateLabels(newNS.Labels)) || - !reflect.DeepEqual(scheduleStateAnnotations(oldNS.Annotations), scheduleStateAnnotations(newNS.Annotations)) { - c.enqueueNamespace(obj) - } - }, - DeleteFunc: nil, // Nothing to do. - }, - }) - - _, _ = placementInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: c.enqueuePlacement, - UpdateFunc: func(oldObj, _ interface{}) { c.enqueuePlacement(oldObj) }, - DeleteFunc: c.enqueuePlacement, - }) - - ddsif.AddEventHandler(informer.GVREventHandlerFuncs{ - AddFunc: func(gvr schema.GroupVersionResource, obj interface{}) { c.enqueueResource(gvr, obj) }, - UpdateFunc: func(gvr schema.GroupVersionResource, _, obj interface{}) { c.enqueueResource(gvr, obj) }, - DeleteFunc: nil, // Nothing to do. 
- }) - - indexers.AddIfNotPresentOrDie(syncTargetInformer.Informer().GetIndexer(), cache.Indexers{ - bySyncTargetKey: indexBySyncTargetKey, - }) - - indexers.AddIfNotPresentOrDie(globalSyncTargetInformer.Informer().GetIndexer(), cache.Indexers{ - bySyncTargetKey: indexBySyncTargetKey, - }) - - _, _ = syncTargetInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - DeleteFunc: func(obj interface{}) { - c.enqueueSyncTarget(obj) - }, - }) - - _, _ = globalSyncTargetInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - DeleteFunc: func(obj interface{}) { - c.enqueueSyncTarget(obj) - }, - }) - - return c, nil -} - -func scheduleStateLabels(ls map[string]string) map[string]string { - ret := make(map[string]string, len(ls)) - for k, v := range ls { - if strings.HasPrefix(k, workloadv1alpha1.ClusterResourceStateLabelPrefix) { - ret[k] = v - } - } - return ret -} - -func scheduleStateAnnotations(ls map[string]string) map[string]string { - ret := make(map[string]string, len(ls)) - for k, v := range ls { - if strings.HasPrefix(k, workloadv1alpha1.InternalClusterDeletionTimestampAnnotationPrefix) { - ret[k] = v - } - } - return ret -} - -type Controller struct { - resourceQueue workqueue.RateLimitingInterface - gvrQueue workqueue.RateLimitingInterface - - dynClusterClient kcpdynamic.ClusterInterface - - getNamespace func(clusterName logicalcluster.Name, namespaceName string) (*corev1.Namespace, error) - getSyncTargetPlacementAnnotations func(clusterName logicalcluster.Name) (sets.Set[string], error) - getSyncTargetFromKey func(syncTargetKey string) (*workloadv1alpha1.SyncTarget, bool, error) - - ddsif *informer.DiscoveringDynamicSharedInformerFactory -} - -func filterNamespace(obj interface{}) bool { - key, err := kcpcache.DeletionHandlingMetaClusterNamespaceKeyFunc(obj) - if err != nil { - runtime.HandleError(err) - return false - } - _, _, name, err := kcpcache.SplitMetaClusterNamespaceKey(key) - if err != nil { - runtime.HandleError(err) - return false - } - if namespaceBlocklist.Has(name) { - logging.WithReconciler(klog.Background(), ControllerName).WithValues("namespace", name).V(2).Info("skipping syncing Namespace") - return false - } - return true -} - -func (c *Controller) enqueueResource(gvr schema.GroupVersionResource, obj interface{}) { - key, err := kcpcache.MetaClusterNamespaceKeyFunc(obj) - if err != nil { - runtime.HandleError(err) - return - } - queueKey := strings.Join([]string{gvr.Resource, gvr.Version, gvr.Group}, ".") + "::" + key - logger := logging.WithQueueKey(logging.WithReconciler(klog.Background(), ControllerName), queueKey) - logger.V(2).Info("queueing resource") - c.resourceQueue.Add(queueKey) -} - -func (c *Controller) enqueueGVR(gvr schema.GroupVersionResource) { - queueKey := strings.Join([]string{gvr.Resource, gvr.Version, gvr.Group}, ".") - logger := logging.WithQueueKey(logging.WithReconciler(klog.Background(), ControllerName), queueKey) - logger.V(2).Info("queueing GVR") - c.gvrQueue.Add(queueKey) -} - -func (c *Controller) enqueueNamespace(obj interface{}) { - key, err := kcpcache.MetaClusterNamespaceKeyFunc(obj) - if err != nil { - runtime.HandleError(err) - return - } - clusterName, _, name, err := kcpcache.SplitMetaClusterNamespaceKey(key) - if err != nil { - runtime.HandleError(err) - return - } - ns, err := c.getNamespace(clusterName, name) - if err != nil { - if errors.IsNotFound(err) { - // Namespace was deleted - return - } - - runtime.HandleError(err) - return - } - if err := c.enqueueResourcesForNamespace(ns); err != nil { 
- runtime.HandleError(err) - return - } -} - -func (c *Controller) Start(ctx context.Context, numThreads int) { - defer runtime.HandleCrash() - defer c.resourceQueue.ShutDown() - defer c.gvrQueue.ShutDown() - - logger := logging.WithReconciler(klog.FromContext(ctx), ControllerName) - ctx = klog.NewContext(ctx, logger) - logger.Info("Starting controller") - defer logger.Info("Shutting down controller") - - for i := 0; i < numThreads; i++ { - go wait.Until(func() { c.startResourceWorker(ctx) }, time.Second, ctx.Done()) - go wait.Until(func() { c.startGVRWorker(ctx) }, time.Second, ctx.Done()) - } - <-ctx.Done() -} - -func (c *Controller) startResourceWorker(ctx context.Context) { - for processNext(ctx, c.resourceQueue, c.processResource) { - } -} -func (c *Controller) startGVRWorker(ctx context.Context) { - for processNext(ctx, c.gvrQueue, c.processGVR) { - } -} - -func processNext( - ctx context.Context, - queue workqueue.RateLimitingInterface, - processFunc func(ctx context.Context, key string) error, -) bool { - // Wait until there is a new item in the working queue - k, quit := queue.Get() - if quit { - return false - } - key := k.(string) - - logger := logging.WithQueueKey(klog.FromContext(ctx), key) - ctx = klog.NewContext(ctx, logger) - logger.V(1).Info("processing key") - - // No matter what, tell the queue we're done with this key, to unblock - // other workers. - defer queue.Done(key) - - if err := processFunc(ctx, key); err != nil { - runtime.HandleError(fmt.Errorf("%q controller failed to sync %q, err: %w", ControllerName, key, err)) - queue.AddRateLimited(key) - return true - } - queue.Forget(key) - return true -} - -// key is gvr::KEY. -func (c *Controller) processResource(ctx context.Context, key string) error { - logger := klog.FromContext(ctx) - parts := strings.SplitN(key, "::", 2) - if len(parts) != 2 { - logger.Info("error parsing key; dropping") - return nil - } - gvrstr := parts[0] - logger = logger.WithValues("gvr", gvrstr) - gvr, _ := schema.ParseResourceArg(gvrstr) - if gvr == nil { - logger.Info("error parsing GVR; dropping") - return nil - } - key = parts[1] - logger = logger.WithValues("objectKey", key) - - inf, err := c.ddsif.ForResource(*gvr) - if err != nil { - return err - } - - lclusterName, namespace, name, err := kcpcache.SplitMetaClusterNamespaceKey(key) - if err != nil { - logger.Error(err, "failed to split key, dropping") - return nil - } - obj, err := inf.Lister().ByCluster(lclusterName).ByNamespace(namespace).Get(name) - if errors.IsNotFound(err) { - return nil - } - if err != nil { - logger.Error(err, "error getting object from indexer") - return err - } - unstr, ok := obj.(*unstructured.Unstructured) - if !ok { - logger.WithValues("objectType", fmt.Sprintf("%T", obj)).Info("object was not Unstructured, dropping") - return nil - } - unstr = unstr.DeepCopy() - - actualGVR := gvr - if actualVersion := unstr.GetAnnotations()[handlers.KCPOriginalAPIVersionAnnotation]; actualVersion != "" { - actualGV, err := schema.ParseGroupVersion(actualVersion) - if err != nil { - logger.Error(err, "error parsing original API version annotation", "annotation", actualVersion) - // Returning an error and reprocessing will presumably result in the same parse error, so just return - // nil here. 
- return nil
- }
- actualGVR.Version = actualGV.Version
- logger.V(4).Info("using actual API version from annotation", "actual", actualVersion)
- }
-
- return c.reconcileResource(ctx, lclusterName, unstr, actualGVR)
-}
-
-func (c *Controller) processGVR(ctx context.Context, gvrstr string) error {
- logger := klog.FromContext(ctx).WithValues("gvr", gvrstr)
- gvr, _ := schema.ParseResourceArg(gvrstr)
- if gvr == nil {
- logger.Info("error parsing GVR; dropping")
- return nil
- }
- return c.reconcileGVR(*gvr)
-}
-
-// namespaceBlocklist holds a set of namespaces that should never be synced from kcp to physical clusters.
-var namespaceBlocklist = sets.New[string]("kube-system", "kube-public", "kube-node-lease", apiexport.DefaultIdentitySecretNamespace)
-
-// enqueueResourcesForNamespace adds the resources contained by the given
-// namespace to the queue if their scheduling labels differ from the namespace's.
-func (c *Controller) enqueueResourcesForNamespace(ns *corev1.Namespace) error {
- logger := logging.WithObject(logging.WithReconciler(klog.Background(), ControllerName), ns).WithValues("operation", "enqueueResourcesForNamespace")
- clusterName := logicalcluster.From(ns)
-
- nsLocations := getLocations(ns.Labels, true)
- nsDeleting := getDeletingLocations(ns.Annotations)
- logger = logger.WithValues("nsLocations", sets.List[string](nsLocations))
-
- logger.V(4).Info("getting listers")
- informers, notSynced := c.ddsif.Informers()
- var errs []error
- for gvr, informer := range informers {
- logger := logger.WithValues("gvr", gvr.String())
- objs, err := informer.Lister().ByCluster(clusterName).ByNamespace(ns.Name).List(labels.Everything())
- if err != nil {
- errs = append(errs, fmt.Errorf("error listing %q in %s|%s: %w", gvr, clusterName, ns.Name, err))
- continue
- }
-
- logger.WithValues("items", len(objs)).V(4).Info("got items for GVR")
-
- var enqueuedResources []string
- for _, obj := range objs {
- u := obj.(*unstructured.Unstructured)
-
- objLocations := getLocations(u.GetLabels(), false)
- objDeleting := getDeletingLocations(u.GetAnnotations())
- logger := logging.WithObject(logger, u).WithValues("gvk", gvr.GroupVersion().WithKind(u.GetKind()))
- if !objLocations.Equal(nsLocations) || !reflect.DeepEqual(objDeleting, nsDeleting) {
- c.enqueueResource(gvr, obj)
-
- if len(enqueuedResources) < 10 {
- enqueuedResources = append(enqueuedResources, u.GetName())
- }
-
- logger.V(3).Info("enqueuing object to schedule it")
- } else {
- logger.V(4).Info("skipping object as it is already correctly scheduled")
- }
- }
-
- if len(enqueuedResources) > 0 {
- if len(enqueuedResources) == 10 {
- enqueuedResources = append(enqueuedResources, "...")
- }
- logger.WithValues("resources", enqueuedResources).V(2).Info("enqueuing resources for GVR")
- }
- }
-
- // For all types whose informer hasn't synced yet, enqueue a workqueue
- // item to check that GVR again later (reconcileGVR, above).
- for _, gvr := range notSynced {
- logger.V(3).Info("informer for GVR is not synced, needed for namespace; re-enqueueing")
- c.enqueueGVR(gvr)
- }
-
- return utilerrors.NewAggregate(errs)
-}
-
-func (c *Controller) enqueueSyncTarget(obj interface{}) {
- metaObj, ok := obj.(metav1.Object)
- if !ok {
- runtime.HandleError(fmt.Errorf("object is not a metav1.Object: %T", obj))
- return
- }
- clusterName := logicalcluster.From(metaObj)
- syncTargetKey := workloadv1alpha1.ToSyncTargetKey(clusterName, metaObj.GetName())
-
- c.enqueueSyncTargetKey(syncTargetKey)
-}
-
-func (c *Controller) enqueueSyncTargetKey(syncTargetKey string) {
- logger := logging.WithReconciler(klog.Background(), ControllerName).WithValues("syncTargetKey", syncTargetKey)
-
- informers, _ := c.ddsif.Informers()
- queued := map[string]int{}
- for gvr := range informers {
- inf, err := c.ddsif.ForResource(gvr)
- if err != nil {
- runtime.HandleError(err)
- continue
- }
- stateLabelObjs, err := inf.Informer().GetIndexer().ByIndex(indexers.ByClusterResourceStateLabelKey, workloadv1alpha1.ClusterResourceStateLabelPrefix+syncTargetKey)
- if err != nil {
- runtime.HandleError(err)
- continue
- }
- syncerFinalizerObjs, err := inf.Informer().GetIndexer().ByIndex(indexers.BySyncerFinalizerKey, shared.SyncerFinalizerNamePrefix+syncTargetKey)
- if err != nil {
- runtime.HandleError(err)
- continue
- }
-
- // let's deduplicate the objects from both indexes.
- inObjs := make(map[types.UID]bool)
- var objs []interface{}
- for _, obj := range append(stateLabelObjs, syncerFinalizerObjs...) {
- obj, ok := obj.(*unstructured.Unstructured)
- if !ok {
- runtime.HandleError(fmt.Errorf("object is not an *unstructured.Unstructured: %T", obj))
- continue
- }
- if _, ok := inObjs[obj.GetUID()]; !ok {
- inObjs[obj.GetUID()] = true
- objs = append(objs, obj)
- }
- }
-
- if len(objs) == 0 {
- continue
- }
- for _, obj := range objs {
- c.enqueueResource(gvr, obj)
- }
- queued[gvr.String()] = len(objs)
- }
- if len(queued) > 0 {
- logger.WithValues("resources", queued).V(2).Info("queued GVRs assigned to a syncTargetKey because SyncTarget or Placement changed")
- }
-}
-
-// getLocations returns a set of all the locations extracted from a resource's labels; setting skipPending to true ignores locations that are not yet in the Sync state.
-func getLocations(labels map[string]string, skipPending bool) sets.Set[string] {
- locations := sets.New[string]()
- for k, v := range labels {
- if strings.HasPrefix(k, workloadv1alpha1.ClusterResourceStateLabelPrefix) && (!skipPending || v == string(workloadv1alpha1.ResourceStateSync)) {
- locations.Insert(strings.TrimPrefix(k, workloadv1alpha1.ClusterResourceStateLabelPrefix))
- }
- }
- return locations
-}
-
-// getDeletingLocations returns a map from sync target keys that are being deleted to their deletion timestamps.
-func getDeletingLocations(annotations map[string]string) map[string]string { - deletingLocations := make(map[string]string) - for k, v := range annotations { - if strings.HasPrefix(k, workloadv1alpha1.InternalClusterDeletionTimestampAnnotationPrefix) { - deletingLocations[strings.TrimPrefix(k, workloadv1alpha1.InternalClusterDeletionTimestampAnnotationPrefix)] = v - } - } - return deletingLocations -} - -func (c *Controller) enqueuePlacement(obj interface{}) { - _, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) - if err != nil { - runtime.HandleError(err) - return - } - placement, ok := obj.(*schedulingv1alpha1.Placement) - if !ok { - runtime.HandleError(fmt.Errorf("expected a Placement, got a %T", obj)) - return - } - syncTargetKey := placement.Annotations[workloadv1alpha1.InternalSyncTargetPlacementAnnotationKey] - if syncTargetKey == "" { - return - } - - c.enqueueSyncTargetKey(syncTargetKey) -} - -func indexBySyncTargetKey(obj interface{}) ([]string, error) { - syncTarget, ok := obj.(*workloadv1alpha1.SyncTarget) - if !ok { - return []string{}, fmt.Errorf("obj is supposed to be a syncTarget, but is %T", obj) - } - - if _, ok := syncTarget.GetLabels()[workloadv1alpha1.InternalSyncTargetKeyLabel]; !ok { - return []string{}, nil - } - - return []string{syncTarget.GetLabels()[workloadv1alpha1.InternalSyncTargetKeyLabel]}, nil -} diff --git a/pkg/reconciler/workload/resource/resource_reconcile.go b/pkg/reconciler/workload/resource/resource_reconcile.go deleted file mode 100644 index ee304f1242c..00000000000 --- a/pkg/reconciler/workload/resource/resource_reconcile.go +++ /dev/null @@ -1,318 +0,0 @@ -/* -Copyright 2021 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resource - -import ( - "context" - "encoding/json" - "fmt" - "reflect" - "strings" - "time" - - "github.com/go-logr/logr" - "github.com/kcp-dev/logicalcluster/v3" - - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/klog/v2" - - "github.com/kcp-dev/kcp/pkg/logging" - syncershared "github.com/kcp-dev/kcp/pkg/syncer/shared" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" -) - -// reconcileResource is responsible for setting the cluster for a resource of -// any type, to match the cluster where its namespace is assigned. -func (c *Controller) reconcileResource(ctx context.Context, lclusterName logicalcluster.Name, obj *unstructured.Unstructured, gvr *schema.GroupVersionResource) error { - logger := logging.WithObject(logging.WithReconciler(klog.Background(), ControllerName), obj).WithValues("groupVersionResource", gvr.String(), "logicalCluster", lclusterName.String()) - logger.V(4).Info("reconciling resource") - - // if the resource is a namespace, let's return early. nothing to do. 
- namespaceGVR := &schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"}
- if *gvr == *namespaceGVR {
- logger.V(5).Info("resource is a namespace; ignoring")
- return nil
- }
-
- // If the resource is upsynced, let's check if the synctarget still exists; if not, delete the resource.
- for k, v := range obj.GetLabels() {
- if strings.HasPrefix(k, workloadv1alpha1.ClusterResourceStateLabelPrefix) && v == string(workloadv1alpha1.ResourceStateUpsync) {
- syncTargetKey := strings.TrimPrefix(k, workloadv1alpha1.ClusterResourceStateLabelPrefix)
- _, exists, err := c.getSyncTargetFromKey(syncTargetKey)
- if err != nil {
- return fmt.Errorf("error reconciling resource %s|%s/%s: error getting synctarget: %w", lclusterName, obj.GetNamespace(), obj.GetName(), err)
- }
- if !exists {
- logger.V(4).Info("synctarget does not exist, deleting resource")
- return c.dynClusterClient.Resource(*gvr).Cluster(logicalcluster.From(obj).Path()).Namespace(obj.GetNamespace()).Delete(ctx, obj.GetName(), metav1.DeleteOptions{})
- }
- // nothing to do, the resource is upsynced and the synctarget still exists.
- return nil
- }
- }
-
- var err error
- var expectedSyncTargetKeys sets.Set[string]
- expectedDeletedSynctargetKeys := make(map[string]string)
- namespaceName := obj.GetNamespace()
- // We need to handle namespaced and non-namespaced resources differently, as namespaced resources
- // will get the locations from their namespace, and non-namespaced will get the locations from all the
- // workspace placements.
- if namespaceName != "" {
- logger := logger.WithValues("namespace", namespaceName)
- if namespaceBlocklist.Has(namespaceName) {
- logger.V(4).Info("skipping syncing namespace because it is in the block list")
- return nil
- }
-
- namespace, err := c.getNamespace(lclusterName, namespaceName)
- if apierrors.IsNotFound(err) {
- // Namespace was deleted; this resource will eventually get deleted too, so ignore
- return nil
- }
- if err != nil {
- return fmt.Errorf("error reconciling resource %s|%s/%s: error getting namespace: %w", lclusterName, namespaceName, obj.GetName(), err)
- }
-
- expectedSyncTargetKeys = getLocations(namespace.GetLabels(), false)
- expectedDeletedSynctargetKeys = getDeletingLocations(namespace.GetAnnotations())
- } else {
- // We only allow some cluster-wide types of resources.
- if !syncershared.SyncableClusterScopedResources.Has(gvr.String()) {
- logger.V(5).Info("skipping syncing cluster-scoped resource because it is not in the allowed list of syncable cluster-scoped resources", "name", obj.GetName())
- return nil
- }
-
- logger.Info("reconciling cluster-wide resource", "name", obj.GetName(), "labels", obj.GetLabels())
-
- // now we need to calculate the synctargets that need to be deleted.
- // we do this by getting the current locations of the resource and
- // comparing against the expected locations.
-
- expectedSyncTargetKeys, err = c.getSyncTargetPlacementAnnotations(logicalcluster.From(obj))
- if err != nil {
- logger.Error(err, "error getting valid sync target keys for workspace")
- return nil
- }
-
- deletionTimestamp := time.Now().Format(time.RFC3339)
- currentLocations := getLocations(obj.GetLabels(), false)
-
- for _, location := range sets.List[string](currentLocations.Difference(expectedSyncTargetKeys)) {
- expectedDeletedSynctargetKeys[location] = deletionTimestamp
- }
- }
-
- var annotationPatch, labelPatch map[string]interface{}
-
- // If the object's DeletionTimestamp is set, we should set all per-location deletion timestamp annotations to the same value.
- if obj.GetDeletionTimestamp() != nil {
- annotationPatch = propagateDeletionTimestamp(logger, obj)
- } else {
- // We only need to compute the new placements if the resource is not being deleted.
- annotationPatch, labelPatch = computePlacement(expectedSyncTargetKeys, expectedDeletedSynctargetKeys, obj)
- }
-
- // clean finalizers from removed syncers
- filteredFinalizers := make([]string, 0, len(obj.GetFinalizers()))
- for _, f := range obj.GetFinalizers() {
- logger := logger.WithValues("finalizer", f)
- if !strings.HasPrefix(f, syncershared.SyncerFinalizerNamePrefix) {
- filteredFinalizers = append(filteredFinalizers, f)
- continue
- }
-
- syncTargetKey := strings.TrimPrefix(f, syncershared.SyncerFinalizerNamePrefix)
- logger = logger.WithValues("syncTargetKey", syncTargetKey)
- _, found, err := c.getSyncTargetFromKey(syncTargetKey)
- if err != nil {
- logger.Error(err, "error checking if sync target key exists")
- continue
- }
- if !found {
- logger.V(3).Info("SyncTarget under the key was deleted, removing finalizer")
- continue
- }
- logger.V(4).Info("SyncTarget under the key still exists, keeping finalizer")
- filteredFinalizers = append(filteredFinalizers, f)
- }
-
- // create patch
- if len(labelPatch) == 0 && len(annotationPatch) == 0 && len(filteredFinalizers) == len(obj.GetFinalizers()) {
- logger.V(4).Info("nothing to change for resource")
- return nil
- }
-
- patch := map[string]interface{}{
- "metadata": map[string]interface{}{
- "uid": obj.GetUID(),
- "resourceVersion": obj.GetResourceVersion(),
- },
- }
- if len(labelPatch) > 0 {
- if err := unstructured.SetNestedField(patch, labelPatch, "metadata", "labels"); err != nil {
- logger.Error(err, "unexpected unstructured error")
- return err // should never happen
- }
- }
- if len(annotationPatch) > 0 {
- if err := unstructured.SetNestedField(patch, annotationPatch, "metadata", "annotations"); err != nil {
- logger.Error(err, "unexpected unstructured error")
- return err // should never happen
- }
- }
- if len(filteredFinalizers) != len(obj.GetFinalizers()) {
- if err := unstructured.SetNestedStringSlice(patch, filteredFinalizers, "metadata", "finalizers"); err != nil {
- return err // should never happen
- }
- }
- patchBytes, err := json.Marshal(patch)
- if err != nil {
- logger.Error(err, "unexpected marshal error")
- return err
- }
-
- logger.WithValues("patch", string(patchBytes)).V(2).Info("patching resource")
- if namespaceName != "" {
- if _, err := c.dynClusterClient.Resource(*gvr).Cluster(lclusterName.Path()).Namespace(namespaceName).Patch(ctx, obj.GetName(), types.MergePatchType, patchBytes, metav1.PatchOptions{}); err != nil {
- return err
- }
- return nil
- }
-
- if _, err := c.dynClusterClient.Resource(*gvr).Cluster(lclusterName.Path()).Patch(ctx, obj.GetName(), types.MergePatchType, patchBytes, metav1.PatchOptions{}); err != nil {
- return err
- }
-
- return nil
-}
-
-func propagateDeletionTimestamp(logger logr.Logger, obj metav1.Object) map[string]interface{} {
- logger.V(3).Info("resource is being deleted; setting the per-location deletion timestamps")
- objAnnotations := obj.GetAnnotations()
- objLocations := getLocations(obj.GetLabels(), false)
- annotationPatch := make(map[string]interface{})
- for location := range objLocations {
- if val, ok := objAnnotations[workloadv1alpha1.InternalClusterDeletionTimestampAnnotationPrefix+location]; !ok || val == "" {
- annotationPatch[workloadv1alpha1.InternalClusterDeletionTimestampAnnotationPrefix+location] = obj.GetDeletionTimestamp().Format(time.RFC3339)
- }
- }
- return annotationPatch
-}
-
-// computePlacement computes the patch against annotations and labels. Nil means to remove the key.
-func computePlacement(expectedSyncTargetKeys sets.Set[string], expectedDeletedSynctargetKeys map[string]string, obj metav1.Object) (annotationPatch map[string]interface{}, labelPatch map[string]interface{}) {
- currentSynctargetKeys := getLocations(obj.GetLabels(), false)
- currentSynctargetKeysDeleting := getDeletingLocations(obj.GetAnnotations())
- if currentSynctargetKeys.Equal(expectedSyncTargetKeys) && reflect.DeepEqual(currentSynctargetKeysDeleting, expectedDeletedSynctargetKeys) {
- // already correctly assigned.
- return
- }
-
- // create merge patch
- annotationPatch = map[string]interface{}{}
- labelPatch = map[string]interface{}{}
-
- // unschedule objects from SyncTargets that are no longer expected.
- for _, loc := range sets.List[string](currentSynctargetKeys.Difference(expectedSyncTargetKeys)) {
- // This is an inconsistent state; for namespaced resources it is probably due to the namespace deletion reaching its grace period, so repair it.
- var hasSyncerFinalizer, hasClusterFinalizer bool
- // Check if there's still the syncer or the cluster finalizer.
- for _, finalizer := range obj.GetFinalizers() {
- if finalizer == syncershared.SyncerFinalizerNamePrefix+loc {
- hasSyncerFinalizer = true
- }
- }
- if val, exists := obj.GetAnnotations()[workloadv1alpha1.ClusterFinalizerAnnotationPrefix+loc]; exists && val != "" {
- hasClusterFinalizer = true
- }
- if hasSyncerFinalizer || hasClusterFinalizer {
- if _, found := obj.GetAnnotations()[workloadv1alpha1.InternalClusterDeletionTimestampAnnotationPrefix+loc]; !found {
- annotationPatch[workloadv1alpha1.InternalClusterDeletionTimestampAnnotationPrefix+loc] = time.Now().Format(time.RFC3339)
- }
- } else {
- if _, found := obj.GetAnnotations()[workloadv1alpha1.InternalClusterDeletionTimestampAnnotationPrefix+loc]; found {
- annotationPatch[workloadv1alpha1.InternalClusterDeletionTimestampAnnotationPrefix+loc] = nil
- }
- labelPatch[workloadv1alpha1.ClusterResourceStateLabelPrefix+loc] = nil
- }
- }
-
- // sync deletion timestamps if the location is expected to be deleted.
- for _, loc := range sets.List[string](expectedSyncTargetKeys.Intersection(currentSynctargetKeys)) {
- if expectedTimestamp, ok := expectedDeletedSynctargetKeys[loc]; ok {
- if _, ok := currentSynctargetKeysDeleting[loc]; !ok {
- annotationPatch[workloadv1alpha1.InternalClusterDeletionTimestampAnnotationPrefix+loc] = expectedTimestamp
- }
- } else {
- if _, ok := currentSynctargetKeysDeleting[loc]; ok {
- annotationPatch[workloadv1alpha1.InternalClusterDeletionTimestampAnnotationPrefix+loc] = nil
- }
- }
- }
-
- // If the resource is namespaced, set the initial resource state to Sync, otherwise set it to Pending.
- // TODO(jmprusi): ResourceStatePending will be the default state once there is a default coordinator for resources.
- resourceState := workloadv1alpha1.ResourceStateSync
- if obj.GetNamespace() == "" {
- resourceState = workloadv1alpha1.ResourceStatePending
- }
-
- // set label on unscheduled objects if resource is scheduled and not deleting
- for _, loc := range sets.List[string](expectedSyncTargetKeys.Difference(currentSynctargetKeys)) {
- if _, ok := expectedDeletedSynctargetKeys[loc]; ok {
- continue
- }
- // TODO(sttts): add way to go into pending state first, maybe with a namespace annotation
- labelPatch[workloadv1alpha1.ClusterResourceStateLabelPrefix+loc] = string(resourceState)
- }
-
- if len(annotationPatch) == 0 {
- annotationPatch = nil
- }
- if len(labelPatch) == 0 {
- labelPatch = nil
- }
-
- return
-}
-
-func (c *Controller) reconcileGVR(gvr schema.GroupVersionResource) error {
- inf, err := c.ddsif.ForResource(gvr)
- if err != nil {
- return err
- }
- if !inf.Informer().HasSynced() {
- return fmt.Errorf("informer for %q is not synced; re-enqueueing", gvr)
- }
-
- // Update all resources in the namespaces with cluster assignment.
- objs, err := inf.Lister().List(labels.Everything())
- if err != nil {
- return err
- }
- for _, obj := range objs {
- c.enqueueResource(gvr, obj)
- }
- return nil
-}
diff --git a/pkg/reconciler/workload/resource/resource_reconcile_test.go b/pkg/reconciler/workload/resource/resource_reconcile_test.go
deleted file mode 100644
index 6cb16778716..00000000000
--- a/pkg/reconciler/workload/resource/resource_reconcile_test.go
+++ /dev/null
@@ -1,436 +0,0 @@
-/*
-Copyright 2022 The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/ - -package resource - -import ( - "reflect" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/klog/v2" -) - -func namespace(annotations, labels map[string]string) *corev1.Namespace { - return &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: annotations, - Labels: labels, - }, - } -} - -func object(annotations, labels map[string]string, finalizers []string, deletionTimestamp *metav1.Time, namespace string) metav1.Object { - return &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: annotations, - Labels: labels, - DeletionTimestamp: deletionTimestamp, - Finalizers: finalizers, - Namespace: namespace, - }, - } -} - -func TestComputePlacement(t *testing.T) { - tests := []struct { - name string - ns *corev1.Namespace - obj metav1.Object - wantAnnotationPatch map[string]interface{} // nil means delete - wantLabelPatch map[string]interface{} // nil means delete - }{ - {name: "unscheduled namespace and object", - ns: namespace(nil, nil), - obj: object(nil, nil, nil, nil, "ns"), - }, - {name: "pending namespace, unscheduled object", - ns: namespace(nil, map[string]string{ - "state.workload.kcp.io/cluster-1": "", - }), - obj: object(nil, nil, nil, nil, "ns"), - }, - {name: "invalid state value on namespace", - ns: namespace(nil, map[string]string{ - "state.workload.kcp.io/cluster-1": "Foo", - }), - obj: object(nil, nil, nil, nil, "ns"), - }, - {name: "syncing namespace, unscheduled object", - ns: namespace(nil, map[string]string{ - "state.workload.kcp.io/cluster-1": "Sync", - }), - obj: object(nil, nil, nil, nil, "ns"), - wantLabelPatch: map[string]interface{}{ - "state.workload.kcp.io/cluster-1": "Sync", - }, - }, - {name: "syncing but deleting namespace, unscheduled object, don't schedule the object at all", - ns: namespace(map[string]string{ - "deletion.internal.workload.kcp.io/cluster-1": "2002-10-02T10:00:00-05:00", - }, map[string]string{ - "state.workload.kcp.io/cluster-1": "Sync", - }), - obj: object(nil, nil, nil, nil, "ns"), - }, - {name: "new location on namespace", - ns: namespace(nil, map[string]string{ - "state.workload.kcp.io/cluster-1": "Sync", - "state.workload.kcp.io/cluster-2": "Sync", - }), - obj: object(nil, map[string]string{ - "state.workload.kcp.io/cluster-1": "Sync", - }, nil, nil, "ns"), - wantLabelPatch: map[string]interface{}{ - "state.workload.kcp.io/cluster-2": "Sync", - }, - }, - {name: "new deletion on namespace", - ns: namespace(map[string]string{ - "deletion.internal.workload.kcp.io/cluster-4": "2002-10-02T10:00:00-05:00", - }, map[string]string{ - "state.workload.kcp.io/cluster-4": "Sync", - }), - obj: object(nil, map[string]string{ - "state.workload.kcp.io/cluster-4": "Sync", - }, nil, nil, "ns"), - wantLabelPatch: nil, - wantAnnotationPatch: map[string]interface{}{ - "deletion.internal.workload.kcp.io/cluster-4": "2002-10-02T10:00:00-05:00", - }, - }, - {name: "existing deletion on namespace and object", - ns: namespace(map[string]string{ - "deletion.internal.workload.kcp.io/cluster-3": "2002-10-02T10:00:00-05:00", - }, map[string]string{ - "state.workload.kcp.io/cluster-3": "Sync", - }), - obj: object(map[string]string{ - "deletion.internal.workload.kcp.io/cluster-3": "2002-10-02T10:00:00-05:00", - }, map[string]string{ - "state.workload.kcp.io/cluster-3": "Sync", - }, nil, nil, "ns"), - }, - {name: "hard delete after namespace is not scheduled", - ns: namespace(nil, nil), - obj: 
object(map[string]string{ - "deletion.internal.workload.kcp.io/cluster-3": "2002-10-02T10:00:00-05:00", - }, map[string]string{ - "state.workload.kcp.io/cluster-3": "Sync", // removed hard because namespace is not scheduled - }, nil, nil, "ns"), - wantLabelPatch: map[string]interface{}{ - "state.workload.kcp.io/cluster-3": nil, - }, - wantAnnotationPatch: map[string]interface{}{ - "deletion.internal.workload.kcp.io/cluster-3": nil, - }, - }, - {name: "no hard delete after namespace is not scheduled due to the resource having a cluster finalizer, expect no patches", - ns: namespace(nil, nil), - obj: object(map[string]string{ - "deletion.internal.workload.kcp.io/cluster-3": "2002-10-02T10:00:00-05:00", - "finalizers.workload.kcp.io/cluster-3": "external-coordinator", - }, map[string]string{ - "state.workload.kcp.io/cluster-3": "Sync", - }, nil, nil, "ns"), - wantLabelPatch: nil, - wantAnnotationPatch: nil, - }, - {name: "no hard delete after namespace is not scheduled due to the resource having a syncer finalizer, expect no patches", - ns: namespace(nil, nil), - obj: object(map[string]string{ - "deletion.internal.workload.kcp.io/cluster-3": "2002-10-02T10:00:00-05:00", - }, map[string]string{ - "state.workload.kcp.io/cluster-3": "Sync", - }, []string{ - "workload.kcp.io/syncer-cluster-3", - }, nil, "ns"), - wantLabelPatch: nil, - wantAnnotationPatch: nil, - }, - {name: "existing deletion on object, hard delete of namespace", - ns: namespace(nil, nil), - obj: object(map[string]string{ - "deletion.internal.workload.kcp.io/cluster-3": "2002-10-02T10:00:00-05:00", - }, map[string]string{ - "state.workload.kcp.io/cluster-3": "Sync", - }, nil, nil, "ns"), - wantLabelPatch: map[string]interface{}{ - "state.workload.kcp.io/cluster-3": nil, - }, - wantAnnotationPatch: map[string]interface{}{ - "deletion.internal.workload.kcp.io/cluster-3": nil, - }, - }, - {name: "existing deletion on object, rescheduled namespace", - ns: namespace(nil, map[string]string{ - "state.workload.kcp.io/cluster-3": "Sync", - }), - obj: object(map[string]string{ - "deletion.internal.workload.kcp.io/cluster-3": "2002-10-02T10:00:00-05:00", - }, map[string]string{ - "state.workload.kcp.io/cluster-3": "Sync", - }, nil, nil, "ns"), - wantAnnotationPatch: map[string]interface{}{ - "deletion.internal.workload.kcp.io/cluster-3": nil, - }, - }, - {name: "multiple locations, added and removed on namespace and object", - ns: namespace(map[string]string{ - "deletion.internal.workload.kcp.io/cluster-4": "2002-10-02T10:00:00-05:00", - }, map[string]string{ - "state.workload.kcp.io/cluster-1": "Sync", - "state.workload.kcp.io/cluster-2": "Sync", - "state.workload.kcp.io/cluster-4": "Sync", // deleting - }), - obj: object(map[string]string{ - "deletion.internal.workload.kcp.io/cluster-3": "2002-10-02T10:00:00-05:00", - }, map[string]string{ - "state.workload.kcp.io/cluster-2": "Sync", - "state.workload.kcp.io/cluster-3": "Sync", // removed hard - "state.workload.kcp.io/cluster-4": "Sync", - }, nil, nil, "ns"), - wantLabelPatch: map[string]interface{}{ - "state.workload.kcp.io/cluster-1": "Sync", - "state.workload.kcp.io/cluster-3": nil, - }, - wantAnnotationPatch: map[string]interface{}{ - "deletion.internal.workload.kcp.io/cluster-4": "2002-10-02T10:00:00-05:00", - "deletion.internal.workload.kcp.io/cluster-3": nil, - }, - }, - {name: "multiple locations, added and removed on namespace and object, object has a cluster finalizer on cluster-3, expect no changes for that cluster", - ns: namespace(map[string]string{ - 
"deletion.internal.workload.kcp.io/cluster-4": "2002-10-02T10:00:00-05:00", - }, map[string]string{ - "state.workload.kcp.io/cluster-1": "Sync", - "state.workload.kcp.io/cluster-2": "Sync", - "state.workload.kcp.io/cluster-4": "Sync", // deleting - }), - obj: object(map[string]string{ - "deletion.internal.workload.kcp.io/cluster-3": "2002-10-02T10:00:00-05:00", - "finalizers.workload.kcp.io/cluster-3": "external-coordinator", - }, map[string]string{ - "state.workload.kcp.io/cluster-2": "Sync", - "state.workload.kcp.io/cluster-3": "Sync", - "state.workload.kcp.io/cluster-4": "Sync", - }, nil, nil, "ns"), - wantLabelPatch: map[string]interface{}{ - "state.workload.kcp.io/cluster-1": "Sync", - }, - wantAnnotationPatch: map[string]interface{}{ - "deletion.internal.workload.kcp.io/cluster-4": "2002-10-02T10:00:00-05:00", - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - expectedSynctargetKeys := getLocations(tt.ns.GetLabels(), true) - expectedDeletedSynctargetKeys := getDeletingLocations(tt.ns.GetAnnotations()) - gotAnnotationPatch, gotLabelPatch := computePlacement(expectedSynctargetKeys, expectedDeletedSynctargetKeys, tt.obj) - if diff := cmp.Diff(gotAnnotationPatch, tt.wantAnnotationPatch); diff != "" { - t.Errorf("incorrect annotation patch: %s", diff) - } - if diff := cmp.Diff(gotLabelPatch, tt.wantLabelPatch); diff != "" { - t.Errorf("incorrect label patch: %s", diff) - } - }) - } -} - -func TestPropagateDeletionTimestamp(t *testing.T) { - tests := []struct { - name string - obj metav1.Object - wantAnnotationPatch map[string]interface{} // nil means delete - }{ - {name: "Object is marked for deletion and has one location", - obj: object(nil, map[string]string{ - "state.workload.kcp.io/cluster-1": "Sync", - }, nil, &metav1.Time{Time: time.Date(2002, 10, 2, 10, 0, 0, 0, time.UTC)}, "ns"), - wantAnnotationPatch: map[string]interface{}{ - "deletion.internal.workload.kcp.io/cluster-1": "2002-10-02T10:00:00Z", - }, - }, {name: "Object is marked for deletion and has multiple locations", - obj: object(nil, map[string]string{ - "state.workload.kcp.io/cluster-1": "Sync", - "state.workload.kcp.io/cluster-2": "Sync", - "state.workload.kcp.io/cluster-3": "Sync", - }, nil, &metav1.Time{Time: time.Date(2002, 10, 2, 10, 0, 0, 0, time.UTC)}, "ns"), - wantAnnotationPatch: map[string]interface{}{ - "deletion.internal.workload.kcp.io/cluster-1": "2002-10-02T10:00:00Z", - "deletion.internal.workload.kcp.io/cluster-2": "2002-10-02T10:00:00Z", - "deletion.internal.workload.kcp.io/cluster-3": "2002-10-02T10:00:00Z", - }, - }, - {name: "Object is marked for deletion, has one location with a location deletionTimestamp already set, no change", - obj: object(map[string]string{ - "deletion.internal.workload.kcp.io/cluster-1": "2002-10-02T10:00:00Z", - }, map[string]string{ - "state.workload.kcp.io/cluster-1": "Sync", - }, nil, &metav1.Time{Time: time.Date(2002, 10, 2, 10, 0, 0, 0, time.UTC)}, "ns"), - wantAnnotationPatch: map[string]interface{}{}, - }, - {name: "Object is marked for deletion, has one location with a location deletionTimestamp already set, but with different time value, no update", - obj: object(map[string]string{ - "deletion.internal.workload.kcp.io/cluster-1": "2000-01-01 10:00:00 +0000 UTC", - }, map[string]string{ - "state.workload.kcp.io/cluster-1": "Sync", - }, nil, &metav1.Time{Time: time.Date(2002, 10, 2, 10, 0, 0, 0, time.UTC)}, "ns"), - wantAnnotationPatch: map[string]interface{}{}, - }, - {name: "Object is marked for deletion, has one pending location, the 
deletionTimestamp of that location should be set", - obj: object(nil, map[string]string{ - "state.workload.kcp.io/cluster-1": "", - }, nil, &metav1.Time{Time: time.Date(2002, 10, 2, 10, 0, 0, 0, time.UTC)}, "ns"), - wantAnnotationPatch: map[string]interface{}{ - "deletion.internal.workload.kcp.io/cluster-1": "2002-10-02T10:00:00Z", - }, - }, - {name: "Object is marked for deletion and has no locations", - obj: object(nil, nil, nil, &metav1.Time{Time: time.Date(2002, 10, 2, 10, 0, 0, 0, time.UTC)}, "ns"), - wantAnnotationPatch: map[string]interface{}{}, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - gotAnnotationPatch := propagateDeletionTimestamp(klog.Background(), tt.obj) - if diff := cmp.Diff(gotAnnotationPatch, tt.wantAnnotationPatch); diff != "" { - t.Errorf("incorrect annotation patch: %s", diff) - } - }) - } -} - -func TestGetLocations(t *testing.T) { - tests := []struct { - name string - labels map[string]string - skipPending bool - wantKeys []string - }{ - {name: "No locations", - labels: map[string]string{}, - skipPending: false, - wantKeys: []string{}, - }, - {name: "One location", - labels: map[string]string{ - "state.workload.kcp.io/cluster-1": "Sync", - }, - skipPending: false, - wantKeys: []string{"cluster-1"}, - }, - {name: "Multiple locations", - labels: map[string]string{ - "state.workload.kcp.io/cluster-1": "Sync", - "state.workload.kcp.io/cluster-2": "Sync", - "state.workload.kcp.io/cluster-3": "Sync", - }, - skipPending: false, - wantKeys: []string{"cluster-1", "cluster-2", "cluster-3"}, - }, - {name: "Multiple locations, some pending, skipPending false", - labels: map[string]string{ - "state.workload.kcp.io/cluster-1": "Sync", - "state.workload.kcp.io/cluster-2": "Sync", - "state.workload.kcp.io/cluster-3": "Sync", - "state.workload.kcp.io/cluster-4": "", - "state.workload.kcp.io/cluster-5": "", - "state.workload.kcp.io/cluster-6": "", - }, - skipPending: false, - wantKeys: []string{"cluster-1", "cluster-2", "cluster-3", "cluster-4", "cluster-5", "cluster-6"}, - }, - {name: "Multiple locations, some pending, skipPending true", - labels: map[string]string{ - "state.workload.kcp.io/cluster-1": "Sync", - "state.workload.kcp.io/cluster-2": "Sync", - "state.workload.kcp.io/cluster-3": "Sync", - "state.workload.kcp.io/cluster-4": "", - "state.workload.kcp.io/cluster-5": "", - "state.workload.kcp.io/cluster-6": "", - }, - skipPending: true, - wantKeys: []string{"cluster-1", "cluster-2", "cluster-3"}, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if gotKeys := getLocations(tt.labels, tt.skipPending); !reflect.DeepEqual(sets.List[string](gotKeys), tt.wantKeys) { - t.Errorf("getLocations() = %v, want %v", gotKeys, tt.wantKeys) - } - }) - } -} - -func TestGetDeletingLocations(t *testing.T) { - tests := []struct { - name string - annotations map[string]string - wantKeys map[string]string - }{ - {name: "No locations", - annotations: map[string]string{}, - wantKeys: map[string]string{}, - }, - {name: "One location", - annotations: map[string]string{ - "deletion.internal.workload.kcp.io/cluster-1": "2002-10-02T10:00:00Z", - }, - wantKeys: map[string]string{ - "cluster-1": "2002-10-02T10:00:00Z", - }, - }, - {name: "Multiple locations", - annotations: map[string]string{ - "deletion.internal.workload.kcp.io/cluster-1": "2002-10-02T10:00:00Z", - "deletion.internal.workload.kcp.io/cluster-2": "2002-10-02T10:00:00Z", - "deletion.internal.workload.kcp.io/cluster-3": "2002-10-02T10:00:00Z", - }, - wantKeys: map[string]string{ - 
"cluster-1": "2002-10-02T10:00:00Z", - "cluster-2": "2002-10-02T10:00:00Z", - "cluster-3": "2002-10-02T10:00:00Z", - }, - }, - {name: "Multiple locations, other annotations", - annotations: map[string]string{ - "deletion.internal.workload.kcp.io/cluster-1": "2002-10-02T10:00:00Z", - "deletion.internal.workload.kcp.io/cluster-2": "2002-10-02T10:00:00Z", - "deletion.internal.workload.kcp.io/cluster-3": "2002-10-02T10:00:00Z", - "this.is.not.a.deletion/annotation": "2002-10-02T10:00:00Z", - }, - wantKeys: map[string]string{ - "cluster-1": "2002-10-02T10:00:00Z", - "cluster-2": "2002-10-02T10:00:00Z", - "cluster-3": "2002-10-02T10:00:00Z", - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if gotKeys := getDeletingLocations(tt.annotations); !reflect.DeepEqual(gotKeys, tt.wantKeys) { - t.Errorf("getDeletingLocations() = %v, want %v", gotKeys, tt.wantKeys) - } - }) - } -} diff --git a/pkg/reconciler/workload/synctarget/synctarget_controller.go b/pkg/reconciler/workload/synctarget/synctarget_controller.go deleted file mode 100644 index b73e3ce7278..00000000000 --- a/pkg/reconciler/workload/synctarget/synctarget_controller.go +++ /dev/null @@ -1,226 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package synctarget - -import ( - "context" - "encoding/json" - "fmt" - "reflect" - "time" - - jsonpatch "github.com/evanphx/json-patch" - kcpcache "github.com/kcp-dev/apimachinery/v2/pkg/cache" - "github.com/kcp-dev/logicalcluster/v3" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/util/workqueue" - "k8s.io/klog/v2" - - "github.com/kcp-dev/kcp/pkg/informer" - "github.com/kcp-dev/kcp/pkg/logging" - corev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" - kcpclientset "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster" - corev1alpha1informers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/core/v1alpha1" - workloadv1alpha1informers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/workload/v1alpha1" -) - -const ControllerName = "kcp-synctarget-controller" - -func NewController( - kcpClusterClient kcpclientset.ClusterInterface, - syncTargetInformer workloadv1alpha1informers.SyncTargetClusterInformer, - workspaceShardInformer, globalWorkspaceShardInformer corev1alpha1informers.ShardClusterInformer, -) *Controller { - c := &Controller{ - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), ControllerName), - kcpClusterClient: kcpClusterClient, - syncTargetIndexer: syncTargetInformer.Informer().GetIndexer(), - listWorkspaceShards: informer.NewListerWithFallback[*corev1alpha1.Shard](workspaceShardInformer.Lister(), globalWorkspaceShardInformer.Lister()), - } - - // Watch for events related to SyncTargets - _, 
_ = syncTargetInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { c.enqueueSyncTarget(obj) }, - UpdateFunc: func(_, obj interface{}) { c.enqueueSyncTarget(obj) }, - DeleteFunc: func(obj interface{}) {}, - }) - - // Watch for events related to workspaceShards - _, _ = workspaceShardInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { c.enqueueWorkspaceShard(obj) }, - UpdateFunc: func(_, obj interface{}) { c.enqueueWorkspaceShard(obj) }, - DeleteFunc: func(obj interface{}) { c.enqueueWorkspaceShard(obj) }, - }) - - return c -} - -type Controller struct { - queue workqueue.RateLimitingInterface - kcpClusterClient kcpclientset.ClusterInterface - - listWorkspaceShards informer.FallbackListFunc[*corev1alpha1.Shard] - syncTargetIndexer cache.Indexer -} - -func (c *Controller) enqueueSyncTarget(obj interface{}) { - key, err := kcpcache.MetaClusterNamespaceKeyFunc(obj) - if err != nil { - runtime.HandleError(err) - return - } - logger := logging.WithQueueKey(logging.WithReconciler(klog.Background(), ControllerName), key) - logger.V(2).Info("queueing SyncTarget") - c.queue.Add(key) -} - -// On workspaceShard changes, enqueue all the syncTargets. -func (c *Controller) enqueueWorkspaceShard(obj interface{}) { - logger := logging.WithObject(logging.WithReconciler(klog.Background(), ControllerName), obj.(*corev1alpha1.Shard)) - for _, syncTarget := range c.syncTargetIndexer.List() { - key, err := kcpcache.MetaClusterNamespaceKeyFunc(syncTarget) - if err != nil { - runtime.HandleError(err) - return - } - logging.WithQueueKey(logger, key).V(2).Info("queueing SyncTarget because of Shard") - c.queue.Add(key) - } -} - -// Start starts the controller workers. -func (c *Controller) Start(ctx context.Context, numThreads int) { - defer runtime.HandleCrash() - defer c.queue.ShutDown() - - logger := logging.WithReconciler(klog.FromContext(ctx), ControllerName) - ctx = klog.NewContext(ctx, logger) - logger.Info("Starting controller") - defer logger.Info("Shutting down controller") - - for i := 0; i < numThreads; i++ { - go wait.UntilWithContext(ctx, c.startWorker, time.Second) - } - - <-ctx.Done() -} - -func (c *Controller) startWorker(ctx context.Context) { - for c.processNextWorkItem(ctx) { - } -} - -func (c *Controller) processNextWorkItem(ctx context.Context) bool { - // Wait until there is a new item in the working queue - k, quit := c.queue.Get() - if quit { - return false - } - key := k.(string) - - logger := logging.WithQueueKey(klog.FromContext(ctx), key) - ctx = klog.NewContext(ctx, logger) - logger.V(1).Info("processing key") - - // No matter what, tell the queue we're done with this key, to unblock - // other workers. 
- defer c.queue.Done(key) - - if err := c.process(ctx, key); err != nil { - runtime.HandleError(fmt.Errorf("failed to sync %q: %w", key, err)) - c.queue.AddRateLimited(key) - return true - } - - c.queue.Forget(key) - return true -} - -func (c *Controller) process(ctx context.Context, key string) error { - logger := klog.FromContext(ctx) - obj, exists, err := c.syncTargetIndexer.GetByKey(key) - if err != nil { - logger.Error(err, "failed to get SyncTarget") - return nil - } - - if !exists { - logger.Info("syncTarget was deleted") - return nil - } - - currentSyncTarget := obj.(*workloadv1alpha1.SyncTarget) - logger = logging.WithObject(klog.FromContext(ctx), currentSyncTarget) - ctx = klog.NewContext(ctx, logger) - - workspacesShards, err := c.listWorkspaceShards(labels.Everything()) - if err != nil { - return err - } - - newSyncTarget, err := c.reconcile(ctx, currentSyncTarget, workspacesShards) - if err != nil { - logger.Error(err, "failed to reconcile syncTarget") - return err - } - - if reflect.DeepEqual(currentSyncTarget, newSyncTarget) { - return nil - } - - currentSyncTargetJSON, err := json.Marshal(currentSyncTarget) - if err != nil { - logger.Error(err, "failed to marshal syncTarget") - return err - } - newSyncTargetJSON, err := json.Marshal(newSyncTarget) - if err != nil { - logger.Error(err, "failed to marshal syncTarget") - return err - } - - patchBytes, err := jsonpatch.CreateMergePatch(currentSyncTargetJSON, newSyncTargetJSON) - if err != nil { - logger.Error(err, "failed to create merge patch for syncTarget") - return err - } - - if !reflect.DeepEqual(currentSyncTarget.ObjectMeta, newSyncTarget.ObjectMeta) || !reflect.DeepEqual(currentSyncTarget.Spec, newSyncTarget.Spec) { - logger.WithValues("patch", string(patchBytes)).V(2).Info("patching SyncTarget") - if _, err := c.kcpClusterClient.Cluster(logicalcluster.From(currentSyncTarget).Path()).WorkloadV1alpha1().SyncTargets().Patch(ctx, currentSyncTarget.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}); err != nil { - logger.Error(err, "failed to patch sync target") - return err - } - } - - if !reflect.DeepEqual(currentSyncTarget.Status, newSyncTarget.Status) { - logger.WithValues("patch", string(patchBytes)).V(2).Info("patching SyncTarget status") - if _, err := c.kcpClusterClient.Cluster(logicalcluster.From(currentSyncTarget).Path()).WorkloadV1alpha1().SyncTargets().Patch(ctx, currentSyncTarget.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, "status"); err != nil { - logger.Error(err, "failed to patch sync target status") - return err - } - } - - return nil -} diff --git a/pkg/reconciler/workload/synctarget/synctarget_reconcile.go b/pkg/reconciler/workload/synctarget/synctarget_reconcile.go deleted file mode 100644 index 57e04e260e4..00000000000 --- a/pkg/reconciler/workload/synctarget/synctarget_reconcile.go +++ /dev/null @@ -1,125 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package synctarget - -import ( - "context" - "net/url" - "path" - - "github.com/kcp-dev/logicalcluster/v3" - - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/klog/v2" - - virtualworkspacesoptions "github.com/kcp-dev/kcp/cmd/virtual-workspaces/options" - corev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" - syncerbuilder "github.com/kcp-dev/kcp/tmc/pkg/virtual/syncer/builder" -) - -func (c *Controller) reconcile(ctx context.Context, syncTarget *workloadv1alpha1.SyncTarget, workspaceShards []*corev1alpha1.Shard) (*workloadv1alpha1.SyncTarget, error) { - logger := klog.FromContext(ctx) - syncTargetCopy := syncTarget.DeepCopy() - - labels := syncTargetCopy.GetLabels() - if labels == nil { - labels = map[string]string{} - } - labels[workloadv1alpha1.InternalSyncTargetKeyLabel] = workloadv1alpha1.ToSyncTargetKey(logicalcluster.From(syncTargetCopy), syncTargetCopy.Name) - syncTargetCopy.SetLabels(labels) - - desiredVWURLs := map[string]workloadv1alpha1.VirtualWorkspace{} - desiredTunnelWorkspaceURLs := map[string]workloadv1alpha1.TunnelWorkspace{} - syncTargetClusterName := logicalcluster.From(syncTarget) - - var rootShardKey string - for _, workspaceShard := range workspaceShards { - if workspaceShard.Spec.VirtualWorkspaceURL != "" { - shardVWURL, err := url.Parse(workspaceShard.Spec.VirtualWorkspaceURL) - if err != nil { - logger.Error(err, "failed to parse workspaceShard.Spec.VirtualWorkspaceURL") - return nil, err - } - - syncerVirtualWorkspaceURL := *shardVWURL - syncerVirtualWorkspaceURL.Path = path.Join( - shardVWURL.Path, - virtualworkspacesoptions.DefaultRootPathPrefix, - syncerbuilder.SyncerVirtualWorkspaceName, - logicalcluster.From(syncTargetCopy).String(), - syncTargetCopy.Name, - string(syncTargetCopy.UID), - ) - - upsyncerVirtualWorkspaceURL := *shardVWURL - (&upsyncerVirtualWorkspaceURL).Path = path.Join( - shardVWURL.Path, - virtualworkspacesoptions.DefaultRootPathPrefix, - syncerbuilder.UpsyncerVirtualWorkspaceName, - logicalcluster.From(syncTargetCopy).String(), - syncTargetCopy.Name, - string(syncTargetCopy.UID), - ) - - syncerURL := (&syncerVirtualWorkspaceURL).String() - upsyncerURL := (&upsyncerVirtualWorkspaceURL).String() - - if workspaceShard.Name == corev1alpha1.RootShard { - rootShardKey = shardVWURL.String() - } - desiredVWURLs[shardVWURL.String()] = workloadv1alpha1.VirtualWorkspace{ - SyncerURL: syncerURL, - UpsyncerURL: upsyncerURL, - } - - tunnelWorkspaceURL, err := url.JoinPath(workspaceShard.Spec.BaseURL, syncTargetClusterName.Path().RequestPath()) - if err != nil { - return nil, err - } - desiredTunnelWorkspaceURLs[shardVWURL.String()] = workloadv1alpha1.TunnelWorkspace{ - URL: tunnelWorkspaceURL, - } - } - } - - // Let's always add the desired URLs in the same order: - // - URLs for the root shard are always added first, - // to ensure compatibility with the shard-unaware Syncer; - // - URLs for other shards are added in the lexical order of the - // corresponding shard URLs.
- var desiredVirtualWorkspaces []workloadv1alpha1.VirtualWorkspace //nolint:prealloc - if rootShardVirtualWorkspace, ok := desiredVWURLs[rootShardKey]; ok { - desiredVirtualWorkspaces = append(desiredVirtualWorkspaces, rootShardVirtualWorkspace) - delete(desiredVWURLs, rootShardKey) - } - for _, shardURL := range sets.StringKeySet(desiredVWURLs).List() { - desiredVirtualWorkspaces = append(desiredVirtualWorkspaces, desiredVWURLs[shardURL]) - } - var desiredTunnelWorkspaces []workloadv1alpha1.TunnelWorkspace //nolint:prealloc - if rootShardTunnelWorkspace, ok := desiredTunnelWorkspaceURLs[rootShardKey]; ok { - desiredTunnelWorkspaces = append(desiredTunnelWorkspaces, rootShardTunnelWorkspace) - delete(desiredTunnelWorkspaceURLs, rootShardKey) - } - for _, shardURL := range sets.StringKeySet(desiredTunnelWorkspaceURLs).List() { - desiredTunnelWorkspaces = append(desiredTunnelWorkspaces, desiredTunnelWorkspaceURLs[shardURL]) - } - - syncTargetCopy.Status.VirtualWorkspaces = desiredVirtualWorkspaces - syncTargetCopy.Status.TunnelWorkspaces = desiredTunnelWorkspaces - return syncTargetCopy, nil -} diff --git a/pkg/reconciler/workload/synctarget/synctarget_reconcile_test.go b/pkg/reconciler/workload/synctarget/synctarget_reconcile_test.go deleted file mode 100644 index c0d92cabd16..00000000000 --- a/pkg/reconciler/workload/synctarget/synctarget_reconcile_test.go +++ /dev/null @@ -1,481 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package synctarget - -import ( - "context" - "reflect" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/kcp-dev/logicalcluster/v3" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - corev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" -) - -func TestReconciler(t *testing.T) { - tests := map[string]struct { - workspaceShards []*corev1alpha1.Shard - syncTarget *workloadv1alpha1.SyncTarget - expectedSyncTarget *workloadv1alpha1.SyncTarget - expectError bool - }{ - "SyncTarget with empty VirtualWorkspaces and one workspaceShards": { - workspaceShards: []*corev1alpha1.Shard{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "root", - }, - Spec: corev1alpha1.ShardSpec{ - BaseURL: "http://1.2.3.4:6443/", - ExternalURL: "http://external-host/", - VirtualWorkspaceURL: "http://virtualworkspace/", - }, - }, - }, - syncTarget: &workloadv1alpha1.SyncTarget{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cluster", - Annotations: map[string]string{ - logicalcluster.AnnotationKey: "demo:root:yourworkspace", - }, - }, - Spec: workloadv1alpha1.SyncTargetSpec{ - Unschedulable: false, - EvictAfter: nil, - }, - Status: workloadv1alpha1.SyncTargetStatus{ - VirtualWorkspaces: []workloadv1alpha1.VirtualWorkspace{}, - }, - }, - expectedSyncTarget: &workloadv1alpha1.SyncTarget{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cluster", - Annotations: map[string]string{ - logicalcluster.AnnotationKey: "demo:root:yourworkspace", - }, - Labels: map[string]string{ - "internal.workload.kcp.io/key": "aPXkBdRsTD8gXESO47r9qXmkr2kaG5qaox5C8r", - }, - }, - Spec: workloadv1alpha1.SyncTargetSpec{ - Unschedulable: false, - EvictAfter: nil, - }, - Status: workloadv1alpha1.SyncTargetStatus{ - VirtualWorkspaces: []workloadv1alpha1.VirtualWorkspace{ - { - SyncerURL: "http://virtualworkspace/services/syncer/demo:root:yourworkspace/test-cluster", - UpsyncerURL: "http://virtualworkspace/services/upsyncer/demo:root:yourworkspace/test-cluster", - }, - }, - TunnelWorkspaces: []workloadv1alpha1.TunnelWorkspace{ - { - URL: "http://1.2.3.4:6443/clusters/demo:root:yourworkspace", - }, - }, - }, - }, - expectError: false, - }, - "SyncTarget and multiple Shards": { - workspaceShards: []*corev1alpha1.Shard{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "root2", - }, - Spec: corev1alpha1.ShardSpec{ - BaseURL: "http://1.2.3.4:6444/", - ExternalURL: "http://external-host/", - VirtualWorkspaceURL: "http://virtualworkspace-2/", - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "root3", - }, - Spec: corev1alpha1.ShardSpec{ - BaseURL: "http://1.2.3.4:6445/", - ExternalURL: "http://external-host/", - VirtualWorkspaceURL: "http://virtualworkspace-3/", - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "root", - }, - Spec: corev1alpha1.ShardSpec{ - BaseURL: "http://1.2.3.4:6443/", - ExternalURL: "http://external-host/", - VirtualWorkspaceURL: "http://virtualworkspace-1/", - }, - }, - }, - syncTarget: &workloadv1alpha1.SyncTarget{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cluster", - Annotations: map[string]string{ - logicalcluster.AnnotationKey: "demo:root:yourworkspace", - }, - }, - Spec: workloadv1alpha1.SyncTargetSpec{ - Unschedulable: false, - EvictAfter: nil, - }, - Status: workloadv1alpha1.SyncTargetStatus{ - VirtualWorkspaces: []workloadv1alpha1.VirtualWorkspace{}, - }, - }, - expectedSyncTarget: &workloadv1alpha1.SyncTarget{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cluster", - Annotations: map[string]string{ - 
logicalcluster.AnnotationKey: "demo:root:yourworkspace", - }, - Labels: map[string]string{ - "internal.workload.kcp.io/key": "aPXkBdRsTD8gXESO47r9qXmkr2kaG5qaox5C8r", - }, - }, - Spec: workloadv1alpha1.SyncTargetSpec{ - Unschedulable: false, - EvictAfter: nil, - }, - Status: workloadv1alpha1.SyncTargetStatus{ - VirtualWorkspaces: []workloadv1alpha1.VirtualWorkspace{ - { - SyncerURL: "http://virtualworkspace-1/services/syncer/demo:root:yourworkspace/test-cluster", - UpsyncerURL: "http://virtualworkspace-1/services/upsyncer/demo:root:yourworkspace/test-cluster", - }, - { - SyncerURL: "http://virtualworkspace-2/services/syncer/demo:root:yourworkspace/test-cluster", - UpsyncerURL: "http://virtualworkspace-2/services/upsyncer/demo:root:yourworkspace/test-cluster", - }, - { - SyncerURL: "http://virtualworkspace-3/services/syncer/demo:root:yourworkspace/test-cluster", - UpsyncerURL: "http://virtualworkspace-3/services/upsyncer/demo:root:yourworkspace/test-cluster", - }, - }, - TunnelWorkspaces: []workloadv1alpha1.TunnelWorkspace{ - { - URL: "http://1.2.3.4:6443/clusters/demo:root:yourworkspace", - }, - { - URL: "http://1.2.3.4:6444/clusters/demo:root:yourworkspace", - }, - { - URL: "http://1.2.3.4:6445/clusters/demo:root:yourworkspace", - }, - }, - }, - }, - expectError: false, - }, - "SyncTarget and multiple Shards, but root shard always first": { - workspaceShards: []*corev1alpha1.Shard{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "root2", - }, - Spec: corev1alpha1.ShardSpec{ - BaseURL: "http://1.2.3.4:6444/", - ExternalURL: "http://external-host/", - VirtualWorkspaceURL: "http://virtualworkspace-2/", - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "root3", - }, - Spec: corev1alpha1.ShardSpec{ - BaseURL: "http://1.2.3.4:6445/", - ExternalURL: "http://external-host/", - VirtualWorkspaceURL: "http://virtualworkspace-3/", - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "root", - }, - Spec: corev1alpha1.ShardSpec{ - BaseURL: "http://1.2.3.4:6443/", - ExternalURL: "http://external-host/", - VirtualWorkspaceURL: "http://virtualworkspace-10/", - }, - }, - }, - syncTarget: &workloadv1alpha1.SyncTarget{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cluster", - Annotations: map[string]string{ - logicalcluster.AnnotationKey: "demo:root:yourworkspace", - }, - }, - Spec: workloadv1alpha1.SyncTargetSpec{ - Unschedulable: false, - EvictAfter: nil, - }, - Status: workloadv1alpha1.SyncTargetStatus{ - VirtualWorkspaces: []workloadv1alpha1.VirtualWorkspace{}, - }, - }, - expectedSyncTarget: &workloadv1alpha1.SyncTarget{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cluster", - Annotations: map[string]string{ - logicalcluster.AnnotationKey: "demo:root:yourworkspace", - }, - Labels: map[string]string{ - "internal.workload.kcp.io/key": "aPXkBdRsTD8gXESO47r9qXmkr2kaG5qaox5C8r", - }, - }, - Spec: workloadv1alpha1.SyncTargetSpec{ - Unschedulable: false, - EvictAfter: nil, - }, - Status: workloadv1alpha1.SyncTargetStatus{ - VirtualWorkspaces: []workloadv1alpha1.VirtualWorkspace{ - { - SyncerURL: "http://virtualworkspace-10/services/syncer/demo:root:yourworkspace/test-cluster", - UpsyncerURL: "http://virtualworkspace-10/services/upsyncer/demo:root:yourworkspace/test-cluster", - }, - { - SyncerURL: "http://virtualworkspace-2/services/syncer/demo:root:yourworkspace/test-cluster", - UpsyncerURL: "http://virtualworkspace-2/services/upsyncer/demo:root:yourworkspace/test-cluster", - }, - { - SyncerURL: "http://virtualworkspace-3/services/syncer/demo:root:yourworkspace/test-cluster", - UpsyncerURL: 
"http://virtualworkspace-3/services/upsyncer/demo:root:yourworkspace/test-cluster", - }, - }, - TunnelWorkspaces: []workloadv1alpha1.TunnelWorkspace{ - { - URL: "http://1.2.3.4:6443/clusters/demo:root:yourworkspace", - }, - { - URL: "http://1.2.3.4:6444/clusters/demo:root:yourworkspace", - }, - { - URL: "http://1.2.3.4:6445/clusters/demo:root:yourworkspace", - }, - }, - }, - }, - expectError: false, - }, - "SyncTarget with multiple Shards with duplicated VirtualWorkspaceURLs results in a deduplicated list of URLs on the SyncTarget": { - workspaceShards: []*corev1alpha1.Shard{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "root", - }, - Spec: corev1alpha1.ShardSpec{ - BaseURL: "http://1.2.3.4:6443/", - ExternalURL: "http://external-host/", - VirtualWorkspaceURL: "http://virtualworkspace-1/", - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "root2", - }, - Spec: corev1alpha1.ShardSpec{ - BaseURL: "http://1.2.3.4:6443/", - ExternalURL: "http://external-host/", - VirtualWorkspaceURL: "http://virtualworkspace-1/", - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "root3", - }, - Spec: corev1alpha1.ShardSpec{ - BaseURL: "http://1.2.3.4:6445/", - ExternalURL: "http://external-host/", - VirtualWorkspaceURL: "http://virtualworkspace-3/", - }, - }, - }, - syncTarget: &workloadv1alpha1.SyncTarget{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cluster", - Annotations: map[string]string{ - logicalcluster.AnnotationKey: "demo:root:yourworkspace", - }, - }, - Spec: workloadv1alpha1.SyncTargetSpec{ - Unschedulable: false, - EvictAfter: nil, - }, - Status: workloadv1alpha1.SyncTargetStatus{ - VirtualWorkspaces: []workloadv1alpha1.VirtualWorkspace{}, - }, - }, - expectedSyncTarget: &workloadv1alpha1.SyncTarget{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cluster", - Annotations: map[string]string{ - logicalcluster.AnnotationKey: "demo:root:yourworkspace", - }, - Labels: map[string]string{ - "internal.workload.kcp.io/key": "aPXkBdRsTD8gXESO47r9qXmkr2kaG5qaox5C8r", - }, - }, - Spec: workloadv1alpha1.SyncTargetSpec{ - Unschedulable: false, - EvictAfter: nil, - }, - Status: workloadv1alpha1.SyncTargetStatus{ - VirtualWorkspaces: []workloadv1alpha1.VirtualWorkspace{ - { - SyncerURL: "http://virtualworkspace-1/services/syncer/demo:root:yourworkspace/test-cluster", - UpsyncerURL: "http://virtualworkspace-1/services/upsyncer/demo:root:yourworkspace/test-cluster", - }, - - { - SyncerURL: "http://virtualworkspace-3/services/syncer/demo:root:yourworkspace/test-cluster", - UpsyncerURL: "http://virtualworkspace-3/services/upsyncer/demo:root:yourworkspace/test-cluster", - }, - }, - TunnelWorkspaces: []workloadv1alpha1.TunnelWorkspace{ - {URL: "http://1.2.3.4:6443/clusters/demo:root:yourworkspace"}, - {URL: "http://1.2.3.4:6445/clusters/demo:root:yourworkspace"}, - }, - }, - }, - expectError: false, - }, - "SyncTarget but no Shards": { - workspaceShards: []*corev1alpha1.Shard{}, - syncTarget: &workloadv1alpha1.SyncTarget{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cluster", - Annotations: map[string]string{ - logicalcluster.AnnotationKey: "demo:root:yourworkspace", - }, - }, - Spec: workloadv1alpha1.SyncTargetSpec{ - Unschedulable: false, - EvictAfter: nil, - }, - Status: workloadv1alpha1.SyncTargetStatus{}, - }, - expectedSyncTarget: &workloadv1alpha1.SyncTarget{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cluster", - Annotations: map[string]string{ - logicalcluster.AnnotationKey: "demo:root:yourworkspace", - }, - Labels: map[string]string{ - "internal.workload.kcp.io/key": 
"aPXkBdRsTD8gXESO47r9qXmkr2kaG5qaox5C8r", - }, - }, - Spec: workloadv1alpha1.SyncTargetSpec{ - Unschedulable: false, - EvictAfter: nil, - }, - Status: workloadv1alpha1.SyncTargetStatus{}, - }, - expectError: false, - }, - "SyncTarget from three to one Shards": { - workspaceShards: []*corev1alpha1.Shard{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "root", - }, - Spec: corev1alpha1.ShardSpec{ - BaseURL: "http://1.2.3.4:6443/", - ExternalURL: "http://external-host/", - VirtualWorkspaceURL: "http://virtualworkspace-1/", - }, - }, - }, - syncTarget: &workloadv1alpha1.SyncTarget{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cluster", - Annotations: map[string]string{ - logicalcluster.AnnotationKey: "demo:root:yourworkspace", - }, - }, - Spec: workloadv1alpha1.SyncTargetSpec{ - Unschedulable: false, - EvictAfter: nil, - }, - Status: workloadv1alpha1.SyncTargetStatus{ - VirtualWorkspaces: []workloadv1alpha1.VirtualWorkspace{ - { - SyncerURL: "http://virtualworkspace-1/services/syncer/demo:root:yourworkspace/test-cluster", - UpsyncerURL: "http://virtualworkspace-1/services/upsyncer/demo:root:yourworkspace/test-cluster", - }, - { - SyncerURL: "http://virtualworkspace-2/services/syncer/demo:root:yourworkspace/test-cluster", - UpsyncerURL: "http://virtualworkspace-2/services/upsyncer/demo:root:yourworkspace/test-cluster", - }, - { - SyncerURL: "http://virtualworkspace-3/services/syncer/demo:root:yourworkspace/test-cluster", - UpsyncerURL: "http://virtualworkspace-3/services/upsyncer/demo:root:yourworkspace/test-cluster", - }, - }, - }, - }, - expectedSyncTarget: &workloadv1alpha1.SyncTarget{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cluster", - Annotations: map[string]string{ - logicalcluster.AnnotationKey: "demo:root:yourworkspace", - }, - Labels: map[string]string{ - "internal.workload.kcp.io/key": "aPXkBdRsTD8gXESO47r9qXmkr2kaG5qaox5C8r", - }, - }, - Spec: workloadv1alpha1.SyncTargetSpec{ - Unschedulable: false, - EvictAfter: nil, - }, - Status: workloadv1alpha1.SyncTargetStatus{ - VirtualWorkspaces: []workloadv1alpha1.VirtualWorkspace{ - { - SyncerURL: "http://virtualworkspace-1/services/syncer/demo:root:yourworkspace/test-cluster", - UpsyncerURL: "http://virtualworkspace-1/services/upsyncer/demo:root:yourworkspace/test-cluster", - }, - }, - TunnelWorkspaces: []workloadv1alpha1.TunnelWorkspace{ - { - URL: "http://1.2.3.4:6443/clusters/demo:root:yourworkspace", - }, - }, - }, - }, - expectError: false, - }, - } - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - c := Controller{} - returnedSyncTarget, err := c.reconcile(context.TODO(), tc.syncTarget, tc.workspaceShards) - if err != nil && !tc.expectError { - t.Errorf("unexpected error: %v", err) - } - if !reflect.DeepEqual(returnedSyncTarget, tc.expectedSyncTarget) { - t.Errorf("expected diff: %s", cmp.Diff(tc.expectedSyncTarget, returnedSyncTarget)) - } - }) - } -} diff --git a/pkg/reconciler/workload/synctargetexports/synctarget_indexes.go b/pkg/reconciler/workload/synctargetexports/synctarget_indexes.go deleted file mode 100644 index 0f441a31890..00000000000 --- a/pkg/reconciler/workload/synctargetexports/synctarget_indexes.go +++ /dev/null @@ -1,61 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package synctargetexports - -import ( - "fmt" - - kcpcache "github.com/kcp-dev/apimachinery/v2/pkg/cache" - "github.com/kcp-dev/logicalcluster/v3" - - apisv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" -) - -// indexAPIExportsByAPIResourceSchemas is an index function that maps an APIExport to its spec.latestResourceSchemas. -func indexAPIExportsByAPIResourceSchemas(obj interface{}) ([]string, error) { - apiExport, ok := obj.(*apisv1alpha1.APIExport) - if !ok { - return []string{}, fmt.Errorf("obj is supposed to be an APIExport, but is %T", obj) - } - - ret := make([]string, len(apiExport.Spec.LatestResourceSchemas)) - for i := range apiExport.Spec.LatestResourceSchemas { - ret[i] = kcpcache.ToClusterAwareKey(logicalcluster.From(apiExport).Path().String(), "", apiExport.Spec.LatestResourceSchemas[i]) - } - - return ret, nil -} - -func indexSyncTargetsByExports(obj interface{}) ([]string, error) { - synctarget, ok := obj.(*workloadv1alpha1.SyncTarget) - if !ok { - return []string{}, fmt.Errorf("obj is supposed to be a SyncTarget, but is %T", obj) - } - - clusterName := logicalcluster.From(synctarget) - keys := make([]string, 0, len(synctarget.Spec.SupportedAPIExports)) - for _, export := range synctarget.Spec.SupportedAPIExports { - if len(export.Path) == 0 { - keys = append(keys, clusterName.Path().Join(export.Export).String()) - continue - } - keys = append(keys, logicalcluster.NewPath(export.Path).Join(export.Export).String()) - } - - return keys, nil -} diff --git a/pkg/reconciler/workload/synctargetexports/synctargetcompatible_reconcile.go b/pkg/reconciler/workload/synctargetexports/synctargetcompatible_reconcile.go deleted file mode 100644 index cb3e7922b5b..00000000000 --- a/pkg/reconciler/workload/synctargetexports/synctargetcompatible_reconcile.go +++ /dev/null @@ -1,136 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License.
-*/ - -package synctargetexports - -import ( - "context" - - "github.com/kcp-dev/logicalcluster/v3" - - apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/apimachinery/pkg/util/validation/field" - - "github.com/kcp-dev/kcp/pkg/schemacompat" - apiresourcev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apiresource/v1alpha1" - apisv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" -) - -// apiCompatibleReconciler sets the state for each synced resource based on resource schemas and API imports. -// TODO(qiujian06) this should be done in the syncer when the resource schema (or CRD) is exposed by the syncer virtual workspace. -type apiCompatibleReconciler struct { - getAPIExport func(path logicalcluster.Path, name string) (*apisv1alpha1.APIExport, error) - getResourceSchema func(clusterName logicalcluster.Name, name string) (*apisv1alpha1.APIResourceSchema, error) - listAPIResourceImports func(clusterName logicalcluster.Name) ([]*apiresourcev1alpha1.APIResourceImport, error) -} - -func (e *apiCompatibleReconciler) reconcile(ctx context.Context, syncTarget *workloadv1alpha1.SyncTarget) (*workloadv1alpha1.SyncTarget, error) { - var errs []error - schemaMap := map[schema.GroupVersionResource]*apiextensionsv1.JSONSchemaProps{} - - // Get the JSON schema from all related resource schemas - for _, exportRef := range syncTarget.Spec.SupportedAPIExports { - path := logicalcluster.NewPath(exportRef.Path) - if path.Empty() { - path = logicalcluster.From(syncTarget).Path() - } - export, err := e.getAPIExport(path, exportRef.Export) - if apierrors.IsNotFound(err) { - continue - } - if err != nil { - errs = append(errs, err) - continue - } - - for _, schemaName := range export.Spec.LatestResourceSchemas { - resourceSchema, err := e.getResourceSchema(logicalcluster.From(export), schemaName) - if apierrors.IsNotFound(err) { - continue - } - if err != nil { - errs = append(errs, err) - } - - for _, v := range resourceSchema.Spec.Versions { - jsonSchema, err := v.GetSchema() - if err != nil { - errs = append(errs, err) - continue - } - schemaMap[schema.GroupVersionResource{ - Group: resourceSchema.Spec.Group, - Resource: resourceSchema.Spec.Names.Plural, - Version: v.Name, - }] = jsonSchema - } - } - } - - lcluster := logicalcluster.From(syncTarget) - apiImportMap := map[schema.GroupVersionResource]*apiextensionsv1.JSONSchemaProps{} - apiImports, err := e.listAPIResourceImports(lcluster) - if err != nil { - return syncTarget, err - } - - for _, apiImport := range apiImports { - jsonSchema, err := apiImport.Spec.GetSchema() - if err != nil { - errs = append(errs, err) - continue - } - apiImportMap[schema.GroupVersionResource{ - Group: apiImport.Spec.GroupVersion.Group, - Version: apiImport.Spec.GroupVersion.Version, - Resource: apiImport.Spec.Plural, - }] = jsonSchema - } - - for i, syncedResource := range syncTarget.Status.SyncedResources { - for _, v := range syncedResource.Versions { - gvr := schema.GroupVersionResource{Group: syncedResource.Group, Resource: syncedResource.Resource, Version: v} - upstreamSchema, ok := schemaMap[gvr] - if !ok { - syncTarget.Status.SyncedResources[i].State = workloadv1alpha1.ResourceSchemaPendingState - continue - } - - downStreamSchema, ok := apiImportMap[gvr] - if !ok { - syncTarget.Status.SyncedResources[i].State =
workloadv1alpha1.ResourceSchemaIncompatibleState - continue - } - - _, err := schemacompat.EnsureStructuralSchemaCompatibility( - field.NewPath(gvr.String()), upstreamSchema, downStreamSchema, false) - if err != nil { - syncTarget.Status.SyncedResources[i].State = workloadv1alpha1.ResourceSchemaIncompatibleState - continue - } - - // since versions are ordered, if the current version is compatible, we can skip the check on the other versions. - syncTarget.Status.SyncedResources[i].State = workloadv1alpha1.ResourceSchemaAcceptedState - break - } - } - - return syncTarget, errors.NewAggregate(errs) -} diff --git a/pkg/reconciler/workload/synctargetexports/synctargetcompatible_reconcile_test.go b/pkg/reconciler/workload/synctargetexports/synctargetcompatible_reconcile_test.go deleted file mode 100644 index d5ae8b985f2..00000000000 --- a/pkg/reconciler/workload/synctargetexports/synctargetcompatible_reconcile_test.go +++ /dev/null @@ -1,240 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package synctargetexports - -import ( - "context" - "testing" - - "github.com/kcp-dev/logicalcluster/v3" - "github.com/stretchr/testify/require" - - apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - - apiresourcev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apiresource/v1alpha1" - apisv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1" - tenancyv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" -) - -func TestSyncTargetCompatibleReconcile(t *testing.T) { - tests := []struct { - name string - syncTarget *workloadv1alpha1.SyncTarget - export *apisv1alpha1.APIExport - schemas []*apisv1alpha1.APIResourceSchema - apiResourceImport []*apiresourcev1alpha1.APIResourceImport - - wantError bool - wantSyncedResources []workloadv1alpha1.ResourceToSync - }{ - { - name: "pending when missing APIResourceSchema", - syncTarget: newSyncTarget([]tenancyv1alpha1.APIExportReference{ - { - Export: "kubernetes", - }, - }, - []workloadv1alpha1.ResourceToSync{ - {GroupResource: apisv1alpha1.GroupResource{Group: "apps", Resource: "deployments"}, Versions: []string{"v1"}, State: workloadv1alpha1.ResourceSchemaAcceptedState}, - }, - ), - export: newAPIExport("kubernetes", []string{"apps.v1.deployment"}, ""), - wantSyncedResources: []workloadv1alpha1.ResourceToSync{ - {GroupResource: apisv1alpha1.GroupResource{Group: "apps", Resource: "deployments"}, Versions: []string{"v1"}, State: workloadv1alpha1.ResourceSchemaPendingState}, - }, - }, - { - name: "incompatible when missing APIResourceImport", - syncTarget: newSyncTarget([]tenancyv1alpha1.APIExportReference{ - { - Export: "kubernetes", - }, - }, - []workloadv1alpha1.ResourceToSync{ - {GroupResource: apisv1alpha1.GroupResource{Group: "apps", Resource: "deployments"}, Versions: []string{"v1"},
State: workloadv1alpha1.ResourceSchemaAcceptedState}, - }, - ), - export: newAPIExport("kubernetes", []string{"apps.v1.deployment"}, ""), - schemas: []*apisv1alpha1.APIResourceSchema{ - newResourceSchema("apps.v1.deployment", "apps", "deployments", []apisv1alpha1.APIResourceVersion{ - { - Name: "v1", - Served: true, - Schema: runtime.RawExtension{Raw: []byte(`{"type":"string"}`)}, - }, - }), - }, - wantSyncedResources: []workloadv1alpha1.ResourceToSync{ - {GroupResource: apisv1alpha1.GroupResource{Group: "apps", Resource: "deployments"}, Versions: []string{"v1"}, State: workloadv1alpha1.ResourceSchemaIncompatibleState}, - }, - }, - { - name: "APIResourceImport compatible with APIResourceSchema", - syncTarget: newSyncTarget([]tenancyv1alpha1.APIExportReference{ - { - Export: "kubernetes", - }, - }, - []workloadv1alpha1.ResourceToSync{ - {GroupResource: apisv1alpha1.GroupResource{Group: "apps", Resource: "deployments"}, Versions: []string{"v1"}, State: workloadv1alpha1.ResourceSchemaPendingState}, - }, - ), - export: newAPIExport("kubernetes", []string{"apps.v1.deployment"}, ""), - schemas: []*apisv1alpha1.APIResourceSchema{ - newResourceSchema("apps.v1.deployment", "apps", "deployments", []apisv1alpha1.APIResourceVersion{ - { - Name: "v1", - Served: true, - Schema: runtime.RawExtension{Raw: []byte(`{"type":"string"}`)}, - }, - }), - }, - apiResourceImport: []*apiresourcev1alpha1.APIResourceImport{ - newAPIResourceImport("apps.v1.deployment", "apps", "deployments", "v1", `{"type":"string"}`), - }, - wantSyncedResources: []workloadv1alpha1.ResourceToSync{ - {GroupResource: apisv1alpha1.GroupResource{Group: "apps", Resource: "deployments"}, Versions: []string{"v1"}, State: workloadv1alpha1.ResourceSchemaAcceptedState}, - }, - }, - { - name: "APIResourceImport incompatible with APIResourceSchema", - syncTarget: newSyncTarget([]tenancyv1alpha1.APIExportReference{ - { - Export: "kubernetes", - }, - }, - []workloadv1alpha1.ResourceToSync{ - {GroupResource: apisv1alpha1.GroupResource{Group: "apps", Resource: "deployments"}, Versions: []string{"v1"}, State: workloadv1alpha1.ResourceSchemaAcceptedState}, - }, - ), - export: newAPIExport("kubernetes", []string{"apps.v1.deployment"}, ""), - schemas: []*apisv1alpha1.APIResourceSchema{ - newResourceSchema("apps.v1.deployment", "apps", "deployments", []apisv1alpha1.APIResourceVersion{ - { - Name: "v1", - Served: true, - Schema: runtime.RawExtension{Raw: []byte(`{"type":"integer"}`)}, - }, - }), - }, - apiResourceImport: []*apiresourcev1alpha1.APIResourceImport{ - newAPIResourceImport("apps.v1.deployment", "apps", "deployments", "v1", `{"type":"string"}`), - }, - wantSyncedResources: []workloadv1alpha1.ResourceToSync{ - {GroupResource: apisv1alpha1.GroupResource{Group: "apps", Resource: "deployments"}, Versions: []string{"v1"}, State: workloadv1alpha1.ResourceSchemaIncompatibleState}, - }, - }, - { - name: "only take care latest version", - syncTarget: newSyncTarget([]tenancyv1alpha1.APIExportReference{ - { - Export: "kubernetes", - }, - }, - []workloadv1alpha1.ResourceToSync{ - {GroupResource: apisv1alpha1.GroupResource{Group: "apps", Resource: "deployments"}, Versions: []string{"v1", "v1beta1"}, State: workloadv1alpha1.ResourceSchemaPendingState}, - }, - ), - export: newAPIExport("kubernetes", []string{"apps.v1.deployment"}, ""), - schemas: []*apisv1alpha1.APIResourceSchema{ - newResourceSchema("apps.v1.deployment", "apps", "deployments", []apisv1alpha1.APIResourceVersion{ - { - Name: "v1", - Served: true, - Schema: runtime.RawExtension{Raw: 
[]byte(`{"type":"string"}`)}, - }, - }), - newResourceSchema("apps.v1.deployment", "apps", "deployments", []apisv1alpha1.APIResourceVersion{ - { - Name: "v1beta1", - Served: true, - Schema: runtime.RawExtension{Raw: []byte(`{"type":"string"}`)}, - }, - }), - }, - apiResourceImport: []*apiresourcev1alpha1.APIResourceImport{ - newAPIResourceImport("apps.v1.deployment", "apps", "deployments", "v1", `{"type":"string"}`), - }, - wantSyncedResources: []workloadv1alpha1.ResourceToSync{ - {GroupResource: apisv1alpha1.GroupResource{Group: "apps", Resource: "deployments"}, Versions: []string{"v1", "v1beta1"}, State: workloadv1alpha1.ResourceSchemaAcceptedState}, - }, - }, - } - - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - getAPIExport := func(clusterName logicalcluster.Path, name string) (*apisv1alpha1.APIExport, error) { - if tc.export == nil { - return nil, errors.NewNotFound(schema.GroupResource{}, name) - } - return tc.export, nil - } - getResourceSchema := func(clusterName logicalcluster.Name, name string) (*apisv1alpha1.APIResourceSchema, error) { - for _, schema := range tc.schemas { - if schema.Name == name { - return schema, nil - } - } - - return nil, errors.NewNotFound(schema.GroupResource{}, name) - } - listAPIResourceImports := func(clusterName logicalcluster.Name) ([]*apiresourcev1alpha1.APIResourceImport, error) { - return tc.apiResourceImport, nil - } - - reconciler := &apiCompatibleReconciler{ - getAPIExport: getAPIExport, - getResourceSchema: getResourceSchema, - listAPIResourceImports: listAPIResourceImports, - } - - updated, err := reconciler.reconcile(context.TODO(), tc.syncTarget) - if tc.wantError { - require.Error(t, err) - } else { - require.NoError(t, err) - } - - require.Equal(t, tc.wantSyncedResources, updated.Status.SyncedResources) - }) - } -} - -func newAPIResourceImport(name, group, resource, version, schema string) *apiresourcev1alpha1.APIResourceImport { - return &apiresourcev1alpha1.APIResourceImport{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Spec: apiresourcev1alpha1.APIResourceImportSpec{ - CommonAPIResourceSpec: apiresourcev1alpha1.CommonAPIResourceSpec{ - GroupVersion: apiresourcev1alpha1.GroupVersion{ - Group: group, - Version: version, - }, - CustomResourceDefinitionNames: apiextensionsv1.CustomResourceDefinitionNames{ - Plural: resource, - }, - OpenAPIV3Schema: runtime.RawExtension{Raw: []byte(schema)}, - }, - }, - } -} diff --git a/pkg/reconciler/workload/synctargetexports/synctargetexports_controller.go b/pkg/reconciler/workload/synctargetexports/synctargetexports_controller.go deleted file mode 100644 index 3dfa8813d5e..00000000000 --- a/pkg/reconciler/workload/synctargetexports/synctargetexports_controller.go +++ /dev/null @@ -1,349 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package synctargetexports - -import ( - "context" - "fmt" - "time" - - "github.com/go-logr/logr" - kcpcache "github.com/kcp-dev/apimachinery/v2/pkg/cache" - "github.com/kcp-dev/logicalcluster/v3" - - "k8s.io/apimachinery/pkg/api/equality" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/util/workqueue" - "k8s.io/klog/v2" - - "github.com/kcp-dev/kcp/pkg/indexers" - "github.com/kcp-dev/kcp/pkg/informer" - "github.com/kcp-dev/kcp/pkg/logging" - "github.com/kcp-dev/kcp/pkg/reconciler/committer" - apiresourcev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apiresource/v1alpha1" - apisv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1" - "github.com/kcp-dev/kcp/sdk/apis/core" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" - kcpclientset "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster" - workloadv1alpha1client "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/typed/workload/v1alpha1" - apiresourcev1alpha1informers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/apiresource/v1alpha1" - apisv1alpha1informers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/apis/v1alpha1" - workloadv1alpha1informers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/workload/v1alpha1" - apiresourcev1alpha1listers "github.com/kcp-dev/kcp/sdk/client/listers/apiresource/v1alpha1" - apisv1alpha1listers "github.com/kcp-dev/kcp/sdk/client/listers/apis/v1alpha1" - workloadv1alpha1listers "github.com/kcp-dev/kcp/sdk/client/listers/workload/v1alpha1" -) - -const ( - ControllerName = "kcp-synctarget-export-controller" - - indexSyncTargetsByExport = ControllerName + "ByExport" - indexAPIExportsByAPIResourceSchema = ControllerName + "ByAPIResourceSchema" -) - -// NewController returns a controller which updates syncedResources in the status based on supportedAPIExports in the spec -// of a SyncTarget.
-func NewController( - kcpClusterClient kcpclientset.ClusterInterface, - syncTargetInformer workloadv1alpha1informers.SyncTargetClusterInformer, - apiExportInformer, globalAPIExportInformer apisv1alpha1informers.APIExportClusterInformer, - apiResourceSchemaInformer, globalAPIResourceSchemaInformer apisv1alpha1informers.APIResourceSchemaClusterInformer, - apiResourceImportInformer apiresourcev1alpha1informers.APIResourceImportClusterInformer, -) (*Controller, error) { - c := &Controller{ - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), ControllerName), - kcpClusterClient: kcpClusterClient, - syncTargetIndexer: syncTargetInformer.Informer().GetIndexer(), - syncTargetLister: syncTargetInformer.Lister(), - apiExportsIndexer: apiExportInformer.Informer().GetIndexer(), - getAPIExport: func(path logicalcluster.Path, name string) (*apisv1alpha1.APIExport, error) { - return indexers.ByPathAndNameWithFallback[*apisv1alpha1.APIExport](apisv1alpha1.Resource("apiexports"), apiExportInformer.Informer().GetIndexer(), globalAPIExportInformer.Informer().GetIndexer(), path, name) - }, - getAPIResourceSchema: informer.NewScopedGetterWithFallback[*apisv1alpha1.APIResourceSchema, apisv1alpha1listers.APIResourceSchemaLister](apiResourceSchemaInformer.Lister(), globalAPIResourceSchemaInformer.Lister()), - apiImportLister: apiResourceImportInformer.Lister(), - commit: committer.NewCommitter[*SyncTarget, Patcher, *SyncTargetSpec, *SyncTargetStatus](kcpClusterClient.WorkloadV1alpha1().SyncTargets()), - } - - if err := syncTargetInformer.Informer().AddIndexers(cache.Indexers{ - indexSyncTargetsByExport: indexSyncTargetsByExports, - }); err != nil { - return nil, err - } - - indexers.AddIfNotPresentOrDie(apiExportInformer.Informer().GetIndexer(), cache.Indexers{ - indexAPIExportsByAPIResourceSchema: indexAPIExportsByAPIResourceSchemas, - indexers.ByLogicalClusterPathAndName: indexers.IndexByLogicalClusterPathAndName, - }) - - logger := logging.WithReconciler(klog.Background(), ControllerName) - - // Watch for events related to SyncTargets - _, _ = syncTargetInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { c.enqueueSyncTarget(obj, logger, "") }, - UpdateFunc: func(old, obj interface{}) { - oldCluster := old.(*workloadv1alpha1.SyncTarget) - newCluster := obj.(*workloadv1alpha1.SyncTarget) - - // only enqueue when syncedResources or supportedAPIExports are changed.
- if !equality.Semantic.DeepEqual(oldCluster.Spec.SupportedAPIExports, newCluster.Spec.SupportedAPIExports) || - !equality.Semantic.DeepEqual(oldCluster.Status.SyncedResources, newCluster.Status.SyncedResources) { - c.enqueueSyncTarget(obj, logger, "") - } - }, - DeleteFunc: func(obj interface{}) {}, - }) - - _, _ = apiExportInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { c.enqueueAPIExport(obj, logger, "") }, - UpdateFunc: func(_, obj interface{}) { c.enqueueAPIExport(obj, logger, "") }, - DeleteFunc: func(obj interface{}) { c.enqueueAPIExport(obj, logger, "") }, - }) - - _, _ = apiResourceSchemaInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { c.enqueueAPIResourceSchema(obj, logger) }, - UpdateFunc: func(_, obj interface{}) { c.enqueueAPIResourceSchema(obj, logger) }, - DeleteFunc: func(obj interface{}) { c.enqueueAPIResourceSchema(obj, logger) }, - }) - - _, _ = apiResourceImportInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { - c.enqueueAPIResourceImport(obj, logger) - }, - UpdateFunc: func(old, obj interface{}) { - oldImport := old.(*apiresourcev1alpha1.APIResourceImport) - newImport := obj.(*apiresourcev1alpha1.APIResourceImport) - - // only enqueue when spec is changed. - if oldImport.Generation != newImport.Generation { - c.enqueueAPIResourceImport(obj, logger) - } - }, - DeleteFunc: func(obj interface{}) {}, - }) - - return c, nil -} - -type SyncTarget = workloadv1alpha1.SyncTarget -type SyncTargetSpec = workloadv1alpha1.SyncTargetSpec -type SyncTargetStatus = workloadv1alpha1.SyncTargetStatus -type Patcher = workloadv1alpha1client.SyncTargetInterface -type Resource = committer.Resource[*SyncTargetSpec, *SyncTargetStatus] -type CommitFunc = func(context.Context, *Resource, *Resource) error - -type Controller struct { - queue workqueue.RateLimitingInterface - kcpClusterClient kcpclientset.ClusterInterface - - syncTargetIndexer cache.Indexer - syncTargetLister workloadv1alpha1listers.SyncTargetClusterLister - apiExportsIndexer cache.Indexer - getAPIExport func(path logicalcluster.Path, name string) (*apisv1alpha1.APIExport, error) - getAPIResourceSchema informer.ScopedFallbackGetFunc[*apisv1alpha1.APIResourceSchema] - apiImportLister apiresourcev1alpha1listers.APIResourceImportClusterLister - - commit CommitFunc -} - -func (c *Controller) enqueueSyncTarget(obj interface{}, logger logr.Logger, logSuffix string) { - key, err := kcpcache.MetaClusterNamespaceKeyFunc(obj) - if err != nil { - runtime.HandleError(err) - return - } - - logging.WithQueueKey(logger, key).V(2).Info(fmt.Sprintf("queueing SyncTarget%s", logSuffix)) - c.queue.Add(key) -} - -func (c *Controller) enqueueAPIResourceImport(obj interface{}, logger logr.Logger) { - apiImport, ok := obj.(*apiresourcev1alpha1.APIResourceImport) - if !ok { - runtime.HandleError(fmt.Errorf("obj is supposed to be an APIResourceImport, but is %T", obj)) - return - } - - lcluster := logicalcluster.From(apiImport) - key := kcpcache.ToClusterAwareKey(lcluster.String(), "", apiImport.Spec.Location) - - logging.WithQueueKey(logger, key).V(2).Info(fmt.Sprintf("queueing SyncTarget %q because of APIResourceImport %s", key, apiImport.Name)) - c.queue.Add(key) -} - -func (c *Controller) enqueueAPIExport(obj interface{}, logger logr.Logger, logSuffix string) { - if d, ok := obj.(cache.DeletedFinalStateUnknown); ok { - obj = d.Obj - } - - export, ok := obj.(*apisv1alpha1.APIExport) - if !ok {
- runtime.HandleError(fmt.Errorf("obj is supposed to be a APIExport, but is %T", obj)) - return - } - - // synctarget keys by full path - keys := sets.New[string]() - if path := export.Annotations[core.LogicalClusterPathAnnotationKey]; path != "" { - pathKeys, err := c.syncTargetIndexer.IndexKeys(indexSyncTargetsByExport, logicalcluster.NewPath(path).Join(export.Name).String()) - if err != nil { - runtime.HandleError(err) - return - } - keys.Insert(pathKeys...) - } - - clusterKeys, err := c.syncTargetIndexer.IndexKeys(indexSyncTargetsByExport, logicalcluster.From(export).Path().Join(export.Name).String()) - if err != nil { - runtime.HandleError(err) - return - } - keys.Insert(clusterKeys...) - - for _, key := range sets.List[string](keys) { - syncTarget, _, err := c.syncTargetIndexer.GetByKey(key) - if err != nil { - runtime.HandleError(err) - continue - } - c.enqueueSyncTarget(syncTarget, logger, fmt.Sprintf(" because of APIExport %s%s", key, logSuffix)) - } -} - -// enqueueAPIResourceSchema maps an APIResourceSchema to APIExports for enqueuing. -func (c *Controller) enqueueAPIResourceSchema(obj interface{}, logger logr.Logger) { - key, err := kcpcache.DeletionHandlingMetaClusterNamespaceKeyFunc(obj) - if err != nil { - runtime.HandleError(err) - return - } - - apiExports, err := c.apiExportsIndexer.ByIndex(indexAPIExportsByAPIResourceSchema, key) - if err != nil { - runtime.HandleError(err) - return - } - - for _, obj := range apiExports { - c.enqueueAPIExport(obj, logger, fmt.Sprintf(" because of APIResourceSchema %s", key)) - } -} - -// Start starts the controller workers. -func (c *Controller) Start(ctx context.Context, numThreads int) { - defer runtime.HandleCrash() - defer c.queue.ShutDown() - - logger := logging.WithReconciler(klog.FromContext(ctx), ControllerName) - ctx = klog.NewContext(ctx, logger) - logger.Info("Starting controller") - defer logger.Info("Shutting down controller") - - for i := 0; i < numThreads; i++ { - go wait.UntilWithContext(ctx, c.startWorker, time.Second) - } - - <-ctx.Done() -} - -func (c *Controller) startWorker(ctx context.Context) { - for c.processNextWorkItem(ctx) { - } -} - -func (c *Controller) processNextWorkItem(ctx context.Context) bool { - // Wait until there is a new item in the working queue - k, quit := c.queue.Get() - if quit { - return false - } - key := k.(string) - - logger := logging.WithQueueKey(klog.FromContext(ctx), key) - ctx = klog.NewContext(ctx, logger) - logger.V(1).Info("processing key") - - // No matter what, tell the queue we're done with this key, to unblock - // other workers. 
- defer c.queue.Done(key)
-
- if err := c.process(ctx, key); err != nil {
- runtime.HandleError(fmt.Errorf("failed to sync %q: %w", key, err))
- c.queue.AddRateLimited(key)
- return true
- }
-
- c.queue.Forget(key)
- return true
-}
-
-func (c *Controller) process(ctx context.Context, key string) error {
- logger := klog.FromContext(ctx)
- cluster, _, name, err := kcpcache.SplitMetaClusterNamespaceKey(key)
- if err != nil {
- runtime.HandleError(err)
- return nil
- }
- var errs []error
-
- syncTarget, err := c.syncTargetLister.Cluster(cluster).Get(name)
- if err != nil {
- logger.Error(err, "failed to get syncTarget")
- return nil
- }
-
- currentSyncTarget := syncTarget.DeepCopy()
-
- logger = logging.WithObject(logger, currentSyncTarget)
- ctx = klog.NewContext(ctx, logger)
-
- exportReconciler := &exportReconciler{
- getAPIExport: c.getAPIExport,
- getResourceSchema: c.getAPIResourceSchema,
- }
- currentSyncTarget, err = exportReconciler.reconcile(ctx, currentSyncTarget)
- if err != nil {
- errs = append(errs, err)
- }
-
- apiCompatibleReconciler := &apiCompatibleReconciler{
- getAPIExport: c.getAPIExport,
- getResourceSchema: c.getAPIResourceSchema,
- listAPIResourceImports: c.listAPIResourceImports,
- }
- currentSyncTarget, err = apiCompatibleReconciler.reconcile(ctx, currentSyncTarget)
- if err != nil {
- errs = append(errs, err)
- }
-
- // If the object being reconciled changed as a result, update it.
- oldResource := &Resource{ObjectMeta: syncTarget.ObjectMeta, Spec: &syncTarget.Spec, Status: &syncTarget.Status}
- newResource := &Resource{ObjectMeta: currentSyncTarget.ObjectMeta, Spec: &currentSyncTarget.Spec, Status: &currentSyncTarget.Status}
- if err := c.commit(ctx, oldResource, newResource); err != nil {
- errs = append(errs, err)
- }
-
- return errors.NewAggregate(errs)
-}
-
-func (c *Controller) listAPIResourceImports(clusterName logicalcluster.Name) ([]*apiresourcev1alpha1.APIResourceImport, error) {
- return c.apiImportLister.Cluster(clusterName).List(labels.Everything())
-}
diff --git a/pkg/reconciler/workload/synctargetexports/synctargetexports_reconcile.go b/pkg/reconciler/workload/synctargetexports/synctargetexports_reconcile.go
deleted file mode 100644
index 259502a9765..00000000000
--- a/pkg/reconciler/workload/synctargetexports/synctargetexports_reconcile.go
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
-Copyright 2022 The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package synctargetexports
-
-import (
- "context"
- "sort"
-
- "github.com/kcp-dev/logicalcluster/v3"
-
- apierrors "k8s.io/apimachinery/pkg/api/errors"
- "k8s.io/apimachinery/pkg/util/errors"
- "k8s.io/klog/v2"
-
- apisv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1"
- workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1"
-)
-
-// exportReconciler updates syncedResource in SyncTarget status based on supportedAPIExports.
-type exportReconciler struct { - getAPIExport func(path logicalcluster.Path, name string) (*apisv1alpha1.APIExport, error) - getResourceSchema func(clusterName logicalcluster.Name, name string) (*apisv1alpha1.APIResourceSchema, error) -} - -func (e *exportReconciler) reconcile(ctx context.Context, syncTarget *workloadv1alpha1.SyncTarget) (*workloadv1alpha1.SyncTarget, error) { - logger := klog.FromContext(ctx) - var errs []error - var syncedResources []workloadv1alpha1.ResourceToSync - for _, exportRef := range syncTarget.Spec.SupportedAPIExports { - path := logicalcluster.NewPath(exportRef.Path) - if path.Empty() { - path = logicalcluster.From(syncTarget).Path() - } - export, err := e.getAPIExport(path, exportRef.Export) - if apierrors.IsNotFound(err) { - logger.WithValues("APIExport", path.Join(exportRef.Export)).V(4).Info("APIExport not found, skipping") - continue - } - if err != nil { - errs = append(errs, err) - continue - } - - for _, schema := range export.Spec.LatestResourceSchemas { - syncedResource, err := e.convertSchemaToSyncedResource(logicalcluster.From(export), schema, export.Status.IdentityHash) - if err != nil { - logger.WithValues("err", err).Info("cannot get schema") - continue - } - syncedResources = append(syncedResources, syncedResource) - } - } - - // sort synced resource by group - sort.SliceStable(syncedResources, func(i, j int) bool { - if syncedResources[i].Group > syncedResources[j].Group { - return true - } - - if syncedResources[i].Resource > syncedResources[j].Resource { - return true - } - return false - }) - - // merge synced resource using desired as base and update it state based on existing synced. - for _, existingSynced := range syncTarget.Status.SyncedResources { - for i := range syncedResources { - if syncedResources[i].GroupResource == existingSynced.GroupResource && syncedResources[i].IdentityHash == existingSynced.IdentityHash { - syncedResources[i].State = existingSynced.State - break - } - } - } - - syncTarget.Status.SyncedResources = syncedResources - return syncTarget, errors.NewAggregate(errs) -} - -func (e *exportReconciler) convertSchemaToSyncedResource(clusterName logicalcluster.Name, schemaName, identityHash string) (workloadv1alpha1.ResourceToSync, error) { - schema, err := e.getResourceSchema(clusterName, schemaName) - if err != nil { - return workloadv1alpha1.ResourceToSync{}, err - } - - syncedResource := workloadv1alpha1.ResourceToSync{ - GroupResource: apisv1alpha1.GroupResource{ - Group: schema.Spec.Group, - Resource: schema.Spec.Names.Plural, - }, - Versions: []string{}, - IdentityHash: identityHash, - } - - for _, version := range schema.Spec.Versions { - if version.Served { - syncedResource.Versions = append(syncedResource.Versions, version.Name) - } - } - sort.Strings(syncedResource.Versions) - - return syncedResource, nil -} diff --git a/pkg/reconciler/workload/synctargetexports/synctargetexports_reconcile_test.go b/pkg/reconciler/workload/synctargetexports/synctargetexports_reconcile_test.go deleted file mode 100644 index 60ea98879a0..00000000000 --- a/pkg/reconciler/workload/synctargetexports/synctargetexports_reconcile_test.go +++ /dev/null @@ -1,200 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package synctargetexports - -import ( - "context" - "testing" - - "github.com/kcp-dev/logicalcluster/v3" - "github.com/stretchr/testify/require" - - apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - - apisv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1" - tenancyv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" -) - -func TestSyncTargetExportReconcile(t *testing.T) { - tests := []struct { - name string - syncTarget *workloadv1alpha1.SyncTarget - export *apisv1alpha1.APIExport - schemas []*apisv1alpha1.APIResourceSchema - - wantError bool - wantSyncedResources []workloadv1alpha1.ResourceToSync - }{ - { - name: "export not found", - syncTarget: newSyncTarget([]tenancyv1alpha1.APIExportReference{ - { - Export: "kubernetes", - }, - }, nil), - }, - { - name: "resource schemas not found", - syncTarget: newSyncTarget([]tenancyv1alpha1.APIExportReference{ - { - Export: "kubernetes", - }, - }, nil), - export: newAPIExport("kubernetes", []string{"v1.service"}, ""), - }, - { - name: "update status correctly", - syncTarget: newSyncTarget([]tenancyv1alpha1.APIExportReference{ - { - Export: "kubernetes", - }, - }, nil), - export: newAPIExport("kubernetes", []string{"v1.service", "apps.v1.deployment"}, ""), - schemas: []*apisv1alpha1.APIResourceSchema{ - newResourceSchema("apps.v1.deployment", "apps", "deployments", []apisv1alpha1.APIResourceVersion{{Name: "v1", Served: true}}), - newResourceSchema("v1.service", "", "services", []apisv1alpha1.APIResourceVersion{{Name: "v1", Served: true}}), - }, - wantSyncedResources: []workloadv1alpha1.ResourceToSync{ - {GroupResource: apisv1alpha1.GroupResource{Group: "apps", Resource: "deployments"}, Versions: []string{"v1"}}, - {GroupResource: apisv1alpha1.GroupResource{Group: "", Resource: "services"}, Versions: []string{"v1"}}, - }, - }, - { - name: "update existing", - syncTarget: newSyncTarget([]tenancyv1alpha1.APIExportReference{ - { - Export: "kubernetes", - }}, - []workloadv1alpha1.ResourceToSync{ - {GroupResource: apisv1alpha1.GroupResource{Group: "apps", Resource: "deployments"}, Versions: []string{"v1"}, State: workloadv1alpha1.ResourceSchemaAcceptedState}, - {GroupResource: apisv1alpha1.GroupResource{Group: "", Resource: "services"}, Versions: []string{"v1"}, State: workloadv1alpha1.ResourceSchemaPendingState}, - }, - ), - export: newAPIExport("kubernetes", []string{"v1.pod", "apps.v1.deployment"}, ""), - schemas: []*apisv1alpha1.APIResourceSchema{ - newResourceSchema("apps.v1.deployment", "apps", "deployments", []apisv1alpha1.APIResourceVersion{{Name: "v1", Served: true}}), - newResourceSchema("v1.pod", "", "pods", []apisv1alpha1.APIResourceVersion{{Name: "v1", Served: true}}), - }, - wantSyncedResources: []workloadv1alpha1.ResourceToSync{ - {GroupResource: apisv1alpha1.GroupResource{Group: "apps", Resource: "deployments"}, Versions: []string{"v1"}, State: workloadv1alpha1.ResourceSchemaAcceptedState}, - 
{GroupResource: apisv1alpha1.GroupResource{Group: "", Resource: "pods"}, Versions: []string{"v1"}}, - }, - }, - { - name: "multiple versions", - syncTarget: newSyncTarget([]tenancyv1alpha1.APIExportReference{ - { - Export: "kubernetes", - }}, - nil, - ), - export: newAPIExport("kubernetes", []string{"apps.v1.deployment"}, ""), - schemas: []*apisv1alpha1.APIResourceSchema{ - newResourceSchema("apps.v1.deployment", "apps", "deployments", []apisv1alpha1.APIResourceVersion{ - {Name: "v1", Served: true}, - {Name: "v1alpha1", Served: false}, - {Name: "v1beta1", Served: true}, - }), - }, - wantSyncedResources: []workloadv1alpha1.ResourceToSync{ - {GroupResource: apisv1alpha1.GroupResource{Group: "apps", Resource: "deployments"}, Versions: []string{"v1", "v1beta1"}}, - }, - }, - } - - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - getAPIExport := func(path logicalcluster.Path, name string) (*apisv1alpha1.APIExport, error) { - if tc.export == nil { - return nil, errors.NewNotFound(schema.GroupResource{}, name) - } - return tc.export, nil - } - getResourceSchema := func(clusterName logicalcluster.Name, name string) (*apisv1alpha1.APIResourceSchema, error) { - for _, schema := range tc.schemas { - if schema.Name == name { - return schema, nil - } - } - - return nil, errors.NewNotFound(schema.GroupResource{}, name) - } - - reconciler := &exportReconciler{ - getAPIExport: getAPIExport, - getResourceSchema: getResourceSchema, - } - - updated, err := reconciler.reconcile(context.TODO(), tc.syncTarget) - if tc.wantError { - require.Error(t, err) - } else { - require.NoError(t, err) - } - - require.Equal(t, tc.wantSyncedResources, updated.Status.SyncedResources) - }) - } -} - -func newSyncTarget(exports []tenancyv1alpha1.APIExportReference, syncedResource []workloadv1alpha1.ResourceToSync) *workloadv1alpha1.SyncTarget { - return &workloadv1alpha1.SyncTarget{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-synctarget", - }, - Spec: workloadv1alpha1.SyncTargetSpec{ - SupportedAPIExports: exports, - }, - Status: workloadv1alpha1.SyncTargetStatus{ - SyncedResources: syncedResource, - }, - } -} - -func newAPIExport(name string, schemas []string, identityHash string) *apisv1alpha1.APIExport { - return &apisv1alpha1.APIExport{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Spec: apisv1alpha1.APIExportSpec{ - LatestResourceSchemas: schemas, - }, - Status: apisv1alpha1.APIExportStatus{ - IdentityHash: identityHash, - }, - } -} - -func newResourceSchema(name, group, resource string, versions []apisv1alpha1.APIResourceVersion) *apisv1alpha1.APIResourceSchema { - schema := &apisv1alpha1.APIResourceSchema{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Spec: apisv1alpha1.APIResourceSchemaSpec{ - Group: group, - Names: apiextensionsv1.CustomResourceDefinitionNames{ - Plural: resource, - }, - Versions: versions, - }, - } - return schema -} diff --git a/pkg/server/config.go b/pkg/server/config.go index 46d59f8ef39..c0aed12ef9d 100644 --- a/pkg/server/config.go +++ b/pkg/server/config.go @@ -38,11 +38,9 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apiserver/pkg/admission" "k8s.io/apiserver/pkg/endpoints/filters" - "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/informerfactoryhack" "k8s.io/apiserver/pkg/quota/v1/generic" genericapiserver "k8s.io/apiserver/pkg/server" - genericfilters "k8s.io/apiserver/pkg/server/filters" serverstorage "k8s.io/apiserver/pkg/server/storage" "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" @@ -57,15 +55,12 @@ import ( 
bootstrappolicy "github.com/kcp-dev/kcp/pkg/authorization/bootstrap" "github.com/kcp-dev/kcp/pkg/conversion" "github.com/kcp-dev/kcp/pkg/embeddedetcd" - kcpfeatures "github.com/kcp-dev/kcp/pkg/features" - "github.com/kcp-dev/kcp/pkg/indexers" "github.com/kcp-dev/kcp/pkg/informer" "github.com/kcp-dev/kcp/pkg/server/bootstrap" kcpfilters "github.com/kcp-dev/kcp/pkg/server/filters" kcpserveroptions "github.com/kcp-dev/kcp/pkg/server/options" "github.com/kcp-dev/kcp/pkg/server/options/batteries" "github.com/kcp-dev/kcp/pkg/server/requestinfo" - "github.com/kcp-dev/kcp/pkg/tunneler" apisv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1" kcpclientset "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster" kcpinformers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions" @@ -370,14 +365,6 @@ func NewConfig(opts kcpserveroptions.CompletedOptions) (*Config, error) { // Make sure to set our RequestInfoResolver that is capable of populating a RequestInfo even for /services/... URLs. c.GenericConfig.RequestInfoResolver = requestinfo.NewKCPRequestInfoResolver() - if kcpfeatures.DefaultFeatureGate.Enabled(kcpfeatures.SyncerTunnel) { - kubeBasicLongRunningRequestCheck := c.GenericConfig.LongRunningFunc - tunnelBasicLongRunningRequestCheck := genericfilters.BasicLongRunningRequestCheck(sets.NewString(""), sets.NewString("tunnel")) - c.GenericConfig.LongRunningFunc = func(r *http.Request, requestInfo *request.RequestInfo) bool { - return kubeBasicLongRunningRequestCheck(r, requestInfo) || tunnelBasicLongRunningRequestCheck(r, requestInfo) - } - } - // preHandlerChainMux is called before the actual handler chain. Note that BuildHandlerChainFunc below // is called multiple times, but only one of the handler chain will actually be used. Hence, we wrap it // to give handlers below one mux.Handle func to call. @@ -388,17 +375,6 @@ func NewConfig(opts kcpserveroptions.CompletedOptions) (*Config, error) { apiHandler = authorization.WithSubjectAccessReviewAuditAnnotations(apiHandler) apiHandler = authorization.WithDeepSubjectAccessReview(apiHandler) - if kcpfeatures.DefaultFeatureGate.Enabled(kcpfeatures.SyncerTunnel) { - tunneler := tunneler.NewTunneler() - apiHandler = tunneler.WithSyncerTunnelHandler(apiHandler) - apiHandler = tunneler.WithPodSubresourceProxying( - apiHandler, - c.DynamicClusterClient, - c.KcpSharedInformerFactory, - c.CacheKcpSharedInformerFactory, - ) - } - // The following ensures that only the default main api handler chain executes authorizers which log audit messages. // All other invocations of the same authorizer chain still work but do not produce audit log entries. // This compromises audit log size and information overflow vs. having audit reasons for the main api handler only. @@ -525,10 +501,8 @@ func NewConfig(opts kcpserveroptions.CompletedOptions) (*Config, error) { // make sure the informer gets started, otherwise conversions will not work! 
_ = c.KcpSharedInformerFactory.Apis().V1alpha1().APIConversions().Informer() - c.ApiExtensionsSharedInformerFactory.Apiextensions().V1().CustomResourceDefinitions().Informer().GetIndexer().AddIndexers(cache.Indexers{byGroupResourceName: indexCRDByGroupResourceName}) //nolint:errcheck - c.KcpSharedInformerFactory.Apis().V1alpha1().APIBindings().Informer().GetIndexer().AddIndexers(cache.Indexers{byIdentityGroupResource: indexAPIBindingByIdentityGroupResource}) //nolint:errcheck - c.KcpSharedInformerFactory.Workload().V1alpha1().SyncTargets().Informer().GetIndexer().AddIndexers(cache.Indexers{indexers.SyncTargetsBySyncTargetKey: indexers.IndexSyncTargetsBySyncTargetKey}) //nolint:errcheck - c.CacheKcpSharedInformerFactory.Workload().V1alpha1().SyncTargets().Informer().GetIndexer().AddIndexers(cache.Indexers{indexers.SyncTargetsBySyncTargetKey: indexers.IndexSyncTargetsBySyncTargetKey}) //nolint:errcheck + c.ApiExtensionsSharedInformerFactory.Apiextensions().V1().CustomResourceDefinitions().Informer().GetIndexer().AddIndexers(cache.Indexers{byGroupResourceName: indexCRDByGroupResourceName}) //nolint:errcheck + c.KcpSharedInformerFactory.Apis().V1alpha1().APIBindings().Informer().GetIndexer().AddIndexers(cache.Indexers{byIdentityGroupResource: indexAPIBindingByIdentityGroupResource}) //nolint:errcheck c.ApiExtensions.ExtraConfig.ClusterAwareCRDLister = &apiBindingAwareCRDClusterLister{ kcpClusterClient: c.KcpClusterClient, diff --git a/pkg/server/controllers.go b/pkg/server/controllers.go index 668ed9d7494..cc93711a8f2 100644 --- a/pkg/server/controllers.go +++ b/pkg/server/controllers.go @@ -77,7 +77,6 @@ import ( "github.com/kcp-dev/kcp/pkg/reconciler/tenancy/workspace" "github.com/kcp-dev/kcp/pkg/reconciler/tenancy/workspacetype" "github.com/kcp-dev/kcp/pkg/reconciler/topology/partitionset" - workloadsapiexport "github.com/kcp-dev/kcp/pkg/reconciler/workload/apiexport" initializingworkspacesbuilder "github.com/kcp-dev/kcp/pkg/virtual/initializingworkspaces/builder" corev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1" tenancyv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1" @@ -1149,7 +1148,7 @@ func (s *Server) installExtraAnnotationSyncController(ctx context.Context, confi } return s.AddPostStartHook(postStartHookName(extraannotationsync.ControllerName), func(hookContext genericapiserver.PostStartHookContext) error { - logger := klog.FromContext(ctx).WithValues("postStartHook", postStartHookName(workloadsapiexport.ControllerName)) + logger := klog.FromContext(ctx).WithValues("postStartHook", postStartHookName(extraannotationsync.ControllerName)) if err := s.WaitForSync(hookContext.StopCh); err != nil { logger.Error(err, "failed to finish post-start-hook") return nil // don't klog.Fatal. This only happens when context is cancelled. 
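For context on the indexer hunk above: every registration there, kept or removed, goes through client-go's named-index mechanism, where a cache.Indexers map of index functions is attached to an informer's store and queried later via ByIndex. Below is a minimal, self-contained sketch of that pattern; the byApp index name and the Pod objects are illustrative placeholders, not code from this patch.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// An index function maps each stored object to zero or more index values;
	// ByIndex then returns every object that produced the requested value.
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{
		"byApp": func(obj interface{}) ([]string, error) {
			pod, ok := obj.(*corev1.Pod)
			if !ok {
				return nil, nil
			}
			return []string{pod.Labels["app"]}, nil
		},
	})

	// Store a pod carrying the label the index function keys on.
	_ = indexer.Add(&corev1.Pod{ObjectMeta: metav1.ObjectMeta{
		Name:      "p1",
		Namespace: "default",
		Labels:    map[string]string{"app": "echo"},
	}})

	objs, err := indexer.ByIndex("byApp", "echo")
	fmt.Println(len(objs), err) // 1 <nil>
}

The //nolint:errcheck markers in the hunk appear to exist because AddIndexers returns an error that those call sites deliberately ignore.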
diff --git a/pkg/server/server.go b/pkg/server/server.go index a99dae5bb51..b2595e72d99 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -39,7 +39,6 @@ import ( configshard "github.com/kcp-dev/kcp/config/shard" systemcrds "github.com/kcp-dev/kcp/config/system-crds" bootstrappolicy "github.com/kcp-dev/kcp/pkg/authorization/bootstrap" - "github.com/kcp-dev/kcp/pkg/indexers" "github.com/kcp-dev/kcp/pkg/informer" metadataclient "github.com/kcp-dev/kcp/pkg/metadata" virtualrootapiserver "github.com/kcp-dev/kcp/pkg/virtual/framework/rootapiserver" @@ -104,12 +103,7 @@ func NewServer(c CompletedConfig) (*Server, error) { func(obj interface{}) bool { return true }, nil, crdGVRSource, - indexers.AppendOrDie( - cache.Indexers{ - indexers.BySyncerFinalizerKey: indexers.IndexBySyncerFinalizerKey, - indexers.ByClusterResourceStateLabelKey: indexers.IndexByClusterResourceStateLabelKey, - }, - ), + cache.Indexers{}, ) if err != nil { return nil, err diff --git a/pkg/syncer/OWNERS b/pkg/syncer/OWNERS deleted file mode 100644 index 56d4142d739..00000000000 --- a/pkg/syncer/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -approvers: -- ncdc -- sttts -- davidfestal -reviewers: -- jmprusi diff --git a/pkg/syncer/apiimporter.go b/pkg/syncer/apiimporter.go deleted file mode 100644 index 0fe3d9297af..00000000000 --- a/pkg/syncer/apiimporter.go +++ /dev/null @@ -1,361 +0,0 @@ -/* -Copyright 2021 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package syncer - -import ( - "context" - "fmt" - "reflect" - "time" - - "github.com/kcp-dev/logicalcluster/v3" - - apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - apiextensionsv1client "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/discovery" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/cache" - "k8s.io/klog/v2" - - "github.com/kcp-dev/kcp/pkg/crdpuller" - "github.com/kcp-dev/kcp/pkg/logging" - apiresourcev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apiresource/v1alpha1" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" - kcpclientset "github.com/kcp-dev/kcp/sdk/client/clientset/versioned" - kcpclusterclientset "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster" - apiresourceinformer "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/apiresource/v1alpha1" - workloadinformers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/workload/v1alpha1" - workloadv1alpha1listers "github.com/kcp-dev/kcp/sdk/client/listers/workload/v1alpha1" -) - -var clusterKind = reflect.TypeOf(workloadv1alpha1.SyncTarget{}).Name() - -const GVRForLocationIndexName = "GVRForLocation" - -func GetGVRForLocationIndexKey(location string, gvr metav1.GroupVersionResource) string { - return location + "$$" + gvr.String() -} - -const LocationIndexName = "Location" - -func GetLocationIndexKey(location string) string { - return location -} - -func clusterAsOwnerReference(obj *workloadv1alpha1.SyncTarget, controller bool) metav1.OwnerReference { - return metav1.OwnerReference{ - APIVersion: workloadv1alpha1.SchemeGroupVersion.String(), - Kind: clusterKind, - Name: obj.Name, - UID: obj.UID, - Controller: &controller, - } -} - -func NewAPIImporter( - upstreamConfig, downstreamConfig *rest.Config, - synctargetInformer workloadinformers.SyncTargetInformer, - apiImportInformer apiresourceinformer.APIResourceImportInformer, - resourcesToSync []string, - syncTargetPath logicalcluster.Path, - syncTargetName string, - syncTargetUID types.UID, -) (*APIImporter, error) { - agent := fmt.Sprintf("kcp-workload-api-importer-%s-%s", syncTargetPath, syncTargetName) - upstreamConfig = rest.AddUserAgent(rest.CopyConfig(upstreamConfig), agent) - downstreamConfig = rest.AddUserAgent(rest.CopyConfig(downstreamConfig), agent) - - kcpClusterClient, err := kcpclusterclientset.NewForConfig(upstreamConfig) - if err != nil { - return nil, err - } - - kcpClient := kcpClusterClient.Cluster(syncTargetPath) - - importIndexer := apiImportInformer.Informer().GetIndexer() - - indexers := map[string]cache.IndexFunc{ - GVRForLocationIndexName: func(obj interface{}) ([]string, error) { - if apiResourceImport, ok := obj.(*apiresourcev1alpha1.APIResourceImport); ok { - return []string{GetGVRForLocationIndexKey(apiResourceImport.Spec.Location, apiResourceImport.GVR())}, nil - } - return []string{}, nil - }, - LocationIndexName: func(obj interface{}) ([]string, error) { - if apiResourceImport, ok := obj.(*apiresourcev1alpha1.APIResourceImport); ok { - return []string{GetLocationIndexKey(apiResourceImport.Spec.Location)}, nil - } - return []string{}, nil - }, - } - - // Ensure the indexers are only added if not already present. 
- for indexName := range importIndexer.GetIndexers() { - delete(indexers, indexName) - } - if len(indexers) > 0 { - if err := importIndexer.AddIndexers(indexers); err != nil { - return nil, fmt.Errorf("failed to add indexer for API Importer: %w", err) - } - } - - crdClient, err := apiextensionsv1client.NewForConfig(downstreamConfig) - if err != nil { - return nil, fmt.Errorf("error creating downstream apiextensions client: %w", err) - } - discoveryClient, err := discovery.NewDiscoveryClientForConfig(downstreamConfig) - if err != nil { - return nil, fmt.Errorf("error creating downstream discovery client: %w", err) - } - - schemaPuller, err := crdpuller.NewSchemaPuller(discoveryClient, crdClient) - if err != nil { - return nil, err - } - - return &APIImporter{ - kcpClient: kcpClient, - resourcesToSync: resourcesToSync, - apiresourceImportIndexer: importIndexer, - syncTargetLister: synctargetInformer.Lister(), - - syncTargetName: syncTargetName, - syncTargetUID: syncTargetUID, - schemaPuller: schemaPuller, - }, nil -} - -type APIImporter struct { - kcpClient kcpclientset.Interface - resourcesToSync []string - apiresourceImportIndexer cache.Indexer - syncTargetLister workloadv1alpha1listers.SyncTargetLister - - syncTargetName string - syncTargetUID types.UID - schemaPuller schemaPuller - SyncedGVRs map[string]metav1.GroupVersionResource -} - -// schemaPuller allows pulling the API resources as CRDs -// from a kubernetes cluster. -type schemaPuller interface { - // PullCRDs allows pulling the resources named by their plural names - // and make them available as CRDs in the output map. - PullCRDs(context context.Context, resourceNames ...string) (map[schema.GroupResource]*apiextensionsv1.CustomResourceDefinition, error) -} - -func (i *APIImporter) Start(ctx context.Context, pollInterval time.Duration) { - defer runtime.HandleCrash() - - logger := logging.WithReconciler(klog.FromContext(ctx), "api-importer") - ctx = klog.NewContext(ctx, logger) - - logger.Info("Starting API Importer") - - go wait.UntilWithContext(ctx, func(innerCtx context.Context) { - i.ImportAPIs(innerCtx) - }, pollInterval) - - <-ctx.Done() - i.Stop(ctx) -} - -func (i *APIImporter) Stop(ctx context.Context) { - logger := klog.FromContext(ctx) - logger.Info("stopping API Importer") - - objs, err := i.apiresourceImportIndexer.ByIndex( - LocationIndexName, - GetLocationIndexKey(i.syncTargetName), - ) - if err != nil { - logger.Error(err, "error trying to list APIResourceImport objects") - } - for _, obj := range objs { - apiResourceImportToDelete := obj.(*apiresourcev1alpha1.APIResourceImport) - logger := logger.WithValues("apiResourceImport", apiResourceImportToDelete.Name) - err := i.kcpClient.ApiresourceV1alpha1().APIResourceImports().Delete(context.Background(), apiResourceImportToDelete.Name, metav1.DeleteOptions{}) - if err != nil { - logger.Error(err, "error deleting APIResourceImport") - } - } -} - -func (i *APIImporter) ImportAPIs(ctx context.Context) { - logger := klog.FromContext(ctx) - - syncTarget, err := i.syncTargetLister.Get(i.syncTargetName) - - if err != nil { - logger.Error(err, "error getting syncTarget") - return - } - - if syncTarget.GetUID() != i.syncTargetUID { - logger.Error(fmt.Errorf("syncTarget uid is not correct, current: %s, required: %s", syncTarget.GetUID(), i.syncTargetUID), "error getting syncTarget") - return - } - - // merge resourceToSync from synctarget with resourcesToSync set on the syncer's flag. - resourceToSyncSet := sets.New[string](i.resourcesToSync...) 
- for _, rs := range syncTarget.Status.SyncedResources { - resourceToSyncSet.Insert(fmt.Sprintf("%s.%s", rs.Resource, rs.Group)) - } - // return if no resources to import - resourcesToSync := sets.List[string](resourceToSyncSet) - logger.V(2).Info("Importing APIs", "resourcesToImport", resourcesToSync) - if resourceToSyncSet.Len() == 0 { - return - } - - crds, err := i.schemaPuller.PullCRDs(ctx, resourcesToSync...) - if err != nil { - logger.Error(err, "error pulling CRDs") - return - } - - gvrsToSync := map[string]metav1.GroupVersionResource{} - for groupResource, pulledCrd := range crds { - crdVersion := pulledCrd.Spec.Versions[0] - gvr := metav1.GroupVersionResource{ - Group: pulledCrd.Spec.Group, - Version: crdVersion.Name, - Resource: groupResource.Resource, - } - logger := logger.WithValues( - "group", gvr.Group, - "version", gvr.Version, - "resource", gvr.Resource, - ) - - objs, err := i.apiresourceImportIndexer.ByIndex( - GVRForLocationIndexName, - GetGVRForLocationIndexKey(i.syncTargetName, gvr), - ) - if err != nil { - logger.Error(err, "error pulling CRDs") - continue - } - if len(objs) > 1 { - logger.Error(fmt.Errorf("there should be only one APIResourceImport but there was %d", len(objs)), "err importing APIs") - continue - } - if len(objs) == 1 { - apiResourceImport := objs[0].(*apiresourcev1alpha1.APIResourceImport).DeepCopy() - if err := apiResourceImport.Spec.SetSchema(crdVersion.Schema.OpenAPIV3Schema); err != nil { - logger.Error(err, "error setting schema") - continue - } - logger = logger.WithValues("apiResourceImport", apiResourceImport.Name) - logger.Info("updating APIResourceImport") - if _, err := i.kcpClient.ApiresourceV1alpha1().APIResourceImports().Update(ctx, apiResourceImport, metav1.UpdateOptions{}); err != nil { - logger.Error(err, "error updating APIResourceImport") - continue - } - } else { - apiResourceImportName := gvr.Resource + "." + i.syncTargetName + "." + gvr.Version + "." 
- if gvr.Group == "" { - apiResourceImportName += "core" - } else { - apiResourceImportName += gvr.Group - } - groupVersion := apiresourcev1alpha1.GroupVersion{ - Group: gvr.Group, - Version: gvr.Version, - } - apiResourceImport := &apiresourcev1alpha1.APIResourceImport{ - ObjectMeta: metav1.ObjectMeta{ - Name: apiResourceImportName, - OwnerReferences: []metav1.OwnerReference{ - clusterAsOwnerReference(syncTarget, true), - }, - Annotations: map[string]string{ - apiresourcev1alpha1.APIVersionAnnotation: groupVersion.APIVersion(), - }, - }, - Spec: apiresourcev1alpha1.APIResourceImportSpec{ - Location: i.syncTargetName, - SchemaUpdateStrategy: apiresourcev1alpha1.UpdateUnpublished, - CommonAPIResourceSpec: apiresourcev1alpha1.CommonAPIResourceSpec{ - GroupVersion: apiresourcev1alpha1.GroupVersion{ - Group: gvr.Group, - Version: gvr.Version, - }, - Scope: pulledCrd.Spec.Scope, - CustomResourceDefinitionNames: pulledCrd.Spec.Names, - SubResources: *(&apiresourcev1alpha1.SubResources{}).ImportFromCRDVersion(&crdVersion), - ColumnDefinitions: *(&apiresourcev1alpha1.ColumnDefinitions{}).ImportFromCRDVersion(&crdVersion), - }, - }, - } - if err := apiResourceImport.Spec.SetSchema(crdVersion.Schema.OpenAPIV3Schema); err != nil { - logger.Error(err, "error setting schema") - continue - } - if value, found := pulledCrd.Annotations[apiextensionsv1.KubeAPIApprovedAnnotation]; found { - apiResourceImport.Annotations[apiextensionsv1.KubeAPIApprovedAnnotation] = value - } - - logger.Info("creating APIResourceImport") - if _, err := i.kcpClient.ApiresourceV1alpha1().APIResourceImports().Create(ctx, apiResourceImport, metav1.CreateOptions{}); err != nil { - logger.Error(err, "error creating APIResourceImport") - continue - } - } - gvrsToSync[gvr.String()] = gvr - } - - gvrsToRemove := sets.StringKeySet(i.SyncedGVRs).Difference(sets.StringKeySet(gvrsToSync)) - for _, gvrToRemove := range gvrsToRemove.UnsortedList() { - gvr := i.SyncedGVRs[gvrToRemove] - objs, err := i.apiresourceImportIndexer.ByIndex( - GVRForLocationIndexName, - GetGVRForLocationIndexKey(i.syncTargetName, gvr), - ) - logger := logger.WithValues( - "group", gvr.Group, - "version", gvr.Version, - "resource", gvr.Resource, - ) - - if err != nil { - logger.Error(err, "error pulling CRDs") - continue - } - if len(objs) > 1 { - logger.Error(fmt.Errorf("there should be only one APIResourceImport of GVR but there was %d", len(objs)), "err deleting APIResourceImport") - continue - } - if len(objs) == 1 { - apiResourceImportToRemove := objs[0].(*apiresourcev1alpha1.APIResourceImport) - logger = logger.WithValues("apiResourceImport", apiResourceImportToRemove.Name) - logger.Info("deleting APIResourceImport") - err := i.kcpClient.ApiresourceV1alpha1().APIResourceImports().Delete(ctx, apiResourceImportToRemove.Name, metav1.DeleteOptions{}) - if err != nil { - logger.Error(err, "error deleting APIResourceImport") - continue - } - } - } -} diff --git a/pkg/syncer/controllermanager/controllermanager.go b/pkg/syncer/controllermanager/controllermanager.go deleted file mode 100644 index fabab061137..00000000000 --- a/pkg/syncer/controllermanager/controllermanager.go +++ /dev/null @@ -1,198 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controllermanager - -import ( - "context" - "time" - - "k8s.io/apimachinery/pkg/runtime/schema" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/util/workqueue" - "k8s.io/klog/v2" - - "github.com/kcp-dev/kcp/pkg/logging" - "github.com/kcp-dev/kcp/pkg/syncer/shared" -) - -const ( - ControllerNamePrefix = "syncer-controller-manager-" -) - -// InformerSource is a dynamic source of informers per GVR, -// which notifies when informers are added or removed for some GVR. -// It is implemented by the DynamicSharedInformerFactory (in fact by -// both the scoped or cluster-aware variants). -type InformerSource struct { - // Subscribe registers for informer change notifications, returning a channel to which change notifications are sent. - // The id argument is the identifier of the subscriber, since there might be several subscribers subscribing - // to receive events from this InformerSource. - Subscribe func(id string) <-chan struct{} - - // Informers returns a map of per-resource-type SharedIndexInformers for all types that are - // known by this informer source, and that are synced. - // - // It also returns the list of informers that are known by this informer source, but sill not synced. - Informers func() (informers map[schema.GroupVersionResource]cache.SharedIndexInformer, notSynced []schema.GroupVersionResource) -} - -// ManagedController defines a controller that should be managed by a ControllerManager, -// to be started when the required GVRs are supported, and stopped when the required GVRs -// are not supported anymore. -type ManagedController struct { - RequiredGVRs []schema.GroupVersionResource - Create CreateControllerFunc -} - -type StartControllerFunc func(ctx context.Context) -type CreateControllerFunc func(ctx context.Context) (StartControllerFunc, error) - -// NewControllerManager creates a new ControllerManager which will manage (create/start/stop) GVR-specific controllers according to informers -// available in the provided InformerSource. -func NewControllerManager(ctx context.Context, suffix string, informerSource InformerSource, controllers map[string]ManagedController) *ControllerManager { - controllerManager := ControllerManager{ - name: ControllerNamePrefix + suffix, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), ControllerNamePrefix+suffix), - informerSource: informerSource, - managedControllers: controllers, - startedControllers: map[string]context.CancelFunc{}, - } - - apisChanged := informerSource.Subscribe(controllerManager.name) - - logger := klog.FromContext(ctx) - - go func() { - for { - select { - case <-ctx.Done(): - return - case <-apisChanged: - logger.V(4).Info("got API change notification") - controllerManager.queue.Add("resync") // this queue only ever has one key in it, as long as it's constant we are OK - } - } - }() - - return &controllerManager -} - -// ControllerManager is a component that manages (create/start/stop) GVR-specific controllers according to available GVRs. 
-// It reacts to the changes of supported GVRs in a DiscoveringDynamicSharedInformerFactory -// (the GVRs for which an informer has been automatically created, started and synced), -// and starts / stops registered GVRs-specific controllers according to the GVRs they depend on. -// -// For example this allows starting PVC / PV controllers only when PVC / PV resources are exposed by the Syncer and UpSyncer -// virtual workspaces, and Informers for them have been started and synced by the corresponding ddsif. -type ControllerManager struct { - name string - queue workqueue.RateLimitingInterface - informerSource InformerSource - managedControllers map[string]ManagedController - startedControllers map[string]context.CancelFunc -} - -// Start starts the controller, which stops when ctx.Done() is closed. -func (c *ControllerManager) Start(ctx context.Context) { - defer utilruntime.HandleCrash() - defer c.queue.ShutDown() - - logger := logging.WithReconciler(klog.FromContext(ctx), c.name) - logger.Info("Starting controller manager") - defer logger.Info("Shutting down controller manager") - - go wait.UntilWithContext(ctx, c.startWorker, time.Second) - <-ctx.Done() -} - -func (c *ControllerManager) startWorker(ctx context.Context) { - for c.processNextWorkItem(ctx) { - } -} - -func (c *ControllerManager) processNextWorkItem(ctx context.Context) bool { - key, quit := c.queue.Get() - if quit { - return false - } - defer c.queue.Done(key) - - c.process(ctx) - c.queue.Forget(key) - return true -} - -func (c *ControllerManager) process(ctx context.Context) { - logger := klog.FromContext(ctx) - controllersToStart := map[string]CreateControllerFunc{} - syncedInformers, notSynced := c.informerSource.Informers() -controllerLoop: - for controllerName, managedController := range c.managedControllers { - requiredGVRs := managedController.RequiredGVRs - for _, gvr := range requiredGVRs { - informer := syncedInformers[gvr] - if informer == nil { - if shared.ContainsGVR(notSynced, gvr) { - logger.V(2).Info("waiting for the informer to be synced before starting controller", "gvr", gvr, "controller", controllerName) - c.queue.AddAfter("resync", time.Second) - continue controllerLoop - } - // The informer doesn't even exist for this GVR. - // Let's ignore this controller for now: one of the required GVRs has no informer started - // (because it has not been found on the SyncTarget in the supported resources to sync). - // If this required GVR is supported later on, the updateControllers() method will be called - // again after an API change notification comes through the informerSource. 
- continue controllerLoop - } - } - controllersToStart[controllerName] = managedController.Create - } - - // Remove obsolete controllers that don't have their required GVRs anymore - for controllerName, cancelFunc := range c.startedControllers { - if _, ok := controllersToStart[controllerName]; ok { - // The controller is still expected => don't remove it - continue - } - // The controller should not be running - // Stop it and remove it from the list of started controllers - cancelFunc() - delete(c.startedControllers, controllerName) - } - - // Create and start missing controllers that have their required GVRs synced - for controllerName, create := range controllersToStart { - if _, ok := c.startedControllers[controllerName]; ok { - // The controller is already started - continue - } - - // Create the controller - start, err := create(ctx) - if err != nil { - logger.Error(err, "failed creating controller", "controller", controllerName) - continue - } - - // Start the controller - controllerContext, cancelFunc := context.WithCancel(ctx) - go start(controllerContext) - c.startedControllers[controllerName] = cancelFunc - } -} diff --git a/pkg/syncer/endpoints/endpoint_downstream_controller.go b/pkg/syncer/endpoints/endpoint_downstream_controller.go deleted file mode 100644 index 6f5b4ce6e27..00000000000 --- a/pkg/syncer/endpoints/endpoint_downstream_controller.go +++ /dev/null @@ -1,252 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package endpoints - -import ( - "context" - "errors" - "fmt" - "time" - - "github.com/kcp-dev/logicalcluster/v3" - - corev1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/informers" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/util/workqueue" - "k8s.io/klog/v2" - - ddsif "github.com/kcp-dev/kcp/pkg/informer" - "github.com/kcp-dev/kcp/pkg/logging" - "github.com/kcp-dev/kcp/pkg/syncer/shared" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" -) - -const ( - ControllerName = "syncer-endpoint-controller" -) - -var ( - endpointsGVR = corev1.SchemeGroupVersion.WithResource("endpoints") - servicesGVR = corev1.SchemeGroupVersion.WithResource("services") - namespacesGVR = corev1.SchemeGroupVersion.WithResource("namespaces") -) - -// NewEndpointController returns new controller which would annotate Endpoints related to synced Services, so that those Endpoints -// would be upsynced by the UpSyncer to the upstream KCP workspace. -// This would be useful to enable components such as a KNative controller (running against the KCP workspace) to see the Endpoint, -// and confirm that the related Service is effective. 
-func NewEndpointController( - downstreamClient dynamic.Interface, - ddsifForDownstream *ddsif.GenericDiscoveringDynamicSharedInformerFactory[cache.SharedIndexInformer, cache.GenericLister, informers.GenericInformer], - syncTargetClusterName logicalcluster.Name, - syncTargetName string, - syncTargetUID types.UID, -) (*controller, error) { - c := &controller{ - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), ControllerName), - - syncTargetClusterName: syncTargetClusterName, - syncTargetName: syncTargetName, - syncTargetUID: syncTargetUID, - syncTargetKey: workloadv1alpha1.ToSyncTargetKey(syncTargetClusterName, syncTargetName), - - getDownstreamResource: func(gvr schema.GroupVersionResource, namespace, name string) (*unstructured.Unstructured, error) { - informers, notSynced := ddsifForDownstream.Informers() - informer, ok := informers[gvr] - if !ok { - if shared.ContainsGVR(notSynced, gvr) { - return nil, fmt.Errorf("informer for gvr %v not synced in the downstream informer factory", gvr) - } - return nil, fmt.Errorf("gvr %v should be known in the downstream informer factory", gvr) - } - object, err := informer.Lister().ByNamespace(namespace).Get(name) - if err != nil { - return nil, err - } - unstr, ok := object.(*unstructured.Unstructured) - if !ok { - return nil, fmt.Errorf("object type should be *unstructured.Unstructured but was %t", object) - } - return unstr, nil - }, - getDownstreamNamespace: func(name string) (*unstructured.Unstructured, error) { - informers, notSynced := ddsifForDownstream.Informers() - informer, ok := informers[namespacesGVR] - if !ok { - if shared.ContainsGVR(notSynced, namespacesGVR) { - return nil, fmt.Errorf("informer for gvr %v not synced in the downstream informer factory", namespacesGVR) - } - return nil, fmt.Errorf("gvr %v should be known in the downstream informer factory", namespacesGVR) - } - object, err := informer.Lister().Get(name) - if err != nil { - return nil, err - } - unstr, ok := object.(*unstructured.Unstructured) - if !ok { - return nil, fmt.Errorf("object type should be *unstructured.Unstructured but was %t", object) - } - return unstr, nil - }, - patchEndpoint: func(ctx context.Context, namespace, name string, pt types.PatchType, data []byte) error { - _, err := downstreamClient.Resource(endpointsGVR).Namespace(namespace).Patch(ctx, name, pt, data, metav1.PatchOptions{}) - return err - }, - } - - informers, _ := ddsifForDownstream.Informers() - endpointsInformer, ok := informers[endpointsGVR] - if !ok { - return nil, errors.New("endpoints informer should be available") - } - - _, _ = endpointsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { - c.enqueueEndpoints(obj) - }, - UpdateFunc: func(old, new interface{}) { - c.enqueueEndpoints(new) - }, - }) - - servicesInformer, ok := informers[servicesGVR] - if !ok { - return nil, errors.New("endpoints informer should be available") - } - - _, _ = servicesInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - UpdateFunc: func(old, new interface{}) { - c.enqueueService(new) - }, - }) - - return c, nil -} - -type controller struct { - queue workqueue.RateLimitingInterface - - syncTargetClusterName logicalcluster.Name - syncTargetName string - syncTargetUID types.UID - syncTargetKey string - - getDownstreamResource func(gvr schema.GroupVersionResource, namespace, name string) (*unstructured.Unstructured, error) - getDownstreamNamespace func(name string) (*unstructured.Unstructured, error) - 
patchEndpoint func(ctx context.Context, namespace, name string, pt types.PatchType, data []byte) error -} - -func (c *controller) enqueueEndpoints(obj interface{}) { - key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) - if err != nil { - utilruntime.HandleError(err) - return - } - - logger := logging.WithQueueKey(logging.WithReconciler(klog.Background(), ControllerName), key) - logger.V(2).Info("queueing") - c.queue.Add(key) -} - -func (c *controller) enqueueService(obj interface{}) { - key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) - if err != nil { - utilruntime.HandleError(err) - return - } - - logger := logging.WithQueueKey(logging.WithReconciler(klog.Background(), ControllerName), key) - - namespace, name, err := cache.SplitMetaNamespaceKey(key) - if err != nil { - logger.Error(err, "error when queueing from the related service") - } - - _, err = c.getDownstreamResource(endpointsGVR, namespace, name) - if kerrors.IsNotFound(err) { - // no related Endpoints resource => nothing to do - return - } - if err != nil { - logger.Error(err, "error when queueing from the related service") - } - - logger.V(2).Info("queueing from service") - c.queue.Add(key) -} - -// Start starts N worker processes processing work items. -func (c *controller) Start(ctx context.Context, numThreads int) { - defer utilruntime.HandleCrash() - defer c.queue.ShutDown() - - logger := logging.WithReconciler(klog.FromContext(ctx), ControllerName) - ctx = klog.NewContext(ctx, logger) - logger.Info("Starting controller") - defer func() { - logger.Info("Shutting down controller") - }() - - for i := 0; i < numThreads; i++ { - go wait.UntilWithContext(ctx, c.startWorker, time.Second) - } - - <-ctx.Done() -} - -// startWorker processes work items until stopCh is closed. -func (c *controller) startWorker(ctx context.Context) { - for c.processNextWorkItem(ctx) { - } -} - -func (c *controller) processNextWorkItem(ctx context.Context) bool { - // Wait until there is a new item in the working queue - key, quit := c.queue.Get() - if quit { - return false - } - - qk := key.(string) - - logger := logging.WithQueueKey(klog.FromContext(ctx), qk) - ctx = klog.NewContext(ctx, logger) - logger.V(1).Info("processing", qk) - - // No matter what, tell the queue we're done with this key, to unblock - // other workers. - defer c.queue.Done(key) - - if err := c.process(ctx, qk); err != nil { - utilruntime.HandleError(fmt.Errorf("%s failed to sync %q, err: %w", ControllerName, key, err)) - c.queue.AddRateLimited(key) - return true - } - - c.queue.Forget(key) - - return true -} diff --git a/pkg/syncer/endpoints/endpoint_downstream_process.go b/pkg/syncer/endpoints/endpoint_downstream_process.go deleted file mode 100644 index 5c9f4416ddd..00000000000 --- a/pkg/syncer/endpoints/endpoint_downstream_process.go +++ /dev/null @@ -1,109 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package endpoints - -import ( - "context" - "strings" - - kerrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/client-go/tools/cache" - "k8s.io/klog/v2" - - "github.com/kcp-dev/kcp/pkg/logging" - "github.com/kcp-dev/kcp/pkg/syncer/shared" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" -) - -func (c *controller) process(ctx context.Context, key string) error { - logger := klog.FromContext(ctx) - - namespace, name, err := cache.SplitMetaNamespaceKey(key) - if err != nil { - return err - } - logger = logger.WithValues(logging.NamespaceKey, namespace, logging.NameKey, name) - - namespaceObj, err := c.getDownstreamNamespace(namespace) - if kerrors.IsNotFound(err) { - return nil - } - if err != nil { - return err - } - - locator, ok, err := shared.LocatorFromAnnotations(namespaceObj.GetAnnotations()) - if err != nil { - return err - } - if !ok { - return nil - } - - if string(locator.SyncTarget.UID) != string(c.syncTargetUID) || - locator.SyncTarget.Name != c.syncTargetName || - locator.SyncTarget.ClusterName != c.syncTargetClusterName.String() { - return nil - } - - endpoints, err := c.getDownstreamResource(endpointsGVR, namespace, name) - if kerrors.IsNotFound(err) { - return nil - } - if err != nil { - return err - } - - if endpoints.GetLabels()[workloadv1alpha1.ClusterResourceStateLabelPrefix+c.syncTargetKey] == string(workloadv1alpha1.ResourceStateUpsync) { - // Endpoints resource already labelled for upsyncing. Nothing more to do. - return nil - } - - service, err := c.getDownstreamResource(servicesGVR, namespace, name) - if kerrors.IsNotFound(err) { - return nil - } - if err != nil { - return err - } - - // Service has owner refs ? => it was certainly created downstream from an already synced higher level resource (KNative ?). - // Resources synced by the Syncer do not have owner references. - if len(service.GetOwnerReferences()) > 0 { - logger.V(3).Info("ignoring endpoint since it has an owner reference") - return nil - } - - derivedResourcesToUpsync := strings.Split(service.GetAnnotations()[workloadv1alpha1.ExperimentalUpsyncDerivedResourcesAnnotationKey], ",") - if len(derivedResourcesToUpsync) == 0 || - !sets.New[string](derivedResourcesToUpsync...).Has(endpointsGVR.GroupResource().String()) { - logger.V(3).Info("ignoring endpoint since it is not mentioned in the service 'workload.kcp.io/upsync-derived-resources' annotation") - return nil - } - - logger.V(1).Info("adding the upsync label on endpoint") - err = c.patchEndpoint(ctx, namespace, name, types.StrategicMergePatchType, []byte( - `{"metadata": {"labels": {"`+ - workloadv1alpha1.ClusterResourceStateLabelPrefix+c.syncTargetKey+`":"`+string(workloadv1alpha1.ResourceStateUpsync)+ - `"}}}`)) - if kerrors.IsNotFound(err) { - return nil - } - return err -} diff --git a/pkg/syncer/endpoints/endpoint_downstream_process_test.go b/pkg/syncer/endpoints/endpoint_downstream_process_test.go deleted file mode 100644 index fa5c450e045..00000000000 --- a/pkg/syncer/endpoints/endpoint_downstream_process_test.go +++ /dev/null @@ -1,394 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package endpoints - -import ( - "context" - "encoding/json" - "errors" - "testing" - - "github.com/kcp-dev/logicalcluster/v3" - "github.com/stretchr/testify/require" - - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/util/workqueue" - - "github.com/kcp-dev/kcp/pkg/syncer/shared" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" -) - -var scheme *runtime.Scheme - -func init() { - scheme = runtime.NewScheme() - _ = corev1.AddToScheme(scheme) -} - -func TestEndpointsControllerProcess(t *testing.T) { - defaultSyncTargetLocator := shared.SyncTargetLocator{ - ClusterName: "root:org:ws", - Name: "us-west1", - UID: types.UID("syncTargetUID"), - } - - tests := map[string]struct { - endpointName string - - syncTargetLocator *shared.SyncTargetLocator - - downstreamNamespace *corev1.Namespace - namespaceGetError error - - downstreamEndpoints *corev1.Endpoints - endpointsGetError error - - downstreamService *corev1.Service - serviceGetError error - - expectedError string - expectedPatch string - expectedPatchType types.PatchType - }{ - "Label the endpoints when service is there and annotated": { - endpointName: "httpecho", - downstreamNamespace: namespace("downstream-ns").WithLocator(t, shared.NamespaceLocator{SyncTarget: defaultSyncTargetLocator, - ClusterName: logicalcluster.Name("root:org:ws"), - Namespace: "ns", - }).Object(), - downstreamEndpoints: endpoints("httpecho").WithNamespace("downstream-ns").Object(), - downstreamService: service("httpecho").WithNamespace("downstream-ns").WithAnnotations(map[string]string{ - "experimental.workload.kcp.io/upsync-derived-resources": "pods,endpoints", - }).Object(), - expectedPatch: `{"metadata": {"labels": {"state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g":"Upsync"}}}`, - expectedPatchType: types.StrategicMergePatchType, - }, - "Don't label the endpoints when service is there but not annotated correctly": { - endpointName: "httpecho", - downstreamNamespace: namespace("downstream-ns").WithLocator(t, shared.NamespaceLocator{SyncTarget: defaultSyncTargetLocator, - ClusterName: logicalcluster.Name("root:org:ws"), - Namespace: "ns", - }).Object(), - downstreamEndpoints: endpoints("httpecho").WithNamespace("downstream-ns").Object(), - downstreamService: service("httpecho").WithNamespace("downstream-ns").WithAnnotations(map[string]string{ - "workload.kcp.io/upsync-derived-resources": "pods", - }).Object(), - }, - "Don't label the endpoints when service is there but not annotated": { - endpointName: "httpecho", - downstreamNamespace: namespace("downstream-ns").WithLocator(t, shared.NamespaceLocator{SyncTarget: defaultSyncTargetLocator, - ClusterName: logicalcluster.Name("root:org:ws"), - Namespace: "ns", - }).Object(), - downstreamEndpoints: endpoints("httpecho").WithNamespace("downstream-ns").Object(), - downstreamService: 
service("httpecho").WithNamespace("downstream-ns").Object(), - }, - "Don't label the endpoints when namespace resource is not found": { - endpointName: "httpecho", - downstreamEndpoints: endpoints("httpecho").WithNamespace("downstream-ns").Object(), - downstreamService: service("httpecho").WithNamespace("downstream-ns").Object(), - }, - "Error when namespace retrieval fails": { - endpointName: "httpecho", - namespaceGetError: errors.New("error"), - downstreamEndpoints: endpoints("httpecho").WithNamespace("downstream-ns").Object(), - downstreamService: service("httpecho").WithNamespace("downstream-ns").Object(), - expectedError: "error", - }, - "Don't label the endpoints when namespace has no locator": { - endpointName: "httpecho", - downstreamNamespace: namespace("downstream-ns").Object(), - downstreamEndpoints: endpoints("httpecho").WithNamespace("downstream-ns").Object(), - downstreamService: service("httpecho").WithNamespace("downstream-ns").Object(), - }, - "Error on wrong namespace locator": { - endpointName: "httpecho", - downstreamNamespace: namespace("downstream-ns").WithAnnotations(map[string]string{ - "kcp.io/namespace-locator": "invalid json content", - }).Object(), - downstreamEndpoints: endpoints("httpecho").WithNamespace("downstream-ns").Object(), - downstreamService: service("httpecho").WithNamespace("downstream-ns").Object(), - expectedError: "invalid character 'i' looking for beginning of value", - }, - "Don't label the endpoints when namespace locator syncTarget cluster name doesn't match": { - endpointName: "httpecho", - downstreamNamespace: namespace("downstream-ns").WithLocator(t, shared.NamespaceLocator{ - SyncTarget: shared.SyncTargetLocator{ - ClusterName: "another Cluster", - Name: defaultSyncTargetLocator.Name, - UID: defaultSyncTargetLocator.UID, - }, - ClusterName: logicalcluster.Name("root:org:ws"), - Namespace: "ns", - }).Object(), - downstreamEndpoints: endpoints("httpecho").WithNamespace("downstream-ns").Object(), - downstreamService: service("httpecho").WithNamespace("downstream-ns").Object(), - }, - "Don't label the endpoints when namespace locator syncTarget name doesn't match": { - endpointName: "httpecho", - downstreamNamespace: namespace("downstream-ns").WithLocator(t, shared.NamespaceLocator{ - SyncTarget: shared.SyncTargetLocator{ - ClusterName: defaultSyncTargetLocator.ClusterName, - Name: "anotherName", - UID: defaultSyncTargetLocator.UID, - }, - ClusterName: logicalcluster.Name("root:org:ws"), - Namespace: "ns", - }).Object(), - downstreamEndpoints: endpoints("httpecho").WithNamespace("downstream-ns").Object(), - downstreamService: service("httpecho").WithNamespace("downstream-ns").Object(), - }, - "Don't label the endpoints when namespace locator syncTarget UID doesn't match": { - endpointName: "httpecho", - downstreamNamespace: namespace("downstream-ns").WithLocator(t, shared.NamespaceLocator{ - SyncTarget: shared.SyncTargetLocator{ - ClusterName: defaultSyncTargetLocator.ClusterName, - Name: defaultSyncTargetLocator.Name, - UID: types.UID("anotherUID"), - }, - ClusterName: logicalcluster.Name("root:org:ws"), - Namespace: "ns", - }).Object(), - downstreamEndpoints: endpoints("httpecho").WithNamespace("downstream-ns").Object(), - downstreamService: service("httpecho").WithNamespace("downstream-ns").Object(), - }, - "Don't label the endpoints when endpoints resource is not found": { - endpointName: "httpecho", - downstreamNamespace: namespace("downstream-ns").WithLocator(t, shared.NamespaceLocator{SyncTarget: defaultSyncTargetLocator, - 
ClusterName: logicalcluster.Name("root:org:ws"), - Namespace: "ns", - }).Object(), - downstreamService: service("httpecho").WithNamespace("downstream-ns").Object(), - }, - "Error when endpoints retrieval fails": { - endpointName: "httpecho", - downstreamNamespace: namespace("downstream-ns").WithLocator(t, shared.NamespaceLocator{SyncTarget: defaultSyncTargetLocator, - ClusterName: logicalcluster.Name("root:org:ws"), - Namespace: "ns", - }).Object(), - endpointsGetError: errors.New("error"), - downstreamService: service("httpecho").WithNamespace("downstream-ns").Object(), - expectedError: "error", - }, - "Don't label the endpoints when endpoints is labelled for upsync already": { - endpointName: "httpecho", - downstreamNamespace: namespace("downstream-ns").WithLocator(t, shared.NamespaceLocator{SyncTarget: defaultSyncTargetLocator, - ClusterName: logicalcluster.Name("root:org:ws"), - Namespace: "ns", - }).Object(), - downstreamEndpoints: endpoints("httpecho").WithNamespace("downstream-ns").WithLabels(map[string]string{ - "state.workload.kcp.io/" + workloadv1alpha1.ToSyncTargetKey(logicalcluster.Name(defaultSyncTargetLocator.ClusterName), defaultSyncTargetLocator.Name): "Upsync", - }).Object(), - downstreamService: service("httpecho").WithNamespace("downstream-ns").Object(), - }, - "Don't label the endpoints when service resource is not found": { - endpointName: "httpecho", - downstreamNamespace: namespace("downstream-ns").WithLocator(t, shared.NamespaceLocator{SyncTarget: defaultSyncTargetLocator, - ClusterName: logicalcluster.Name("root:org:ws"), - Namespace: "ns", - }).Object(), - downstreamEndpoints: endpoints("httpecho").WithNamespace("downstream-ns").Object(), - }, - "Error when service retrieval fails": { - endpointName: "httpecho", - downstreamNamespace: namespace("downstream-ns").WithLocator(t, shared.NamespaceLocator{SyncTarget: defaultSyncTargetLocator, - ClusterName: logicalcluster.Name("root:org:ws"), - Namespace: "ns", - }).Object(), - downstreamEndpoints: endpoints("httpecho").WithNamespace("downstream-ns").Object(), - serviceGetError: errors.New("error"), - expectedError: "error", - }, - "Don't label the endpoints when service has an owner refs": { - endpointName: "httpecho", - downstreamNamespace: namespace("downstream-ns").WithLocator(t, shared.NamespaceLocator{SyncTarget: defaultSyncTargetLocator, - ClusterName: logicalcluster.Name("root:org:ws"), - Namespace: "ns", - }).Object(), - downstreamEndpoints: endpoints("httpecho").WithNamespace("downstream-ns").Object(), - downstreamService: service("httpecho").WithNamespace("downstream-ns").WithOwnerRef().Object(), - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - if tc.syncTargetLocator == nil { - tc.syncTargetLocator = &defaultSyncTargetLocator - } - - syncTargetKey := workloadv1alpha1.ToSyncTargetKey(logicalcluster.Name(tc.syncTargetLocator.ClusterName), tc.syncTargetLocator.Name) - - actualPatch := "" - var actualPatchType types.PatchType - controller := controller{ - queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()), - - syncTargetName: tc.syncTargetLocator.Name, - syncTargetClusterName: logicalcluster.Name(tc.syncTargetLocator.ClusterName), - syncTargetUID: tc.syncTargetLocator.UID, - syncTargetKey: syncTargetKey, - - getDownstreamResource: func(gvr schema.GroupVersionResource, namespace, name string) (*unstructured.Unstructured, error) { - var obj runtime.Object - switch gvr { - case 
endpointsGVR: - if tc.endpointsGetError != nil { - return nil, tc.endpointsGetError - } - if tc.downstreamEndpoints == nil { - return nil, apierrors.NewNotFound(gvr.GroupResource(), name) - } - obj = tc.downstreamEndpoints - case servicesGVR: - if tc.serviceGetError != nil { - return nil, tc.serviceGetError - } - if tc.downstreamService == nil { - return nil, apierrors.NewNotFound(gvr.GroupResource(), name) - } - obj = tc.downstreamService - } - var unstr unstructured.Unstructured - err := scheme.Convert(obj, &unstr, nil) - require.NoError(t, err) - return &unstr, nil - }, - getDownstreamNamespace: func(name string) (*unstructured.Unstructured, error) { - if tc.namespaceGetError != nil { - return nil, tc.namespaceGetError - } - if tc.downstreamNamespace == nil { - return nil, apierrors.NewNotFound(namespacesGVR.GroupResource(), name) - } - var unstr unstructured.Unstructured - err := scheme.Convert(tc.downstreamNamespace, &unstr, nil) - require.NoError(t, err) - return &unstr, nil - }, - patchEndpoint: func(ctx context.Context, namespace, name string, pt types.PatchType, data []byte) error { - actualPatch = string(data) - actualPatchType = pt - return nil - }, - } - - namespaceName := "" - if tc.downstreamNamespace != nil { - namespaceName = tc.downstreamNamespace.Name - } - key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(&metav1.ObjectMeta{ - Name: tc.endpointName, - Namespace: namespaceName, - }) - require.NoError(t, err) - err = controller.process(ctx, key) - - if tc.expectedError != "" { - require.EqualError(t, err, tc.expectedError) - } else { - require.NoError(t, err) - } - - require.Equal(t, tc.expectedPatch, actualPatch) - require.Equal(t, tc.expectedPatchType, actualPatchType) - }) - } -} - -type resourceBuilder[Type metav1.Object] struct { - obj Type -} - -func (r *resourceBuilder[Type]) Object() Type { - return r.obj -} - -func (r *resourceBuilder[Type]) WithNamespace(namespace string) *resourceBuilder[Type] { - r.obj.SetNamespace(namespace) - return r -} - -func (r *resourceBuilder[Type]) WithOwnerRef() *resourceBuilder[Type] { - r.obj.SetOwnerReferences([]metav1.OwnerReference{ - { - Name: "name", - }, - }) - return r -} - -func (r *resourceBuilder[Type]) WithLocator(t *testing.T, locator shared.NamespaceLocator) *resourceBuilder[Type] { - locatorJSON, err := json.Marshal(locator) - require.NoError(t, err) - r.WithAnnotations(map[string]string{ - shared.NamespaceLocatorAnnotation: string(locatorJSON), - }) - return r -} - -func (r *resourceBuilder[Type]) WithAnnotations(additionalAnnotations map[string]string) *resourceBuilder[Type] { - annotations := r.obj.GetAnnotations() - if annotations == nil { - annotations = make(map[string]string) - } - for k, v := range additionalAnnotations { - annotations[k] = v - } - - r.obj.SetAnnotations(annotations) - return r -} - -func (r *resourceBuilder[Type]) WithLabels(additionalLabels map[string]string) *resourceBuilder[Type] { - labels := r.obj.GetLabels() - if labels == nil { - labels = make(map[string]string) - } - for k, v := range additionalLabels { - labels[k] = v - } - - r.obj.SetLabels(labels) - return r -} - -func newResourceBuilder[Type metav1.Object](obj Type, name string) *resourceBuilder[Type] { - obj.SetName(name) - return &resourceBuilder[Type]{obj} -} - -func namespace(name string) *resourceBuilder[*corev1.Namespace] { - return newResourceBuilder(&corev1.Namespace{}, name) -} - -func endpoints(name string) *resourceBuilder[*corev1.Endpoints] { - return newResourceBuilder(&corev1.Endpoints{}, name) -} - -func 
service(name string) *resourceBuilder[*corev1.Service] { - return newResourceBuilder(&corev1.Service{}, name) -} diff --git a/pkg/syncer/indexers/indexes.go b/pkg/syncer/indexers/indexes.go deleted file mode 100644 index 6f34484dd68..00000000000 --- a/pkg/syncer/indexers/indexes.go +++ /dev/null @@ -1,49 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package indexers - -import ( - "encoding/json" - "fmt" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/kcp-dev/kcp/pkg/syncer/shared" -) - -const ( - ByNamespaceLocatorIndexName = "syncer-spec-ByNamespaceLocator" -) - -// indexByNamespaceLocator is a cache.IndexFunc that indexes namespaces by the namespaceLocator annotation. -func IndexByNamespaceLocator(obj interface{}) ([]string, error) { - metaObj, ok := obj.(metav1.Object) - if !ok { - return []string{}, fmt.Errorf("obj is supposed to be a metav1.Object, but is %T", obj) - } - if loc, found, err := shared.LocatorFromAnnotations(metaObj.GetAnnotations()); err != nil { - return []string{}, fmt.Errorf("failed to get locator from annotations: %w", err) - } else if !found { - return []string{}, nil - } else { - bs, err := json.Marshal(loc) - if err != nil { - return []string{}, fmt.Errorf("failed to marshal locator %#v: %w", loc, err) - } - return []string{string(bs)}, nil - } -} diff --git a/pkg/syncer/namespace/namespace_downstream_controller.go b/pkg/syncer/namespace/namespace_downstream_controller.go deleted file mode 100644 index ada94cee588..00000000000 --- a/pkg/syncer/namespace/namespace_downstream_controller.go +++ /dev/null @@ -1,338 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
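A sketch of how an index like IndexByNamespaceLocator was typically consumed, assuming the indexers and shared packages as they existed before this removal; namespacesForLocator is a hypothetical name. The IndexFunc is registered on a namespace informer before it starts, and queries use the marshaled locator as the key.

package example

import (
    "encoding/json"

    "k8s.io/client-go/tools/cache"

    "github.com/kcp-dev/kcp/pkg/syncer/indexers"
    "github.com/kcp-dev/kcp/pkg/syncer/shared"
)

// namespacesForLocator returns the downstream namespaces whose
// kcp.io/namespace-locator annotation matches the given locator.
func namespacesForLocator(informer cache.SharedIndexInformer, locator shared.NamespaceLocator) ([]interface{}, error) {
    // AddIndexers must be called before the informer is started.
    if err := informer.AddIndexers(cache.Indexers{
        indexers.ByNamespaceLocatorIndexName: indexers.IndexByNamespaceLocator,
    }); err != nil {
        return nil, err
    }
    // IndexByNamespaceLocator indexes by the marshaled locator, so the
    // query key is produced the same way.
    key, err := json.Marshal(locator)
    if err != nil {
        return nil, err
    }
    return informer.GetIndexer().ByIndex(indexers.ByNamespaceLocatorIndexName, string(key))
}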
-*/ - -package namespace - -import ( - "context" - "fmt" - "sync" - "time" - - "github.com/go-logr/logr" - "github.com/kcp-dev/logicalcluster/v3" - - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/informers" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/util/workqueue" - "k8s.io/klog/v2" - - ddsif "github.com/kcp-dev/kcp/pkg/informer" - "github.com/kcp-dev/kcp/pkg/logging" - "github.com/kcp-dev/kcp/pkg/syncer/synctarget" -) - -const ( - controllerNameRoot = "kcp-workload-syncer-namespace" - downstreamControllerName = controllerNameRoot + "-downstream" -) - -type DownstreamController struct { - queue workqueue.RateLimitingInterface - delayedQueue workqueue.RateLimitingInterface - - lock sync.Mutex - toDeleteMap map[string]time.Time - namespaceCleanDelay time.Duration - - deleteDownstreamNamespace func(ctx context.Context, namespace string) error - upstreamNamespaceExists func(clusterName logicalcluster.Name, upstreamNamespaceName string) (bool, error) - getDownstreamNamespace func(name string) (runtime.Object, error) - listDownstreamNamespaces func() ([]runtime.Object, error) - isDowntreamNamespaceEmpty func(ctx context.Context, namespace string) (bool, error) - createConfigMap func(ctx context.Context, configMap *corev1.ConfigMap) (*corev1.ConfigMap, error) - updateConfigMap func(ctx context.Context, configMap *corev1.ConfigMap) (*corev1.ConfigMap, error) - - syncTargetName string - syncTargetClusterName logicalcluster.Name - syncTargetUID types.UID - syncTargetKey string - dnsNamespace string -} - -func NewDownstreamController( - syncerLogger logr.Logger, - syncTargetWorkspace logicalcluster.Name, - syncTargetName, syncTargetKey string, - syncTargetUID types.UID, - downstreamConfig *rest.Config, - downstreamClient dynamic.Interface, - ddsifForDownstream *ddsif.GenericDiscoveringDynamicSharedInformerFactory[cache.SharedIndexInformer, cache.GenericLister, informers.GenericInformer], - getShardAccess synctarget.GetShardAccessFunc, - dnsNamespace string, - namespaceCleanDelay time.Duration, -) (*DownstreamController, error) { - namespaceGVR := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"} - logger := logging.WithReconciler(syncerLogger, downstreamControllerName) - kubeClient := kubernetes.NewForConfigOrDie(downstreamConfig) - - c := DownstreamController{ - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), downstreamControllerName), - delayedQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), downstreamControllerName), - toDeleteMap: make(map[string]time.Time), - deleteDownstreamNamespace: func(ctx context.Context, namespace string) error { - return downstreamClient.Resource(namespaceGVR).Delete(ctx, namespace, metav1.DeleteOptions{}) - }, - upstreamNamespaceExists: func(clusterName logicalcluster.Name, upstreamNamespaceName string) (bool, error) { - shardAccess, ok, err := getShardAccess(clusterName) - if err != nil { - return false, err - } - if !ok { - return false, fmt.Errorf("shard-related clients not found for cluster %q", clusterName) - } - - informer, err := 
shardAccess.SyncerDDSIF.ForResource(namespaceGVR) - if err != nil { - return false, err - } - - _, err = informer.Lister().ByCluster(clusterName).Get(upstreamNamespaceName) - if apierrors.IsNotFound(err) { - return false, nil - } - if err != nil { - return false, err - } - return true, nil - }, - getDownstreamNamespace: func(downstreamNamespaceName string) (runtime.Object, error) { - informer, err := ddsifForDownstream.ForResource(namespaceGVR) - if err != nil { - return nil, err - } - return informer.Lister().Get(downstreamNamespaceName) - }, - listDownstreamNamespaces: func() ([]runtime.Object, error) { - informer, err := ddsifForDownstream.ForResource(namespaceGVR) - if err != nil { - return nil, err - } - return informer.Lister().List(labels.Everything()) - }, - isDowntreamNamespaceEmpty: func(ctx context.Context, namespace string) (bool, error) { - informers, notSynced := ddsifForDownstream.Informers() - if len(notSynced) > 0 { - return false, fmt.Errorf("some informers are still not synced in the downstream informer factory") - } - - for gvr, informer := range informers { - // Skip namespaces. - if gvr == namespaceGVR { - continue - } - list, err := informer.Lister().ByNamespace(namespace).List(labels.Everything()) - if err != nil { - return false, err - } - if len(list) > 0 { - return false, nil - } - } - return true, nil - }, - createConfigMap: func(ctx context.Context, configMap *corev1.ConfigMap) (*corev1.ConfigMap, error) { - return kubeClient.CoreV1().ConfigMaps(configMap.Namespace).Create(ctx, configMap, metav1.CreateOptions{}) - }, - updateConfigMap: func(ctx context.Context, configMap *corev1.ConfigMap) (*corev1.ConfigMap, error) { - return kubeClient.CoreV1().ConfigMaps(configMap.Namespace).Update(ctx, configMap, metav1.UpdateOptions{}) - }, - - syncTargetName: syncTargetName, - syncTargetClusterName: syncTargetWorkspace, - syncTargetUID: syncTargetUID, - syncTargetKey: syncTargetKey, - dnsNamespace: dnsNamespace, - - namespaceCleanDelay: namespaceCleanDelay, - } - - logger.V(2).Info("Set up downstream namespace informer") - - // Those handlers are for start/resync cases, in case a namespace deletion event is missed, these handlers - // will make sure that we cleanup the namespace in downstream after restart/resync. - ddsifForDownstream.AddEventHandler(ddsif.GVREventHandlerFuncs{ - AddFunc: func(gvr schema.GroupVersionResource, obj interface{}) { - if gvr == namespaceGVR { - c.AddToQueue(obj, logger) - } - }, - DeleteFunc: func(gvr schema.GroupVersionResource, obj interface{}) { - if gvr == namespaceGVR { - c.AddToQueue(obj, logger) - } - }, - }) - return &c, nil -} - -func (c *DownstreamController) AddToQueue(obj interface{}, logger logr.Logger) { - key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) // note: this is *not* a cluster-aware key - if err != nil { - utilruntime.HandleError(err) - return - } - - logging.WithQueueKey(logger, key).V(2).Info("queueing namespace") - c.queue.Add(key) -} - -// Start starts N worker processes processing work items. 
-func (c *DownstreamController) Start(ctx context.Context, numThreads int) { - defer utilruntime.HandleCrash() - defer c.queue.ShutDown() - defer c.delayedQueue.ShutDown() - - logger := logging.WithReconciler(klog.FromContext(ctx), downstreamControllerName) - ctx = klog.NewContext(ctx, logger) - logger.Info("Starting controller") - defer logger.Info("Shutting down controller") - - for i := 0; i < numThreads; i++ { - go wait.UntilWithContext(ctx, c.startWorker, time.Second) - go wait.UntilWithContext(ctx, c.startDelayedWorker, time.Second) - } - - <-ctx.Done() -} - -// startWorker processes work items until stopCh is closed. -func (c *DownstreamController) startWorker(ctx context.Context) { - for c.processNextWorkItem(ctx) { - } -} - -func (c *DownstreamController) processNextWorkItem(ctx context.Context) bool { - // Wait until there is a new item in the working queue - key, quit := c.queue.Get() - if quit { - return false - } - namespaceKey := key.(string) - - logger := logging.WithQueueKey(klog.FromContext(ctx), namespaceKey) - ctx = klog.NewContext(ctx, logger) - logger.V(1).Info("processing key") - - // No matter what, tell the queue we're done with this key, to unblock - // other workers. - defer c.queue.Done(key) - - if err := c.process(ctx, namespaceKey); err != nil { - utilruntime.HandleError(fmt.Errorf("%s failed to sync %q, err: %w", downstreamControllerName, key, err)) - c.queue.AddRateLimited(key) - return true - } - - c.queue.Forget(key) - - return true -} - -func (c *DownstreamController) isPlannedForCleaning(key string) bool { - c.lock.Lock() - defer c.lock.Unlock() - _, ok := c.toDeleteMap[key] - return ok -} - -func (c *DownstreamController) CancelCleaning(key string) { - c.lock.Lock() - defer c.lock.Unlock() - delete(c.toDeleteMap, key) -} - -func (c *DownstreamController) PlanCleaning(key string) { - c.lock.Lock() - defer c.lock.Unlock() - now := time.Now() - if plannedFor, planned := c.toDeleteMap[key]; !planned || now.After(plannedFor) { - c.toDeleteMap[key] = now.Add(c.namespaceCleanDelay) - c.delayedQueue.AddAfter(key, c.namespaceCleanDelay) - } -} - -func (c *DownstreamController) startDelayedWorker(ctx context.Context) { - logger := klog.FromContext(ctx).WithValues("queue", "delayed") - ctx = klog.NewContext(ctx, logger) - for c.processNextDelayedWorkItem(ctx) { - } -} - -func (c *DownstreamController) processNextDelayedWorkItem(ctx context.Context) bool { - // Wait until there is a new item in the working queue - key, quit := c.delayedQueue.Get() - if quit { - return false - } - namespaceKey := key.(string) - - logger := logging.WithQueueKey(klog.FromContext(ctx), namespaceKey) - ctx = klog.NewContext(ctx, logger) - logger.V(1).Info("processing key") - - // No matter what, tell the queue we're done with this key, to unblock - // other workers. 
- defer c.delayedQueue.Done(key) - - if err := c.processDelayed(ctx, namespaceKey); err != nil { - utilruntime.HandleError(fmt.Errorf("%s failed to sync %q, err: %w", downstreamControllerName, key, err)) - c.delayedQueue.AddRateLimited(key) - return true - } - c.delayedQueue.Forget(key) - return true -} - -func (c *DownstreamController) processDelayed(ctx context.Context, key string) error { - logger := klog.FromContext(ctx) - if !c.isPlannedForCleaning(key) { - logger.V(2).Info("Namespace is not marked for deletion check anymore, skipping") - return nil - } - - empty, err := c.isDowntreamNamespaceEmpty(ctx, key) - if err != nil { - return fmt.Errorf("failed to check if downstream namespace is empty: %w", err) - } - if !empty { - logger.V(2).Info("Namespace is not empty, skip cleaning now but keep it as a candidate for future cleaning") - return nil - } - - err = c.deleteDownstreamNamespace(ctx, key) - if err != nil && !apierrors.IsNotFound(err) { - return err - } - - if apierrors.IsNotFound(err) { - logger.V(2).Info("Namespace is not found, perhaps it was already deleted") - } - c.CancelCleaning(key) - return nil -} diff --git a/pkg/syncer/namespace/namespace_downstream_process.go b/pkg/syncer/namespace/namespace_downstream_process.go deleted file mode 100644 index 697d9a6c0b7..00000000000 --- a/pkg/syncer/namespace/namespace_downstream_process.go +++ /dev/null @@ -1,172 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package namespace - -import ( - "context" - "encoding/json" - - "github.com/kcp-dev/logicalcluster/v3" - - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/client-go/tools/cache" - "k8s.io/klog/v2" - - "github.com/kcp-dev/kcp/pkg/logging" - "github.com/kcp-dev/kcp/pkg/syncer/shared" - . 
"github.com/kcp-dev/kcp/tmc/pkg/logging" -) - -func (c *DownstreamController) process(ctx context.Context, key string) error { - logger := klog.FromContext(ctx) - _, namespaceName, err := cache.SplitMetaNamespaceKey(key) - if err != nil { - logger.Error(err, "invalid key") - return nil - } - - logger = logger.WithValues(DownstreamNamespace, namespaceName) - - downstreamNamespaceObj, err := c.getDownstreamNamespace(namespaceName) - if apierrors.IsNotFound(err) { - logger.V(4).Info("downstream namespace not found, ignoring key") - return nil - } else if err != nil { - logger.Error(err, "failed to get downstream namespace") - return nil - } - - downstreamNamespace := downstreamNamespaceObj.(*unstructured.Unstructured) - logger = logging.WithObject(logger, downstreamNamespace) - - namespaceLocatorJSON := downstreamNamespace.GetAnnotations()[shared.NamespaceLocatorAnnotation] - if namespaceLocatorJSON == "" { - logger.Error(nil, "downstream namespace has no namespaceLocator annotation") - return nil - } - - nsLocator := shared.NamespaceLocator{} - if err := json.Unmarshal([]byte(namespaceLocatorJSON), &nsLocator); err != nil { - logger.Error(err, "failed to unmarshal namespace locator", "namespaceLocator", namespaceLocatorJSON) - return nil - } - - // Check if the nsLocator SyncTarget UID is the same as ours. If not, we should ignore this namespace as it could be - // managed by different syncer. - if nsLocator.SyncTarget.UID != c.syncTargetUID { - logger.V(4).Info("downstream namespace is not handled by this sync target, ignoring") - return nil - } - - // Always refresh the DNS ConfigMap (even when the namespace has been deleted or is being deleted) - err = c.updateDNSConfigMap(ctx, nsLocator.ClusterName) - if err != nil { - return err - } - - if !downstreamNamespace.GetDeletionTimestamp().IsZero() { - logger.V(4).Info("downstream namespace is being deleted, ignoring key") - return nil - } - - logger = logger.WithValues(logging.WorkspaceKey, nsLocator.ClusterName, logging.NamespaceKey, nsLocator.Namespace) - exists, err := c.upstreamNamespaceExists(nsLocator.ClusterName, nsLocator.Namespace) - if err != nil { - logger.Error(err, "failed to check if upstream namespace exists") - return nil - } - if !exists { - logger.Info("adding the downstream namespace to the delayed deletion queue because the upstream namespace doesn't exist") - c.PlanCleaning(key) - return nil - } - // The namespace exists upstream, so we can remove it from the delayed delete queue - c.CancelCleaning(key) - // The upstream namespace still exists, nothing to do. - return nil -} - -func (c *DownstreamController) updateDNSConfigMap(ctx context.Context, clusterName logicalcluster.Name) error { - logger := klog.FromContext(ctx) - logger.WithName("dns") - logger.Info("refreshing logical to physical namespace mapping table") - - // Reconstruct ConfigMap from scratch because: - // - it's a sound approach - // - it's low overhead considering operations on namespaces are relatively rare. 
- - namespaces, err := c.listDownstreamNamespaces() - if err != nil { - logger.Error(err, "failed to list downstream namespaces") - return err // retry - } - - data := make(map[string]string) - for _, obj := range namespaces { - namespace := obj.(*unstructured.Unstructured) - annotations := namespace.GetAnnotations() - if annotations == nil { - // skip - continue - } - - locator, found, err := shared.LocatorFromAnnotations(annotations) - if err != nil { - // Corrupted ns locator annotation value - logger.Error(err, "invalid namespace locator", "name", namespace.GetName()) - continue - } - - if !found { - continue - } - - // Only include namespaces in the same workspace - if locator.ClusterName == clusterName { - data[locator.Namespace] = namespace.GetName() - } - } - - configMapName := shared.GetDNSID(clusterName, c.syncTargetUID, c.syncTargetName) - - cm := &corev1.ConfigMap{ - TypeMeta: metav1.TypeMeta{}, - ObjectMeta: metav1.ObjectMeta{ - Name: configMapName, - Namespace: c.dnsNamespace, - }, - Data: data, - } - - // TODO(LV): consider comparing the new ConfigMap with the cached one to avoid a rest api call. - - _, err = c.updateConfigMap(ctx, cm) - if apierrors.IsNotFound(err) { - _, err = c.createConfigMap(ctx, cm) - if err == nil { - return nil - } - } - if err != nil { - logger.Error(err, "failed to create or update ConfigMap (retrying)", "name", configMapName, "namespace", c.dnsNamespace) - return err // retry - } - return nil -} diff --git a/pkg/syncer/namespace/namespace_downstream_process_test.go b/pkg/syncer/namespace/namespace_downstream_process_test.go deleted file mode 100644 index daa7a6afb83..00000000000 --- a/pkg/syncer/namespace/namespace_downstream_process_test.go +++ /dev/null @@ -1,179 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
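The create-or-update fallback at the end of updateDNSConfigMap is a common client-go upsert. A minimal sketch of that shape under standard client-go types; upsertConfigMap is a hypothetical name.

package example

import (
    "context"

    corev1 "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
)

// upsertConfigMap mirrors the flow above: optimistically Update, and fall
// back to Create only when the ConfigMap does not exist yet.
func upsertConfigMap(ctx context.Context, client kubernetes.Interface, cm *corev1.ConfigMap) error {
    _, err := client.CoreV1().ConfigMaps(cm.Namespace).Update(ctx, cm, metav1.UpdateOptions{})
    if apierrors.IsNotFound(err) {
        _, err = client.CoreV1().ConfigMaps(cm.Namespace).Create(ctx, cm, metav1.CreateOptions{})
    }
    return err
}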
-*/ - -package namespace - -import ( - "context" - "encoding/json" - "errors" - "testing" - "time" - - "github.com/kcp-dev/logicalcluster/v3" - "github.com/stretchr/testify/require" - - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/util/workqueue" - - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" - "github.com/kcp-dev/kcp/sdk/client" -) - -func TestSyncerNamespaceProcess(t *testing.T) { - tests := map[string]struct { - upstreamNamespaceExists bool - deletedNamespace string - syncTargetUID types.UID - - upstreamNamespaceExistsError error - getDownstreamNamespaceError error - getDownstreamNamespaceFromNamespaceLocatorError error - - eventOrigin string // upstream or downstream - }{ - "NamespaceSyncer removes downstream namespace when no matching upstream has been found, expect downstream namespace deletion": { - upstreamNamespaceExists: false, - deletedNamespace: "kcp-33jbiactwhg0", - eventOrigin: "downstream", - }, - "NamespaceSyncer doesn't remove downstream namespace when nsLocator synctarget UID is different, expect no namespace deletion": { - upstreamNamespaceExists: false, - deletedNamespace: "", - eventOrigin: "downstream", - syncTargetUID: "1234", - }, - "NamespaceSyncer, downstream event, no deletion as there is a matching upstream namespace, expect no namespace deletion": { - upstreamNamespaceExists: true, - deletedNamespace: "", - eventOrigin: "downstream", - }, - "NamespaceSyncer, downstream event, error trying to get the upstream namespace, expect no namespace deletion": { - upstreamNamespaceExistsError: errors.New("error"), - deletedNamespace: "", - eventOrigin: "downstream", - }, - "NamespaceSyncer, downstream event, error trying to get the downstream namespace, expect no namespace deletion": { - getDownstreamNamespaceError: errors.New("error"), - deletedNamespace: "", - eventOrigin: "downstream", - }, - "NamespaceSyncer, downstream event, downstream namespace is not found, expect no namespace deletion": { - getDownstreamNamespaceError: apierrors.NewNotFound(schema.GroupResource(metav1.GroupResource{Group: "", Resource: ""}), "not-found"), - deletedNamespace: "", - eventOrigin: "downstream", - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - downstreamNamespace := namespace(logicalcluster.NewPath(""), "kcp-33jbiactwhg0", map[string]string{ - "internal.workload.kcp.io/cluster": "2gzO8uuQmIoZ2FE95zoOPKtrtGGXzzjAvtl6q5", - }, map[string]string{ - "kcp.io/namespace-locator": `{"syncTarget":{"workspace":"root:org:ws","name":"us-west1","uid":"syncTargetUID"},"workspace":"root:org:ws","namespace":"test"}`, - }) - syncTargetClusterName := logicalcluster.Name("root:org:ws") - syncTargetName := "us-west1" - syncTargetKey := workloadv1alpha1.ToSyncTargetKey(syncTargetClusterName, syncTargetName) - syncTargetUID := types.UID("syncTargetUID") - if tc.syncTargetUID != "" { - syncTargetUID = tc.syncTargetUID - } - nsController := DownstreamController{ - toDeleteMap: make(map[string]time.Time), - delayedQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), downstreamControllerName), - deleteDownstreamNamespace: func(ctx context.Context, downstreamNamespaceName string) error 
{ - return nil - }, - upstreamNamespaceExists: func(clusterName logicalcluster.Name, upstreamNamespaceName string) (bool, error) { - return tc.upstreamNamespaceExists, tc.upstreamNamespaceExistsError - }, - getDownstreamNamespace: func(name string) (runtime.Object, error) { - nsJSON, _ := json.Marshal(downstreamNamespace) - unstructured := &unstructured.Unstructured{} - _ = json.Unmarshal(nsJSON, unstructured) - return unstructured, tc.getDownstreamNamespaceError - }, - listDownstreamNamespaces: func() (ret []runtime.Object, err error) { - return []runtime.Object{}, nil - }, - createConfigMap: func(ctx context.Context, configMap *corev1.ConfigMap) (*corev1.ConfigMap, error) { - return nil, nil - }, - updateConfigMap: func(ctx context.Context, configMap *corev1.ConfigMap) (*corev1.ConfigMap, error) { - return nil, nil - }, - isDowntreamNamespaceEmpty: func(ctx context.Context, namespaceName string) (bool, error) { - return true, nil - }, - syncTargetName: syncTargetName, - syncTargetClusterName: syncTargetClusterName, - syncTargetUID: syncTargetUID, - syncTargetKey: syncTargetKey, - dnsNamespace: "kcp-33jbiactwhg0", - } - - var key string - if tc.eventOrigin == "downstream" { - key = downstreamNamespace.GetName() - } else if tc.eventOrigin == "upstream" { - key = client.ToClusterAwareKey(logicalcluster.NewPath("root:org:ws"), "test") - } else { - t.Fatalf("unexpected event origin: %s", tc.eventOrigin) - } - - err := nsController.process(ctx, key) - require.NoError(t, err) - - if tc.deletedNamespace != "" { - require.True(t, nsController.isPlannedForCleaning(tc.deletedNamespace)) - require.Equal(t, len(nsController.toDeleteMap), 1) - } else { - require.Empty(t, len(nsController.toDeleteMap)) - } - }) - } -} - -func namespace(clusterName logicalcluster.Path, name string, labels, annotations map[string]string) *corev1.Namespace { - if !clusterName.Empty() { - if annotations == nil { - annotations = make(map[string]string) - } - annotations[logicalcluster.AnnotationKey] = clusterName.String() - } - - return &corev1.Namespace{ - TypeMeta: metav1.TypeMeta{ - Kind: "Namespace", - APIVersion: "v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Labels: labels, - Annotations: annotations, - }, - } -} diff --git a/pkg/syncer/shared/finalizer.go b/pkg/syncer/shared/finalizer.go deleted file mode 100644 index 7affc152ec5..00000000000 --- a/pkg/syncer/shared/finalizer.go +++ /dev/null @@ -1,101 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
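The hand-written locator JSON in the test fixture above is exactly what a shared.NamespaceLocator marshals to. A sketch of producing the annotation programmatically instead of as a string literal, assuming the shared package as it existed before this removal; locatorAnnotation is a hypothetical name.

package example

import (
    "encoding/json"

    "github.com/kcp-dev/kcp/pkg/syncer/shared"
)

// locatorAnnotation marshals a NamespaceLocator into the annotation map that
// the downstream controller later reads it back from.
func locatorAnnotation(locator shared.NamespaceLocator) (map[string]string, error) {
    raw, err := json.Marshal(locator)
    if err != nil {
        return nil, err
    }
    return map[string]string{shared.NamespaceLocatorAnnotation: string(raw)}, nil
}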
-*/ - -package shared - -import ( - "context" - "fmt" - - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/tools/cache" - "k8s.io/klog/v2" - - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" -) - -const ( - // SyncerFinalizerNamePrefix is the finalizer put onto resources by the syncer to claim ownership, - // *before* a downstream object is created. It is only removed when the downstream object is deleted. - SyncerFinalizerNamePrefix = "workload.kcp.io/syncer-" -) - -func EnsureUpstreamFinalizerRemoved(ctx context.Context, gvr schema.GroupVersionResource, upstreamLister cache.GenericLister, upstreamClient dynamic.Interface, upstreamNamespace, syncTargetKey string, resourceName string) error { - logger := klog.FromContext(ctx) - upstreamObjFromLister, err := upstreamLister.ByNamespace(upstreamNamespace).Get(resourceName) - if err != nil && !apierrors.IsNotFound(err) { - return err - } - if apierrors.IsNotFound(err) { - return nil - } - - upstreamObj, ok := upstreamObjFromLister.(*unstructured.Unstructured) - if !ok { - logger.Info(fmt.Sprintf("Error: upstream resource expected to be *unstructured.Unstructured, got %T", upstreamObjFromLister)) - return nil - } - - // TODO(jmprusi): This check will need to be against "GetDeletionTimestamp()" when using the syncer virtual workspace. - if upstreamObj.GetAnnotations()[workloadv1alpha1.InternalClusterDeletionTimestampAnnotationPrefix+syncTargetKey] == "" { - // Do nothing: the object should not be deleted anymore for this location on the KCP side - return nil - } - - upstreamObj = upstreamObj.DeepCopy() - - // Remove the syncer finalizer. - currentFinalizers := upstreamObj.GetFinalizers() - desiredFinalizers := []string{} - for _, finalizer := range currentFinalizers { - if finalizer != SyncerFinalizerNamePrefix+syncTargetKey { - desiredFinalizers = append(desiredFinalizers, finalizer) - } - } - upstreamObj.SetFinalizers(desiredFinalizers) - - // TODO(jmprusi): This code block will be handled by the syncer virtual workspace, so we can remove it once - // the virtual workspace syncer is integrated - // - Begin - - // Clean up the status annotation and the locationDeletionAnnotation. - annotations := upstreamObj.GetAnnotations() - delete(annotations, workloadv1alpha1.InternalClusterStatusAnnotationPrefix+syncTargetKey) - delete(annotations, workloadv1alpha1.InternalClusterDeletionTimestampAnnotationPrefix+syncTargetKey) - upstreamObj.SetAnnotations(annotations) - - // remove the cluster label. 
- upstreamLabels := upstreamObj.GetLabels() - delete(upstreamLabels, workloadv1alpha1.ClusterResourceStateLabelPrefix+syncTargetKey) - upstreamObj.SetLabels(upstreamLabels) - // - End of block to be removed once the virtual workspace syncer is integrated - - - if upstreamNamespace != "" { - _, err = upstreamClient.Resource(gvr).Namespace(upstreamObj.GetNamespace()).Update(ctx, upstreamObj, metav1.UpdateOptions{}) - } else { - _, err = upstreamClient.Resource(gvr).Update(ctx, upstreamObj, metav1.UpdateOptions{}) - } - - if err != nil { - logger.Error(err, "Failed updating upstream resource after removing the syncer finalizer") - return err - } - logger.V(2).Info("Updated upstream resource to remove the syncer finalizer") - return nil -} diff --git a/pkg/syncer/shared/helpers.go b/pkg/syncer/shared/helpers.go index bb7187675ce..8c9a094d485 100644 --- a/pkg/syncer/shared/helpers.go +++ b/pkg/syncer/shared/helpers.go @@ -29,26 +29,11 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" - - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" ) // SyncableClusterScopedResources holds a set of cluster-wide GVR that are allowed to be synced. var SyncableClusterScopedResources = sets.New[string](schema.GroupVersionResource{Group: "", Version: "v1", Resource: "persistentvolumes"}.String()) -// DeprecatedGetAssignedSyncTarget returns one assigned sync target in Sync state. It will -// likely lead to broken behaviour when there is one of those labels on a resource. -// -// Deprecated: use GetResourceState per cluster instead. -func DeprecatedGetAssignedSyncTarget(labels map[string]string) string { - for k, v := range labels { - if strings.HasPrefix(k, workloadv1alpha1.ClusterResourceStateLabelPrefix) && v == string(workloadv1alpha1.ResourceStateSync) { - return strings.TrimPrefix(k, workloadv1alpha1.ClusterResourceStateLabelPrefix) - } - } - return "" -} - // GetUpstreamResourceName returns the name with which the resource is known upstream. func GetUpstreamResourceName(downstreamResourceGVR schema.GroupVersionResource, downstreamResourceName string) string { configMapGVR := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "configmaps"} diff --git a/pkg/syncer/spec/dns/deployment_dns.yaml b/pkg/syncer/spec/dns/deployment_dns.yaml deleted file mode 100644 index 3b88ac79657..00000000000 --- a/pkg/syncer/spec/dns/deployment_dns.yaml +++ /dev/null @@ -1,35 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: Name - namespace: Namespace -spec: - replicas: 1 - strategy: - type: Recreate - selector: - matchLabels: - app: Name - template: - metadata: - labels: - app: Name - spec: - containers: - - name: kcp-dns - command: - - /ko-app/syncer - args: - - dns - - start - - --configmap-name - - ConfigMapName - env: - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - image: Image - imagePullPolicy: IfNotPresent - terminationMessagePolicy: FallbackToLogsOnError - serviceAccountName: Name diff --git a/pkg/syncer/spec/dns/dns_process.go b/pkg/syncer/spec/dns/dns_process.go deleted file mode 100644 index c1a8f7f9df0..00000000000 --- a/pkg/syncer/spec/dns/dns_process.go +++ /dev/null @@ -1,354 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
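The finalizer removal in EnsureUpstreamFinalizerRemoved above filters a string slice by rebuilding it. The same logic as a small standalone helper, assuming shared.SyncerFinalizerNamePrefix as defined above; withoutSyncerFinalizer is a hypothetical name.

package example

import "github.com/kcp-dev/kcp/pkg/syncer/shared"

// withoutSyncerFinalizer returns the finalizer list with this sync target's
// syncer finalizer (workload.kcp.io/syncer-<key>) removed; all other
// finalizers are preserved.
func withoutSyncerFinalizer(finalizers []string, syncTargetKey string) []string {
    kept := make([]string, 0, len(finalizers))
    for _, f := range finalizers {
        if f != shared.SyncerFinalizerNamePrefix+syncTargetKey {
            kept = append(kept, f)
        }
    }
    return kept
}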
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package dns - -import ( - "context" - "errors" - "sync" - - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - kubernetesinformers "k8s.io/client-go/informers" - "k8s.io/client-go/kubernetes" - listersappsv1 "k8s.io/client-go/listers/apps/v1" - listerscorev1 "k8s.io/client-go/listers/core/v1" - listersnetworkingv1 "k8s.io/client-go/listers/networking/v1" - listersrbacv1 "k8s.io/client-go/listers/rbac/v1" - "k8s.io/klog/v2" - - "github.com/kcp-dev/kcp/pkg/syncer/shared" -) - -type DNSProcessor struct { - downstreamKubeClient kubernetes.Interface - - serviceAccountLister listerscorev1.ServiceAccountLister - roleLister listersrbacv1.RoleLister - roleBindingLister listersrbacv1.RoleBindingLister - deploymentLister listersappsv1.DeploymentLister - serviceLister listerscorev1.ServiceLister - endpointLister listerscorev1.EndpointsLister - networkPolicyLister listersnetworkingv1.NetworkPolicyLister - - syncTargetUID types.UID - syncTargetName string - dnsNamespace string // namespace containing all DNS objects - dnsImage string - - initialized sync.Map - initializationMu sync.RWMutex -} - -func NewDNSProcessor( - downstreamKubeClient kubernetes.Interface, - syncerNamespaceInformerFactory kubernetesinformers.SharedInformerFactory, - syncTargetName string, - syncTargetUID types.UID, - dnsNamespace string, - dnsImage string) *DNSProcessor { - return &DNSProcessor{ - downstreamKubeClient: downstreamKubeClient, - serviceAccountLister: syncerNamespaceInformerFactory.Core().V1().ServiceAccounts().Lister(), - roleLister: syncerNamespaceInformerFactory.Rbac().V1().Roles().Lister(), - roleBindingLister: syncerNamespaceInformerFactory.Rbac().V1().RoleBindings().Lister(), - deploymentLister: syncerNamespaceInformerFactory.Apps().V1().Deployments().Lister(), - serviceLister: syncerNamespaceInformerFactory.Core().V1().Services().Lister(), - endpointLister: syncerNamespaceInformerFactory.Core().V1().Endpoints().Lister(), - networkPolicyLister: syncerNamespaceInformerFactory.Networking().V1().NetworkPolicies().Lister(), - syncTargetName: syncTargetName, - syncTargetUID: syncTargetUID, - dnsNamespace: dnsNamespace, - dnsImage: dnsImage, - } -} - -func (d *DNSProcessor) ServiceLister() listerscorev1.ServiceLister { - return d.serviceLister -} - -// EnsureDNSUpAndReady creates all DNS-related resources if necessary. -// It also checks that the DNS Deployment for this workspace -// are effectively reachable through the Service. -// It returns true if the DNS is setup and reachable, and returns an error if there was an error -// during the check or creation of the DNS-related resources. 
-func (d *DNSProcessor) EnsureDNSUpAndReady(ctx context.Context, namespaceLocator shared.NamespaceLocator) (bool, error) { - logger := klog.FromContext(ctx) - logger = logger.WithName("dns") - - dnsID := shared.GetDNSID(namespaceLocator.ClusterName, d.syncTargetUID, d.syncTargetName) - logger = logger.WithValues("name", dnsID, "namespace", d.dnsNamespace) - - logger.V(4).Info("checking if all dns objects exist and are up-to-date") - ctx = klog.NewContext(ctx, logger) - - // Try updating resources if not done already - if initialized, ok := d.initialized.Load(dnsID); !ok || !initialized.(bool) { - updated, err := d.lockMayUpdate(ctx, dnsID) - if updated { - return false, err - } - } - - // Get the expected Endpoints resource - endpoints, err := d.endpointLister.Endpoints(d.dnsNamespace).Get(dnsID) - if err == nil { - // DNS is ready if the Endpoints resource has at least one ready address - return hasAtLeastOneReadyAddress(endpoints), nil - } - - if !apierrors.IsNotFound(err) { - return false, err - } - - // No Endpoints resource was found: try to create all the DNS-related resources - if err := d.processServiceAccount(ctx, dnsID); err != nil { - return false, err - } - if err := d.processRole(ctx, dnsID); err != nil { - return false, err - } - if err := d.processRoleBinding(ctx, dnsID); err != nil { - return false, err - } - if err := d.processDeployment(ctx, dnsID); err != nil { - return false, err - } - if err := d.processService(ctx, dnsID); err != nil { - return false, err - } - if err := d.processNetworkPolicy(ctx, dnsID, namespaceLocator); err != nil { - return false, err - } - - // Since the Endpoints resource was not found, the DNS is not yet ready, - // even though all the required resources have been created - // (deployment still needs to start). 
- return false, nil -} - -func (d *DNSProcessor) processServiceAccount(ctx context.Context, name string) error { - logger := klog.FromContext(ctx) - - _, err := d.serviceAccountLister.ServiceAccounts(d.dnsNamespace).Get(name) - if apierrors.IsNotFound(err) { - expected := MakeServiceAccount(name, d.dnsNamespace) - _, err = d.downstreamKubeClient.CoreV1().ServiceAccounts(d.dnsNamespace).Create(ctx, expected, metav1.CreateOptions{}) - if err == nil { - logger.Info("ServiceAccount created") - } - } - if err != nil && !apierrors.IsAlreadyExists(err) { - logger.Error(err, "failed to get ServiceAccount (retrying)") - return err - } - - return nil -} - -func (d *DNSProcessor) processRole(ctx context.Context, name string) error { - logger := klog.FromContext(ctx) - - _, err := d.roleLister.Roles(d.dnsNamespace).Get(name) - if apierrors.IsNotFound(err) { - expected := MakeRole(name, d.dnsNamespace) - _, err = d.downstreamKubeClient.RbacV1().Roles(d.dnsNamespace).Create(ctx, expected, metav1.CreateOptions{}) - if err == nil { - logger.Info("Role created") - } - } - if err != nil && !apierrors.IsAlreadyExists(err) { - logger.Error(err, "failed to get Role (retrying)") - return err - } - - return nil -} - -func (d *DNSProcessor) processRoleBinding(ctx context.Context, name string) error { - logger := klog.FromContext(ctx) - - _, err := d.roleBindingLister.RoleBindings(d.dnsNamespace).Get(name) - if apierrors.IsNotFound(err) { - expected := MakeRoleBinding(name, d.dnsNamespace) - _, err = d.downstreamKubeClient.RbacV1().RoleBindings(d.dnsNamespace).Create(ctx, expected, metav1.CreateOptions{}) - if err == nil { - logger.Info("RoleBinding created") - } - } - if err != nil && !apierrors.IsAlreadyExists(err) { - logger.Error(err, "failed to get RoleBinding (retrying)") - return err - } - - return nil -} - -func (d *DNSProcessor) processDeployment(ctx context.Context, name string) error { - logger := klog.FromContext(ctx) - - _, err := d.deploymentLister.Deployments(d.dnsNamespace).Get(name) - if apierrors.IsNotFound(err) { - expected := MakeDeployment(name, d.dnsNamespace, d.dnsImage) - _, err = d.downstreamKubeClient.AppsV1().Deployments(d.dnsNamespace).Create(ctx, expected, metav1.CreateOptions{}) - if err == nil { - logger.Info("Deployment created") - } - } - if err != nil && !apierrors.IsAlreadyExists(err) { - logger.Error(err, "failed to get Deployment (retrying)") - return err - } - - return nil -} - -func (d *DNSProcessor) processService(ctx context.Context, name string) error { - logger := klog.FromContext(ctx) - - _, err := d.serviceLister.Services(d.dnsNamespace).Get(name) - if apierrors.IsNotFound(err) { - expected := MakeService(name, d.dnsNamespace) - _, err = d.downstreamKubeClient.CoreV1().Services(d.dnsNamespace).Create(ctx, expected, metav1.CreateOptions{}) - if err == nil { - logger.Info("Service created") - } - } - if err != nil && !apierrors.IsAlreadyExists(err) { - logger.Error(err, "failed to get Service (retrying)") - return err - } - - return nil -} - -func (d *DNSProcessor) processNetworkPolicy(ctx context.Context, name string, namespaceLocator shared.NamespaceLocator) error { - logger := klog.FromContext(ctx) - - var kubeEndpoints *corev1.Endpoints - _, err := d.networkPolicyLister.NetworkPolicies(d.dnsNamespace).Get(name) - if apierrors.IsNotFound(err) { - kubeEndpoints, err = d.downstreamKubeClient.CoreV1().Endpoints("default").Get(ctx, "kubernetes", metav1.GetOptions{}) - if err != nil { - return err - } - if len(kubeEndpoints.Subsets) == 0 || 
len(kubeEndpoints.Subsets[0].Addresses) == 0 { - return errors.New("missing kubernetes API endpoints") - } - - tenantID, err := shared.GetTenantID(namespaceLocator) - if err != nil { - return err - } - - expected := MakeNetworkPolicy(name, d.dnsNamespace, tenantID, &kubeEndpoints.Subsets[0]) - _, err = d.downstreamKubeClient.NetworkingV1().NetworkPolicies(d.dnsNamespace).Create(ctx, expected, metav1.CreateOptions{}) - if err == nil { - logger.Info("NetworkPolicy created") - } - } - if err != nil && !apierrors.IsAlreadyExists(err) { - logger.Error(err, "failed to get NetworkPolicy (retrying)") - return err - } - - return nil -} - -func hasAtLeastOneReadyAddress(endpoints *corev1.Endpoints) bool { - for _, s := range endpoints.Subsets { - if len(s.Addresses) > 0 && s.Addresses[0].IP != "" { - return true - } - } - return false -} - -// lockMayUpdate guarantees mayUpdate is run in a critical section. -// It returns true when the DNS deployment has been updated. -func (d *DNSProcessor) lockMayUpdate(ctx context.Context, dnsID string) (bool, error) { - d.initializationMu.Lock() - defer d.initializationMu.Unlock() - - // initialized may have been modified outside the critical section so checking again here - if initialized, ok := d.initialized.Load(dnsID); !ok || !initialized.(bool) { - updated, err := d.mayUpdate(ctx, dnsID) - - if err != nil { - return true, err - } - - d.initialized.Store(dnsID, true) - - if updated { - // The endpoint might temporarily be without ready addresses, depending on the - // deployment strategy. Anyhow, gives some time for the system to stabilize - - return true, nil - } - } - return false, nil -} - -func (d *DNSProcessor) mayUpdate(ctx context.Context, name string) (bool, error) { - deployment, err := d.deploymentLister.Deployments(d.dnsNamespace).Get(name) - if apierrors.IsNotFound(err) { - return false, nil - } - deployment = deployment.DeepCopy() - needsUpdate := false - c := findContainer(deployment, "kcp-dns") - - if c == nil { - // corrupted deployment. Trying to recover - expected := MakeDeployment(name, d.dnsNamespace, d.dnsImage) - deployment.Spec = expected.Spec - needsUpdate = true - } else if c.Image != d.dnsImage { - c.Image = d.dnsImage - needsUpdate = true - } - - if !needsUpdate { - return false, nil - } - - logger := klog.FromContext(ctx) - - _, err = d.downstreamKubeClient.AppsV1().Deployments(d.dnsNamespace).Update(ctx, deployment, metav1.UpdateOptions{}) - if err != nil { - logger.Error(err, "failed to update Deployment (retrying)") - return false, err - } - - logger.Info("Deployment updated") - return true, nil -} - -func findContainer(deployment *appsv1.Deployment, name string) *corev1.Container { - containers := deployment.Spec.Template.Spec.Containers - - for i := 0; i < len(containers); i++ { - if containers[i].Name == name { - return &containers[i] - } - } - return nil -} diff --git a/pkg/syncer/spec/dns/dns_process_test.go b/pkg/syncer/spec/dns/dns_process_test.go deleted file mode 100644 index c55f2693618..00000000000 --- a/pkg/syncer/spec/dns/dns_process_test.go +++ /dev/null @@ -1,288 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
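Each processXxx method of the DNSProcessor above follows one get-or-create shape. A sketch of that shape for ServiceAccounts, assuming the MakeServiceAccount helper from the deleted resources.go; ensureServiceAccount is a hypothetical name. It reads through the lister first, creates on NotFound, and tolerates AlreadyExists races with another creator.

package dns

import (
    "context"

    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    listerscorev1 "k8s.io/client-go/listers/core/v1"
)

// ensureServiceAccount creates the ServiceAccount only if the lister does
// not already know it, matching the shape of processServiceAccount above.
func ensureServiceAccount(ctx context.Context, client kubernetes.Interface, lister listerscorev1.ServiceAccountLister, namespace, name string) error {
    _, err := lister.ServiceAccounts(namespace).Get(name)
    if err == nil {
        return nil // already present
    }
    if !apierrors.IsNotFound(err) {
        return err
    }
    _, err = client.CoreV1().ServiceAccounts(namespace).Create(ctx, MakeServiceAccount(name, namespace), metav1.CreateOptions{})
    if apierrors.IsAlreadyExists(err) {
        return nil // another worker created it concurrently
    }
    return err
}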
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package dns - -import ( - "context" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "github.com/kcp-dev/logicalcluster/v3" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/informers" - kubefake "k8s.io/client-go/kubernetes/fake" - clienttesting "k8s.io/client-go/testing" - - "github.com/kcp-dev/kcp/pkg/syncer/shared" -) - -var ( - scheme *runtime.Scheme - serviceAccountGVR = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "serviceaccounts"} - roleGVR = schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "roles"} - roleBindingGVR = schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "rolebindings"} - serviceGVR = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "services"} - deploymentGVR = schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"} - endpointGVR = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "endpoints"} - networkPolicyGVR = schema.GroupVersionResource{Group: "networking.k8s.io", Version: "v1", Resource: "networkpolicies"} -) - -func init() { - scheme = runtime.NewScheme() - _ = corev1.AddToScheme(scheme) - _ = appsv1.AddToScheme(scheme) -} - -func TestDNSProcess(t *testing.T) { - clusterName := logicalcluster.Name("root") - syncTargetClusterName := logicalcluster.Name("targetclustername") - syncTargetUID := types.UID("targetuid") - syncTargetName := "targetname" - - locator := shared.NewNamespaceLocator(clusterName, syncTargetClusterName, syncTargetUID, syncTargetName, "") - tenantID, err := shared.GetTenantID(locator) - require.NoError(t, err) - - dnsID := shared.GetDNSID(clusterName, syncTargetUID, syncTargetName) - dnsns := "dnsns" - - tests := map[string]struct { - resources []runtime.Object - initialized bool - expectReady bool - expectActions []clienttesting.Action - dnsImage string - }{ - "endpoint is ready": { - resources: []runtime.Object{ - endpoints(dnsID, dnsns, "8.8.8.8"), - }, - expectReady: true, - expectActions: []clienttesting.Action{}, - initialized: true, - dnsImage: "dnsimage", - }, - "endpoint exists but not ready": { - resources: []runtime.Object{ - endpoints(dnsID, dnsns, ""), - }, - expectReady: false, - expectActions: []clienttesting.Action{}, - initialized: true, - dnsImage: "dnsimage", - }, - "endpoint exist, DNS objects exists, updating with no changes": { - resources: []runtime.Object{ - MakeServiceAccount(dnsID, dnsns), - MakeRole(dnsID, dnsns), - MakeRoleBinding(dnsID, dnsns), - MakeService(dnsID, dnsns), - MakeDeployment(dnsID, dnsns, "dnsimage"), - endpoints(dnsID, dnsns, "8.8.8.8"), - MakeNetworkPolicy(dnsID, dnsns, tenantID, &corev1.EndpointSubset{}), - }, - expectReady: true, - expectActions: []clienttesting.Action{}, - initialized: false, - dnsImage: "dnsimage", - }, - "endpoint exist, DNS objects exists, updating 
with changes": { - resources: []runtime.Object{ - MakeServiceAccount(dnsID, dnsns), - MakeRole(dnsID, dnsns), - MakeRoleBinding(dnsID, dnsns), - MakeService(dnsID, dnsns), - MakeDeployment(dnsID, dnsns, "dnsimage"), - endpoints(dnsID, dnsns, "8.8.8.8"), - MakeNetworkPolicy(dnsID, dnsns, tenantID, &corev1.EndpointSubset{}), - }, - expectReady: false, - expectActions: []clienttesting.Action{ - clienttesting.NewUpdateAction(deploymentGVR, dnsns, MakeDeployment(dnsID, dnsns, "newdnsimage")), - }, - initialized: false, - dnsImage: "newdnsimage", - }, - "endpoint does not exist, no DNS objects": { - resources: []runtime.Object{ - endpoints("kubernetes", "default", "10.0.0.0"), - }, - expectReady: false, - expectActions: []clienttesting.Action{ - clienttesting.NewCreateAction(serviceAccountGVR, dnsns, MakeServiceAccount(dnsID, dnsns)), - clienttesting.NewCreateAction(roleGVR, dnsns, MakeRole(dnsID, dnsns)), - clienttesting.NewCreateAction(roleBindingGVR, dnsns, MakeRoleBinding(dnsID, dnsns)), - clienttesting.NewCreateAction(deploymentGVR, dnsns, MakeDeployment(dnsID, dnsns, "dnsimage")), - clienttesting.NewCreateAction(serviceGVR, dnsns, MakeService(dnsID, dnsns)), - clienttesting.NewGetAction(endpointGVR, "default", "kubernetes"), - clienttesting.NewCreateAction(networkPolicyGVR, dnsns, MakeNetworkPolicy(dnsID, dnsns, tenantID, &corev1.EndpointSubset{ - Addresses: []corev1.EndpointAddress{{IP: "10.0.0.0"}}, - })), - }, - initialized: true, - dnsImage: "dnsimage", - }, - "endpoint does not exist, DNS objects exists, no updates": { - resources: []runtime.Object{ - MakeServiceAccount(dnsID, dnsns), - MakeRole(dnsID, dnsns), - MakeRoleBinding(dnsID, dnsns), - MakeService(dnsID, dnsns), - MakeDeployment(dnsID, dnsns, "dnsimage"), - MakeNetworkPolicy(dnsID, dnsns, tenantID, &corev1.EndpointSubset{}), - }, - expectReady: false, - expectActions: []clienttesting.Action{}, - initialized: true, - dnsImage: "dnsimage", - }, - "endpoint does not exist, DNS objects exists, updating with no changes": { - resources: []runtime.Object{ - MakeServiceAccount(dnsID, dnsns), - MakeRole(dnsID, dnsns), - MakeRoleBinding(dnsID, dnsns), - MakeService(dnsID, dnsns), - MakeDeployment(dnsID, dnsns, "dnsimage"), - MakeNetworkPolicy(dnsID, dnsns, tenantID, &corev1.EndpointSubset{}), - }, - expectReady: false, - expectActions: []clienttesting.Action{}, - initialized: false, - dnsImage: "dnsimage", - }, - "endpoint does not exist, DNS objects exists, updating with changes": { - resources: []runtime.Object{ - MakeServiceAccount(dnsID, dnsns), - MakeRole(dnsID, dnsns), - MakeRoleBinding(dnsID, dnsns), - MakeService(dnsID, dnsns), - MakeDeployment(dnsID, dnsns, "dnsimage"), - MakeNetworkPolicy(dnsID, dnsns, tenantID, &corev1.EndpointSubset{}), - }, - expectReady: false, - expectActions: []clienttesting.Action{ - clienttesting.NewUpdateAction(deploymentGVR, dnsns, MakeDeployment(dnsID, dnsns, "newdnsimage")), - }, - initialized: false, - dnsImage: "newdnsimage", - }, - } - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - kubeClient := kubefake.NewSimpleClientset(tc.resources...) 
- - // informerFactory to watch some DNS-related resources in the dns namespace - informerFactory := informers.NewSharedInformerFactoryWithOptions(kubeClient, time.Hour, informers.WithNamespace(dnsns)) - - controller := NewDNSProcessor(kubeClient, informerFactory, syncTargetName, syncTargetUID, - dnsns, tc.dnsImage) - - controller.initialized.Store(dnsID, tc.initialized) - - informerFactory.Start(ctx.Done()) - informerFactory.WaitForCacheSync(ctx.Done()) - - kubeClient.ClearActions() - - ready, err := controller.EnsureDNSUpAndReady(ctx, locator) - assert.NoError(t, err) - - assert.Empty(t, cmp.Diff(tc.expectReady, ready)) - assert.Empty(t, cmp.Diff(tc.expectActions, kubeClient.Actions())) - }) - } -} - -func TestMultipleDNSInitialization(t *testing.T) { - syncTargetClusterName := logicalcluster.Name("targetclustername") - syncTargetUID := types.UID("targetuid") - syncTargetName := "targetname" - dnsns := "dnsns" - - clusterName1 := logicalcluster.Name("root1") - clusterName2 := logicalcluster.Name("root2") - - locator1 := shared.NewNamespaceLocator(clusterName1, syncTargetClusterName, syncTargetUID, syncTargetName, "") - locator2 := shared.NewNamespaceLocator(clusterName2, syncTargetClusterName, syncTargetUID, syncTargetName, "") - - dnsID1 := shared.GetDNSID(clusterName1, syncTargetUID, syncTargetName) - dnsID2 := shared.GetDNSID(clusterName2, syncTargetUID, syncTargetName) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - kubeClient := kubefake.NewSimpleClientset( - endpoints(dnsID1, dnsns, "8.8.8.8"), - endpoints(dnsID2, dnsns, "8.8.8.9")) - - // informerFactory to watch some DNS-related resources in the dns namespace - informerFactory := informers.NewSharedInformerFactoryWithOptions(kubeClient, time.Hour, informers.WithNamespace(dnsns)) - - controller := NewDNSProcessor(kubeClient, informerFactory, syncTargetName, syncTargetUID, - dnsns, "animage") - - informerFactory.Start(ctx.Done()) - informerFactory.WaitForCacheSync(ctx.Done()) - - ready, err := controller.EnsureDNSUpAndReady(ctx, locator1) - assert.NoError(t, err) - assert.True(t, ready) - init1, _ := controller.initialized.Load(dnsID1) - assert.True(t, init1.(bool)) - init2, _ := controller.initialized.Load(dnsID2) - assert.Nil(t, init2) - - ready, err = controller.EnsureDNSUpAndReady(ctx, locator2) - assert.NoError(t, err) - assert.True(t, ready) - init1, _ = controller.initialized.Load(dnsID1) - assert.True(t, init1.(bool)) - init2, _ = controller.initialized.Load(dnsID2) - assert.True(t, init2.(bool)) -} - -func endpoints(name, namespace, ip string) *corev1.Endpoints { - endpoint := &corev1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - } - if ip != "" { - endpoint.Subsets = []corev1.EndpointSubset{ - { - Addresses: []corev1.EndpointAddress{ - { - IP: ip, - }}, - }, - } - } - return endpoint -} diff --git a/pkg/syncer/spec/dns/networkpolicy_dns.yaml b/pkg/syncer/spec/dns/networkpolicy_dns.yaml deleted file mode 100644 index a388e383ad4..00000000000 --- a/pkg/syncer/spec/dns/networkpolicy_dns.yaml +++ /dev/null @@ -1,45 +0,0 @@ -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: Name - namespace: Namespace -spec: - podSelector: - matchLabels: - app: Name - policyTypes: - - Ingress - - Egress - ingress: - - from: - - namespaceSelector: - matchLabels: - kcp.io/tenant-id: TenantID - ports: - - protocol: TCP - port: 5353 - - protocol: UDP - port: 5353 - egress: - # Only give access to coredns in kube-system - - to: - - namespaceSelector: 
- matchLabels: - kubernetes.io/metadata.name: kube-system - - podSelector: - matchLabels: - k8s-app: kube-dns - ports: - - protocol: TCP - port: 53 - - protocol: UDP - port: 53 - # Give access to the API server to watch its associated configmap - - to: - # one ipBlock per IP (dynamically filled) - - ipBlock: - cidr: APIServerIP - ports: - - protocol: TCP - port: 6443 - diff --git a/pkg/syncer/spec/dns/resources.go b/pkg/syncer/spec/dns/resources.go deleted file mode 100644 index c191b1588cd..00000000000 --- a/pkg/syncer/spec/dns/resources.go +++ /dev/null @@ -1,161 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package dns - -import ( - "bytes" - "embed" - "fmt" - - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - networkingv1 "k8s.io/api/networking/v1" - rbacv1 "k8s.io/api/rbac/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/apimachinery/pkg/util/yaml" - - "github.com/kcp-dev/kcp/pkg/syncer/shared" -) - -//go:embed *.yaml -var dnsFiles embed.FS - -var ( - serviceAccountTemplate corev1.ServiceAccount - roleTemplate rbacv1.Role - roleBindingTemplate rbacv1.RoleBinding - deploymentTemplate appsv1.Deployment - serviceTemplate corev1.Service - networkPolicyTemplate networkingv1.NetworkPolicy -) - -func init() { - loadTemplateOrDie("serviceaccount_dns.yaml", &serviceAccountTemplate) - loadTemplateOrDie("role_dns.yaml", &roleTemplate) - loadTemplateOrDie("rolebinding_dns.yaml", &roleBindingTemplate) - loadTemplateOrDie("deployment_dns.yaml", &deploymentTemplate) - loadTemplateOrDie("service_dns.yaml", &serviceTemplate) - loadTemplateOrDie("networkpolicy_dns.yaml", &networkPolicyTemplate) -} - -func MakeServiceAccount(name, namespace string) *corev1.ServiceAccount { - sa := serviceAccountTemplate.DeepCopy() - - sa.Name = name - sa.Namespace = namespace - - return sa -} - -func MakeRole(name, namespace string) *rbacv1.Role { - role := roleTemplate.DeepCopy() - - role.Name = name - role.Namespace = namespace - role.Rules[0].ResourceNames[0] = name - - return role -} - -func MakeRoleBinding(name, namespace string) *rbacv1.RoleBinding { - roleBinding := roleBindingTemplate.DeepCopy() - - roleBinding.Name = name - roleBinding.Namespace = namespace - roleBinding.RoleRef.Name = name - roleBinding.Subjects[0].Name = name - roleBinding.Subjects[0].Namespace = namespace - - return roleBinding -} - -func MakeDeployment(name, namespace, image string) *appsv1.Deployment { - deployment := deploymentTemplate.DeepCopy() - - deployment.Name = name - deployment.Namespace = namespace - deployment.Spec.Selector.MatchLabels["app"] = name - deployment.Spec.Template.Labels["app"] = name - deployment.Spec.Template.Spec.Containers[0].Image = image - deployment.Spec.Template.Spec.Containers[0].Args[3] = name - deployment.Spec.Template.Spec.ServiceAccountName = name - - return deployment -} - -func MakeService(name, namespace string) *corev1.Service { - service := 
serviceTemplate.DeepCopy() - - service.Name = name - service.Namespace = namespace - service.Labels["app"] = name - service.Spec.Selector["app"] = name - - return service -} - -func MakeNetworkPolicy(name, namespace, tenantID string, kubeEndpoints *corev1.EndpointSubset) *networkingv1.NetworkPolicy { - np := networkPolicyTemplate.DeepCopy() - - np.Name = name - np.Namespace = namespace - np.Spec.PodSelector.MatchLabels["app"] = name - np.Spec.Ingress[0].From[0].NamespaceSelector.MatchLabels[shared.TenantIDLabel] = tenantID - - to := make([]networkingv1.NetworkPolicyPeer, len(kubeEndpoints.Addresses)) - for i, endpoint := range kubeEndpoints.Addresses { - to[i] = networkingv1.NetworkPolicyPeer{ - IPBlock: &networkingv1.IPBlock{ - CIDR: endpoint.IP + "/32", - }, - } - } - np.Spec.Egress[1].To = to - - ports := make([]networkingv1.NetworkPolicyPort, len(kubeEndpoints.Ports)) - for i, port := range kubeEndpoints.Ports { - pport := intstr.FromInt(int(port.Port)) - ports[i].Port = &pport - pprotocol := port.Protocol - ports[i].Protocol = &pprotocol - } - np.Spec.Egress[1].Ports = ports - - return np -} - -// load a YAML resource into a typed kubernetes object. -func loadTemplateOrDie(filename string, obj interface{}) { - raw, err := dnsFiles.ReadFile(filename) - if err != nil { - panic(fmt.Sprintf("failed to read file: %v", err)) - } - decoder := yaml.NewYAMLToJSONDecoder(bytes.NewReader(raw)) - - var u unstructured.Unstructured - err = decoder.Decode(&u) - if err != nil { - panic(fmt.Sprintf("failed to decode file: %v", err)) - } - - err = runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, obj) - if err != nil { - panic(fmt.Sprintf("failed to convert object: %v", err)) - } -} diff --git a/pkg/syncer/spec/dns/role_dns.yaml b/pkg/syncer/spec/dns/role_dns.yaml deleted file mode 100644 index 0a87f0b6d76..00000000000 --- a/pkg/syncer/spec/dns/role_dns.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: Name -rules: - - apiGroups: - - "" - resources: - - configmaps - resourceNames: - - ConfigMapName - verbs: - - "get" - - "list" - - "watch" diff --git a/pkg/syncer/spec/dns/rolebinding_dns.yaml b/pkg/syncer/spec/dns/rolebinding_dns.yaml deleted file mode 100644 index b86ccb64a77..00000000000 --- a/pkg/syncer/spec/dns/rolebinding_dns.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: Name -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: Name -subjects: - - kind: ServiceAccount - name: Name - namespace: Name diff --git a/pkg/syncer/spec/dns/service_dns.yaml b/pkg/syncer/spec/dns/service_dns.yaml deleted file mode 100644 index 10f07bdbd5a..00000000000 --- a/pkg/syncer/spec/dns/service_dns.yaml +++ /dev/null @@ -1,20 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: Name - namespace: Namespace - labels: - app: Name -spec: - type: ClusterIP - selector: - app: Name - ports: - - name: dns - port: 53 - protocol: UDP - targetPort: 5353 - - name: dns-tcp - port: 53 - protocol: TCP - targetPort: 5353 diff --git a/pkg/syncer/spec/dns/serviceaccount_dns.yaml b/pkg/syncer/spec/dns/serviceaccount_dns.yaml deleted file mode 100644 index 0bc9108479a..00000000000 --- a/pkg/syncer/spec/dns/serviceaccount_dns.yaml +++ /dev/null @@ -1,5 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: Name - namespace: Namespace diff --git a/pkg/syncer/spec/mutators/podspecable.go b/pkg/syncer/spec/mutators/podspecable.go deleted file mode 100644 index 
5b7cc35a438..00000000000 --- a/pkg/syncer/spec/mutators/podspecable.go +++ /dev/null @@ -1,404 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package mutators - -import ( - "fmt" - "net/url" - "sort" - - "github.com/kcp-dev/logicalcluster/v3" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - listerscorev1 "k8s.io/client-go/listers/core/v1" - utilspointer "k8s.io/utils/pointer" - - ddsif "github.com/kcp-dev/kcp/pkg/informer" - "github.com/kcp-dev/kcp/pkg/syncer/shared" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" -) - -type ListSecretFunc func(clusterName logicalcluster.Name, namespace string) ([]runtime.Object, error) -type GetWorkspaceURLFunc func(obj *unstructured.Unstructured) (*url.URL, error) -type GetClusterDDSIFFunc func(clusterName logicalcluster.Name) (*ddsif.DiscoveringDynamicSharedInformerFactory, error) - -type PodSpecableMutator struct { - getWorkspaceURL GetWorkspaceURLFunc - listSecrets ListSecretFunc - serviceLister listerscorev1.ServiceLister - syncTargetClusterName logicalcluster.Name - syncTargetUID types.UID - syncTargetName string - dnsNamespace string - upsyncPods bool -} - -func (dm *PodSpecableMutator) GVRs() []schema.GroupVersionResource { - return []schema.GroupVersionResource{ - { - Group: "apps", - Version: "v1", - Resource: "deployments", - }, - { - Group: "apps", - Version: "v1", - Resource: "statefulsets", - }, - { - Group: "apps", - Version: "v1", - Resource: "replicasets", - }, - } -} - -func NewPodspecableMutator(ddsifForUpstreamSyncer GetClusterDDSIFFunc, serviceLister listerscorev1.ServiceLister, - syncTargetClusterName logicalcluster.Name, syncTargetName string, syncTargetUID types.UID, - dnsNamespace string, upsyncPods bool) *PodSpecableMutator { - secretsGVR := corev1.SchemeGroupVersion.WithResource("secrets") - return &PodSpecableMutator{ - getWorkspaceURL: func(obj *unstructured.Unstructured) (*url.URL, error) { - workspaceURL, ok := obj.GetAnnotations()[workloadv1alpha1.InternalWorkspaceURLAnnotationKey] - if !ok { - return nil, fmt.Errorf("annotation %q not found on %s|%s/%s", workloadv1alpha1.InternalWorkspaceURLAnnotationKey, logicalcluster.From(obj), obj.GetNamespace(), obj.GetName()) - } - return url.Parse(workspaceURL) - }, - listSecrets: func(clusterName logicalcluster.Name, namespace string) ([]runtime.Object, error) { - ddsif, err := ddsifForUpstreamSyncer(clusterName) - if err != nil { - return nil, err - } - informers, notSynced := ddsif.Informers() - informer, ok := informers[secretsGVR] - if !ok { - if shared.ContainsGVR(notSynced, secretsGVR) { - return nil, fmt.Errorf("informer for gvr %v not synced in the upstream syncer informer factory", secretsGVR) - } - return nil, fmt.Errorf("gvr %v should be known in the upstream syncer informer factory", secretsGVR) - } - return 
informer.Lister().ByCluster(clusterName).ByNamespace(namespace).List(labels.Everything()) - }, - serviceLister: serviceLister, - syncTargetClusterName: syncTargetClusterName, - syncTargetUID: syncTargetUID, - syncTargetName: syncTargetName, - dnsNamespace: dnsNamespace, - upsyncPods: upsyncPods, - } -} - -// Mutate applies the mutator changes to the object. -func (dm *PodSpecableMutator) Mutate(obj *unstructured.Unstructured) error { - podTemplateUnstr, ok, err := unstructured.NestedMap(obj.UnstructuredContent(), "spec", "template") - if err != nil { - return err - } - if !ok { - return fmt.Errorf("object should have a PodTemplate.Spec 'spec.template', but doesn't: %v", obj) - } - - podTemplate := &corev1.PodTemplateSpec{} - err = runtime.DefaultUnstructuredConverter.FromUnstructured( - podTemplateUnstr, - &podTemplate) - if err != nil { - return err - } - upstreamLogicalName := logicalcluster.From(obj) - - desiredServiceAccountName := "default" - if podTemplate.Spec.ServiceAccountName != "" && podTemplate.Spec.ServiceAccountName != "default" { - desiredServiceAccountName = podTemplate.Spec.ServiceAccountName - } - - rawSecretList, err := dm.listSecrets(upstreamLogicalName, obj.GetNamespace()) - if err != nil { - return fmt.Errorf("error listing secrets for workspace %s: %w", upstreamLogicalName.String(), err) - } - - secretList := make([]*unstructured.Unstructured, 0, len(rawSecretList)) - for i := range rawSecretList { - secretList = append(secretList, rawSecretList[i].(*unstructured.Unstructured)) - } - - // In order to avoid triggering a deployment update on resyncs, we need to make sure that the list - // of secrets is sorted by creationTimsestamp. So if the user creates a new token for a given serviceaccount - // the first one will be picked always. - sort.Slice(secretList, func(i, j int) bool { - iCreationTimestamp := secretList[i].GetCreationTimestamp() - jCreationTimestamp := secretList[j].GetCreationTimestamp() - return iCreationTimestamp.Before(&jCreationTimestamp) - }) - - desiredSecretName := "" - for _, secret := range secretList { - // Find the SA token that matches the service account name. - if val, ok := secret.GetAnnotations()[corev1.ServiceAccountNameKey]; ok && val == desiredServiceAccountName { - if desiredServiceAccountName == "default" { - desiredSecretName = "kcp-" + secret.GetName() - break - } - desiredSecretName = secret.GetName() - break - } - } - - if desiredSecretName == "" { - return fmt.Errorf("couldn't find a token upstream for the serviceaccount %s/%s in workspace %s", desiredServiceAccountName, obj.GetNamespace(), upstreamLogicalName.String()) - } - - // Setting AutomountServiceAccountToken to false allow us to control the ServiceAccount - // VolumeMount and Volume definitions. - podTemplate.Spec.AutomountServiceAccountToken = utilspointer.Bool(false) - // Set to empty the serviceAccountName on podTemplate as we are not syncing the serviceAccount down to the workload cluster. 
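	// The downstream pod authenticates back to kcp with the token from the
	// projected "kcp-api-access" volume built below, so keeping the upstream
	// ServiceAccount name here would only leave a dangling reference on the
	// workload cluster.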
- podTemplate.Spec.ServiceAccountName = "" - - workspaceURL, err := dm.getWorkspaceURL(obj) - if err != nil { - return err - } - - kcpExternalHost := workspaceURL.Hostname() - kcpExternalPort := workspaceURL.Port() - - overrideEnvs := []corev1.EnvVar{ - {Name: "KUBERNETES_SERVICE_PORT", Value: kcpExternalPort}, - {Name: "KUBERNETES_SERVICE_PORT_HTTPS", Value: kcpExternalPort}, - {Name: "KUBERNETES_SERVICE_HOST", Value: kcpExternalHost}, - } - - // This is the VolumeMount that we will append to all the containers of the deployment - serviceAccountMount := corev1.VolumeMount{ - Name: "kcp-api-access", - MountPath: "/var/run/secrets/kubernetes.io/serviceaccount", - ReadOnly: true, - } - - // This is the Volume that we will add to the Deployment in order to control - // the name of the ca.crt references (kcp-root-ca.crt vs kube-root-ca.crt) - // and the serviceaccount reference. - serviceAccountVolume := corev1.Volume{ - Name: "kcp-api-access", - VolumeSource: corev1.VolumeSource{ - Projected: &corev1.ProjectedVolumeSource{ - DefaultMode: utilspointer.Int32(420), - Sources: []corev1.VolumeProjection{ - { - Secret: &corev1.SecretProjection{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: desiredSecretName, - }, - Items: []corev1.KeyToPath{ - { - Key: "token", - Path: "token", - }, - { - Key: "namespace", - Path: "namespace", - }, - }, - }, - }, - { - ConfigMap: &corev1.ConfigMapProjection{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "kcp-root-ca.crt", - }, - Items: []corev1.KeyToPath{ - { - Key: "ca.crt", - Path: "ca.crt", - }, - }, - }, - }, - }, - }, - }, - } - - // Override Envs, resolve downwardAPI FieldRef and add the VolumeMount to all the containers - for i := range podTemplate.Spec.Containers { - for _, overrideEnv := range overrideEnvs { - podTemplate.Spec.Containers[i].Env = updateEnv(podTemplate.Spec.Containers[i].Env, overrideEnv) - } - podTemplate.Spec.Containers[i].Env = resolveDownwardAPIFieldRefEnv(podTemplate.Spec.Containers[i].Env, obj) - podTemplate.Spec.Containers[i].VolumeMounts = updateVolumeMount(podTemplate.Spec.Containers[i].VolumeMounts, serviceAccountMount) - } - - // Override Envs, resolve downwardAPI FieldRef and add the VolumeMount to all the Init containers - for i := range podTemplate.Spec.InitContainers { - for _, overrideEnv := range overrideEnvs { - podTemplate.Spec.InitContainers[i].Env = updateEnv(podTemplate.Spec.InitContainers[i].Env, overrideEnv) - } - podTemplate.Spec.InitContainers[i].Env = resolveDownwardAPIFieldRefEnv(podTemplate.Spec.InitContainers[i].Env, obj) - podTemplate.Spec.InitContainers[i].VolumeMounts = updateVolumeMount(podTemplate.Spec.InitContainers[i].VolumeMounts, serviceAccountMount) - } - - // Override Envs, resolve downwardAPI FieldRef and add the VolumeMount to all the Ephemeral containers - for i := range podTemplate.Spec.EphemeralContainers { - for _, overrideEnv := range overrideEnvs { - podTemplate.Spec.EphemeralContainers[i].Env = updateEnv(podTemplate.Spec.EphemeralContainers[i].Env, overrideEnv) - } - podTemplate.Spec.EphemeralContainers[i].Env = resolveDownwardAPIFieldRefEnv(podTemplate.Spec.EphemeralContainers[i].Env, obj) - podTemplate.Spec.EphemeralContainers[i].VolumeMounts = updateVolumeMount(podTemplate.Spec.EphemeralContainers[i].VolumeMounts, serviceAccountMount) - } - - // Add the ServiceAccount volume with our overrides. 
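	// The scan below replaces an existing "kcp-api-access" volume in place and
	// appends one otherwise, keeping Mutate idempotent across resyncs instead of
	// accumulating duplicate volumes.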
- found := false - for i := range podTemplate.Spec.Volumes { - if podTemplate.Spec.Volumes[i].Name == "kcp-api-access" { - podTemplate.Spec.Volumes[i] = serviceAccountVolume - found = true - } - } - if !found { - podTemplate.Spec.Volumes = append(podTemplate.Spec.Volumes, serviceAccountVolume) - } - - // Overrides DNS to point to the workspace DNS - dnsIP, err := dm.getDNSIPForWorkspace(upstreamLogicalName) - if err != nil { - // the DNS nameserver is not ready yet or other transient failure - return err // retry - } - - podTemplate.Spec.DNSPolicy = corev1.DNSNone - podTemplate.Spec.DNSConfig = &corev1.PodDNSConfig{ - Nameservers: []string{dnsIP}, - Searches: []string{ // TODO(LV): from /etc/resolv.conf - obj.GetNamespace() + ".svc.cluster.local", - "svc.cluster.local", - "cluster.local", - }, - Options: []corev1.PodDNSConfigOption{ - { - Name: "ndots", - Value: utilspointer.String("5"), - }, - }, - } - - if dm.upsyncPods { - syncTargetKey := workloadv1alpha1.ToSyncTargetKey(dm.syncTargetClusterName, dm.syncTargetName) - labels := podTemplate.Labels - if labels == nil { - labels = map[string]string{} - } - labels[workloadv1alpha1.ClusterResourceStateLabelPrefix+syncTargetKey] = string(workloadv1alpha1.ResourceStateUpsync) - labels[workloadv1alpha1.InternalDownstreamClusterLabel] = syncTargetKey - podTemplate.Labels = labels - - // TODO(davidfestal): In the future we could add a diff annotation to transform the resource while upsyncing: - // - remove unnecessary fields we don't want leaking to upstream - // - add an owner reference to the upstream deployment - } - - newPodTemplateUnstr, err := runtime.DefaultUnstructuredConverter.ToUnstructured(podTemplate) - if err != nil { - return err - } - - // Set the changes back into the obj. - return unstructured.SetNestedMap(obj.Object, newPodTemplateUnstr, "spec", "template") -} - -func (dm *PodSpecableMutator) getDNSIPForWorkspace(workspace logicalcluster.Name) (string, error) { - // Retrieve the DNS IP associated to the workspace - dnsServiceName := shared.GetDNSID(workspace, dm.syncTargetUID, dm.syncTargetName) - - svc, err := dm.serviceLister.Services(dm.dnsNamespace).Get(dnsServiceName) - if err != nil { - return "", fmt.Errorf("failed to get DNS service: %w", err) - } - - ip := svc.Spec.ClusterIP - if ip == "" { - // not available (yet) - return "", fmt.Errorf("DNS service IP address not found") - } - - return ip, nil -} - -// resolveDownwardAPIFieldRefEnv replaces the downwardAPI FieldRef EnvVars with the value from the deployment, right now it only replaces the metadata.namespace. -func resolveDownwardAPIFieldRefEnv(envs []corev1.EnvVar, podspecable *unstructured.Unstructured) []corev1.EnvVar { - var result []corev1.EnvVar - for _, env := range envs { - if env.ValueFrom != nil && env.ValueFrom.FieldRef != nil && env.ValueFrom.FieldRef.FieldPath == "metadata.namespace" { - result = append(result, corev1.EnvVar{ - Name: env.Name, - Value: podspecable.GetNamespace(), - }) - } else { - result = append(result, env) - } - } - return result -} - -// findEnv finds an env in a list of envs. -func findEnv(envs []corev1.EnvVar, name string) (bool, int) { - for i := range envs { - if envs[i].Name == name { - return true, i - } - } - return false, 0 -} - -// updateEnv updates an env from a list of envs. 
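// For example (values illustrative only), given
//
//	envs := []corev1.EnvVar{{Name: "KUBERNETES_SERVICE_HOST", Value: "10.0.0.1"}}
//
// updateEnv(envs, corev1.EnvVar{Name: "KUBERNETES_SERVICE_HOST", Value: "4.5.6.7"})
// overwrites the existing entry in place, while an env var whose name is not yet
// present is appended.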
-func updateEnv(envs []corev1.EnvVar, overrideEnv corev1.EnvVar) []corev1.EnvVar { - found, i := findEnv(envs, overrideEnv.Name) - if found { - envs[i].Value = overrideEnv.Value - } else { - envs = append(envs, overrideEnv) - } - - return envs -} - -// findVolumeMount finds a volume mount in a list of volume mounts. -func findVolumeMount(volumeMounts []corev1.VolumeMount, name string) (bool, int) { - for i := range volumeMounts { - if volumeMounts[i].Name == name { - return true, i - } - } - return false, 0 -} - -// updateVolumeMount updates a volume mount from a list of volume mounts. -func updateVolumeMount(volumeMounts []corev1.VolumeMount, overrideVolumeMount corev1.VolumeMount) []corev1.VolumeMount { - found, i := findVolumeMount(volumeMounts, overrideVolumeMount.Name) - if found { - volumeMounts[i] = overrideVolumeMount - } else { - volumeMounts = append(volumeMounts, overrideVolumeMount) - } - - return volumeMounts -} diff --git a/pkg/syncer/spec/mutators/podspecable_test.go b/pkg/syncer/spec/mutators/podspecable_test.go deleted file mode 100644 index 10acf2b0e06..00000000000 --- a/pkg/syncer/spec/mutators/podspecable_test.go +++ /dev/null @@ -1,1005 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package mutators - -import ( - "encoding/json" - "fmt" - "net/url" - "testing" - - "github.com/kcp-dev/logicalcluster/v3" - "github.com/stretchr/testify/require" - - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - apiequality "k8s.io/apimachinery/pkg/api/equality" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - listerscorev1 "k8s.io/client-go/listers/core/v1" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/cache" - utilspointer "k8s.io/utils/pointer" - - "github.com/kcp-dev/kcp/pkg/syncer/shared" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" -) - -var kcpApiAccessVolume = corev1.Volume{ - Name: "kcp-api-access", - VolumeSource: corev1.VolumeSource{ - Projected: &corev1.ProjectedVolumeSource{ - DefaultMode: utilspointer.Int32(420), - Sources: []corev1.VolumeProjection{ - { - Secret: &corev1.SecretProjection{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "kcp-default-token-1234", - }, - Items: []corev1.KeyToPath{ - { - Key: "token", - Path: "token", - }, - { - Key: "namespace", - Path: "namespace", - }, - }, - }, - }, - { - ConfigMap: &corev1.ConfigMapProjection{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "kcp-root-ca.crt", - }, - Items: []corev1.KeyToPath{ - { - Key: "ca.crt", - Path: "ca.crt", - }, - }, - }, - }, - }, - }, - }, -} - -var kcpApiAccessVolumeMount = corev1.VolumeMount{ - Name: "kcp-api-access", - MountPath: "/var/run/secrets/kubernetes.io/serviceaccount", - ReadOnly: true, -} - -func TestDeploymentMutate(t *testing.T) { - for _, c := range []struct { - desc string - upstreamSecrets []*corev1.Secret - originalDeployment, expectedDeployment *appsv1.Deployment - config *rest.Config - 
upsyncPods bool - }{{ - desc: "Deployment without Envs or volumes is mutated.", - upstreamSecrets: []*corev1.Secret{ - { - TypeMeta: metav1.TypeMeta{ - Kind: "Secret", - APIVersion: "v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "default-token-1234", - Namespace: "namespace", - Annotations: map[string]string{ - logicalcluster.AnnotationKey: "root:default:testing", - "kubernetes.io/service-account.name": "default", - }, - }, - Data: map[string][]byte{ - "token": []byte("token"), - "namespace": []byte("namespace"), - }, - }, - }, - originalDeployment: &appsv1.Deployment{ - TypeMeta: metav1.TypeMeta{ - Kind: "Deployment", - APIVersion: "apps/v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment", - Namespace: "namespace", - Annotations: map[string]string{ - logicalcluster.AnnotationKey: "root:default:testing", - }, - }, - Spec: appsv1.DeploymentSpec{ - Replicas: new(int32), - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "test-container", - Image: "test-image", - }, - }, - }, - }, - }, - }, - expectedDeployment: &appsv1.Deployment{ - TypeMeta: metav1.TypeMeta{ - Kind: "Deployment", - APIVersion: "apps/v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment", - Namespace: "namespace", - Annotations: map[string]string{ - logicalcluster.AnnotationKey: "root:default:testing", - }, - }, - Spec: appsv1.DeploymentSpec{ - Replicas: new(int32), - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - AutomountServiceAccountToken: utilspointer.Bool(false), - Containers: []corev1.Container{ - { - Name: "test-container", - Image: "test-image", - Env: []corev1.EnvVar{ - { - Name: "KUBERNETES_SERVICE_PORT", - Value: "12345", - }, - { - Name: "KUBERNETES_SERVICE_PORT_HTTPS", - Value: "12345", - }, - { - Name: "KUBERNETES_SERVICE_HOST", - Value: "4.5.6.7", - }, - }, - VolumeMounts: []corev1.VolumeMount{ - kcpApiAccessVolumeMount, - }, - }, - }, - DNSPolicy: corev1.DNSNone, - DNSConfig: &corev1.PodDNSConfig{ - Nameservers: []string{"8.8.8.8"}, - Searches: []string{"namespace.svc.cluster.local", "svc.cluster.local", "cluster.local"}, - Options: []corev1.PodDNSConfigOption{{Name: "ndots", Value: utilspointer.String("5")}}, - }, - Volumes: []corev1.Volume{ - kcpApiAccessVolume, - }, - }, - }, - }, - }, - config: &rest.Config{ - Host: "https://4.5.6.7:12345", - }, - }, { - desc: "Deployment without Envs or volumes is mutated, with pod-related changes if pod upsyncing is on.", - upsyncPods: true, - upstreamSecrets: []*corev1.Secret{ - { - TypeMeta: metav1.TypeMeta{ - Kind: "Secret", - APIVersion: "v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "default-token-1234", - Namespace: "namespace", - Annotations: map[string]string{ - logicalcluster.AnnotationKey: "root:default:testing", - "kubernetes.io/service-account.name": "default", - }, - }, - Data: map[string][]byte{ - "token": []byte("token"), - "namespace": []byte("namespace"), - }, - }, - }, - originalDeployment: &appsv1.Deployment{ - TypeMeta: metav1.TypeMeta{ - Kind: "Deployment", - APIVersion: "apps/v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment", - Namespace: "namespace", - Annotations: map[string]string{ - logicalcluster.AnnotationKey: "root:default:testing", - }, - }, - Spec: appsv1.DeploymentSpec{ - Replicas: new(int32), - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "test-container", - Image: "test-image", - }, - }, - }, - }, - }, - }, - expectedDeployment: &appsv1.Deployment{ - 
TypeMeta: metav1.TypeMeta{ - Kind: "Deployment", - APIVersion: "apps/v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment", - Namespace: "namespace", - Annotations: map[string]string{ - logicalcluster.AnnotationKey: "root:default:testing", - }, - }, - Spec: appsv1.DeploymentSpec{ - Replicas: new(int32), - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "state.workload.kcp.io/" + workloadv1alpha1.ToSyncTargetKey(logicalcluster.Name("root:default:testing"), "syncTargetName"): "Upsync", - "internal.workload.kcp.io/cluster": workloadv1alpha1.ToSyncTargetKey(logicalcluster.Name("root:default:testing"), "syncTargetName"), - }, - }, - Spec: corev1.PodSpec{ - AutomountServiceAccountToken: utilspointer.Bool(false), - Containers: []corev1.Container{ - { - Name: "test-container", - Image: "test-image", - Env: []corev1.EnvVar{ - { - Name: "KUBERNETES_SERVICE_PORT", - Value: "12345", - }, - { - Name: "KUBERNETES_SERVICE_PORT_HTTPS", - Value: "12345", - }, - { - Name: "KUBERNETES_SERVICE_HOST", - Value: "4.5.6.7", - }, - }, - VolumeMounts: []corev1.VolumeMount{ - kcpApiAccessVolumeMount, - }, - }, - }, - DNSPolicy: corev1.DNSNone, - DNSConfig: &corev1.PodDNSConfig{ - Nameservers: []string{"8.8.8.8"}, - Searches: []string{"namespace.svc.cluster.local", "svc.cluster.local", "cluster.local"}, - Options: []corev1.PodDNSConfigOption{{Name: "ndots", Value: utilspointer.String("5")}}, - }, - Volumes: []corev1.Volume{ - kcpApiAccessVolume, - }, - }, - }, - }, - }, - config: &rest.Config{ - Host: "https://4.5.6.7:12345", - }, - }, { - desc: "Deployment with one env var gets mutated but the already existing env var remains the same", - upstreamSecrets: []*corev1.Secret{ - { - TypeMeta: metav1.TypeMeta{ - Kind: "Secret", - APIVersion: "v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "default-token-1234", - Namespace: "namespace", - Annotations: map[string]string{ - logicalcluster.AnnotationKey: "root:default:testing", - "kubernetes.io/service-account.name": "default", - }, - }, - Data: map[string][]byte{ - "token": []byte("token"), - "namespace": []byte("namespace"), - }, - }, - }, - originalDeployment: &appsv1.Deployment{ - TypeMeta: metav1.TypeMeta{ - Kind: "Deployment", - APIVersion: "apps/v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment", - Namespace: "namespace", - Annotations: map[string]string{ - logicalcluster.AnnotationKey: "root:default:testing", - }, - }, - Spec: appsv1.DeploymentSpec{ - Replicas: new(int32), - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "test-container", - Image: "test-image", - Env: []corev1.EnvVar{ - { - Name: "TEST_ENV_VAR", - Value: "test-value", - }, - }, - }, - }, - }, - }, - }, - }, - expectedDeployment: &appsv1.Deployment{ - TypeMeta: metav1.TypeMeta{ - Kind: "Deployment", - APIVersion: "apps/v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment", - Namespace: "namespace", - Annotations: map[string]string{ - logicalcluster.AnnotationKey: "root:default:testing", - }, - }, - Spec: appsv1.DeploymentSpec{ - Replicas: new(int32), - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - AutomountServiceAccountToken: utilspointer.Bool(false), - Containers: []corev1.Container{ - { - Name: "test-container", - Image: "test-image", - Env: []corev1.EnvVar{ - { - Name: "TEST_ENV_VAR", - Value: "test-value", - }, - { - Name: "KUBERNETES_SERVICE_PORT", - Value: "12345", - }, - { - Name: "KUBERNETES_SERVICE_PORT_HTTPS", - Value: 
"12345", - }, - { - Name: "KUBERNETES_SERVICE_HOST", - Value: "4.5.6.7", - }, - }, - VolumeMounts: []corev1.VolumeMount{ - kcpApiAccessVolumeMount, - }, - }, - }, - DNSPolicy: corev1.DNSNone, - DNSConfig: &corev1.PodDNSConfig{ - Nameservers: []string{"8.8.8.8"}, - Searches: []string{"namespace.svc.cluster.local", "svc.cluster.local", "cluster.local"}, - Options: []corev1.PodDNSConfigOption{{Name: "ndots", Value: utilspointer.String("5")}}, - }, - Volumes: []corev1.Volume{ - kcpApiAccessVolume, - }, - }, - }, - }, - }, - config: &rest.Config{ - Host: "https://4.5.6.7:12345", - }, - }, - {desc: "Deployment with an env var named KUBERNETES_SERVICE_PORT gets mutated and it is overridden and not duplicated", - upstreamSecrets: []*corev1.Secret{ - { - TypeMeta: metav1.TypeMeta{ - Kind: "Secret", - APIVersion: "v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "default-token-1234", - Namespace: "namespace", - Annotations: map[string]string{ - logicalcluster.AnnotationKey: "root:default:testing", - "kubernetes.io/service-account.name": "default", - }, - }, - Data: map[string][]byte{ - "token": []byte("token"), - "namespace": []byte("namespace"), - }, - }, - }, - originalDeployment: &appsv1.Deployment{ - TypeMeta: metav1.TypeMeta{ - Kind: "Deployment", - APIVersion: "apps/v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment", - Namespace: "namespace", - Annotations: map[string]string{ - logicalcluster.AnnotationKey: "root:default:testing", - }, - }, - Spec: appsv1.DeploymentSpec{ - Replicas: new(int32), - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "test-container", - Image: "test-image", - Env: []corev1.EnvVar{ - { - Name: "KUBERNETES_SERVICE_PORT", - Value: "99999", - }, - }, - }, - }, - }, - }, - }, - }, - expectedDeployment: &appsv1.Deployment{ - TypeMeta: metav1.TypeMeta{ - Kind: "Deployment", - APIVersion: "apps/v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment", - Namespace: "namespace", - Annotations: map[string]string{ - logicalcluster.AnnotationKey: "root:default:testing", - }, - }, - Spec: appsv1.DeploymentSpec{ - Replicas: new(int32), - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - AutomountServiceAccountToken: utilspointer.Bool(false), - Containers: []corev1.Container{ - { - Name: "test-container", - Image: "test-image", - Env: []corev1.EnvVar{ - { - Name: "KUBERNETES_SERVICE_PORT", - Value: "12345", - }, - { - Name: "KUBERNETES_SERVICE_PORT_HTTPS", - Value: "12345", - }, - { - Name: "KUBERNETES_SERVICE_HOST", - Value: "4.5.6.7", - }, - }, - VolumeMounts: []corev1.VolumeMount{ - kcpApiAccessVolumeMount, - }, - }, - }, - DNSPolicy: corev1.DNSNone, - DNSConfig: &corev1.PodDNSConfig{ - Nameservers: []string{"8.8.8.8"}, - Searches: []string{"namespace.svc.cluster.local", "svc.cluster.local", "cluster.local"}, - Options: []corev1.PodDNSConfigOption{{Name: "ndots", Value: utilspointer.String("5")}}, - }, - Volumes: []corev1.Volume{ - kcpApiAccessVolume, - }, - }, - }, - }, - }, - config: &rest.Config{ - Host: "https://4.5.6.7:12345", - }}, - {desc: "Deployment with an existing VolumeMount named kcp-api-access gets mutated and it is overridden and not duplicated", - upstreamSecrets: []*corev1.Secret{ - { - TypeMeta: metav1.TypeMeta{ - Kind: "Secret", - APIVersion: "v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "default-token-1234", - Namespace: "namespace", - Annotations: map[string]string{ - logicalcluster.AnnotationKey: "root:default:testing", - 
"kubernetes.io/service-account.name": "default", - }, - }, - Data: map[string][]byte{ - "token": []byte("token"), - "namespace": []byte("namespace"), - }, - }, - }, - originalDeployment: &appsv1.Deployment{ - TypeMeta: metav1.TypeMeta{ - Kind: "Deployment", - APIVersion: "apps/v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment", - Namespace: "namespace", - Annotations: map[string]string{ - logicalcluster.AnnotationKey: "root:default:testing", - }, - }, - Spec: appsv1.DeploymentSpec{ - Replicas: new(int32), - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "test-container", - Image: "test-image", - Env: []corev1.EnvVar{ - { - Name: "KUBERNETES_SERVICE_PORT", - Value: "99999", - }, - }, - VolumeMounts: []corev1.VolumeMount{ - { - Name: "kcp-api-access", - MountPath: "totally-incorrect-path", - ReadOnly: false, - }, - }, - }, - }, - }, - }, - }, - }, - expectedDeployment: &appsv1.Deployment{ - TypeMeta: metav1.TypeMeta{ - Kind: "Deployment", - APIVersion: "apps/v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment", - Namespace: "namespace", - Annotations: map[string]string{ - logicalcluster.AnnotationKey: "root:default:testing", - }, - }, - Spec: appsv1.DeploymentSpec{ - Replicas: new(int32), - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - AutomountServiceAccountToken: utilspointer.Bool(false), - Containers: []corev1.Container{ - { - Name: "test-container", - Image: "test-image", - Env: []corev1.EnvVar{ - { - Name: "KUBERNETES_SERVICE_PORT", - Value: "12345", - }, - { - Name: "KUBERNETES_SERVICE_PORT_HTTPS", - Value: "12345", - }, - { - Name: "KUBERNETES_SERVICE_HOST", - Value: "4.5.6.7", - }, - }, - VolumeMounts: []corev1.VolumeMount{ - kcpApiAccessVolumeMount, - }, - }, - }, - DNSPolicy: corev1.DNSNone, - DNSConfig: &corev1.PodDNSConfig{ - Nameservers: []string{"8.8.8.8"}, - Searches: []string{"namespace.svc.cluster.local", "svc.cluster.local", "cluster.local"}, - Options: []corev1.PodDNSConfigOption{{Name: "ndots", Value: utilspointer.String("5")}}, - }, - Volumes: []corev1.Volume{ - kcpApiAccessVolume, - }, - }, - }, - }, - }, - config: &rest.Config{ - Host: "https://4.5.6.7:12345", - }}, - {desc: "Deployment with an existing Volume named kcp-api-access gets mutated and it is overridden and not duplicated", - upstreamSecrets: []*corev1.Secret{ - { - TypeMeta: metav1.TypeMeta{ - Kind: "Secret", - APIVersion: "v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "default-token-1234", - Namespace: "namespace", - Annotations: map[string]string{ - logicalcluster.AnnotationKey: "root:default:testing", - "kubernetes.io/service-account.name": "default", - }, - }, - Data: map[string][]byte{ - "token": []byte("token"), - "namespace": []byte("namespace"), - }, - }, - }, - originalDeployment: &appsv1.Deployment{ - TypeMeta: metav1.TypeMeta{ - Kind: "Deployment", - APIVersion: "apps/v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment", - Namespace: "namespace", - Annotations: map[string]string{ - logicalcluster.AnnotationKey: "root:default:testing", - }, - }, - Spec: appsv1.DeploymentSpec{ - Replicas: new(int32), - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "test-container", - Image: "test-image", - Env: []corev1.EnvVar{ - { - Name: "KUBERNETES_SERVICE_PORT", - Value: "99999", - }, - }, - VolumeMounts: []corev1.VolumeMount{ - { - Name: "kcp-api-access", - MountPath: "totally-not-the-path", - ReadOnly: false, - }, - }, - }, - }, 
- Volumes: []corev1.Volume{ - { - Name: "kcp-api-access", - VolumeSource: corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{ - SecretName: "this-is-not-the-secret-you-are-looking-for", - }, - }, - }, - }, - }, - }, - }, - }, - expectedDeployment: &appsv1.Deployment{ - TypeMeta: metav1.TypeMeta{ - Kind: "Deployment", - APIVersion: "apps/v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment", - Namespace: "namespace", - Annotations: map[string]string{ - logicalcluster.AnnotationKey: "root:default:testing", - }, - }, - Spec: appsv1.DeploymentSpec{ - Replicas: new(int32), - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - AutomountServiceAccountToken: utilspointer.Bool(false), - Containers: []corev1.Container{ - { - Name: "test-container", - Image: "test-image", - Env: []corev1.EnvVar{ - { - Name: "KUBERNETES_SERVICE_PORT", - Value: "12345", - }, - { - Name: "KUBERNETES_SERVICE_PORT_HTTPS", - Value: "12345", - }, - { - Name: "KUBERNETES_SERVICE_HOST", - Value: "4.5.6.7", - }, - }, - VolumeMounts: []corev1.VolumeMount{ - kcpApiAccessVolumeMount, - }, - }, - }, - DNSPolicy: corev1.DNSNone, - DNSConfig: &corev1.PodDNSConfig{ - Nameservers: []string{"8.8.8.8"}, - Searches: []string{"namespace.svc.cluster.local", "svc.cluster.local", "cluster.local"}, - Options: []corev1.PodDNSConfigOption{{Name: "ndots", Value: utilspointer.String("5")}}, - }, - Volumes: []corev1.Volume{ - kcpApiAccessVolume, - }, - }, - }, - }, - }, - config: &rest.Config{ - Host: "https://4.5.6.7:12345", - }}, - {desc: "Deployment with an EnvVar value coming from the DownwardAPI,only the metadata.namespace should be made static", - upstreamSecrets: []*corev1.Secret{ - { - TypeMeta: metav1.TypeMeta{ - Kind: "Secret", - APIVersion: "v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "default-token-1234", - Namespace: "namespace", - Annotations: map[string]string{ - logicalcluster.AnnotationKey: "root:default:testing", - "kubernetes.io/service-account.name": "default", - }, - }, - Data: map[string][]byte{ - "token": []byte("token"), - "namespace": []byte("namespace"), - }, - }, - }, - originalDeployment: &appsv1.Deployment{ - TypeMeta: metav1.TypeMeta{ - Kind: "Deployment", - APIVersion: "apps/v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment", - Namespace: "namespace", - Annotations: map[string]string{ - logicalcluster.AnnotationKey: "root:default:testing", - }, - }, - Spec: appsv1.DeploymentSpec{ - Replicas: new(int32), - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "test-container", - Image: "test-image", - Env: []corev1.EnvVar{ - { - Name: "DOWNWARDAPI_ENV_NAMESPACE", - ValueFrom: &corev1.EnvVarSource{ - FieldRef: &corev1.ObjectFieldSelector{ - FieldPath: "metadata.namespace", - }, - }, - }, - { - Name: "DOWNWARDAPI_ENV_NAME", - ValueFrom: &corev1.EnvVarSource{ - FieldRef: &corev1.ObjectFieldSelector{ - FieldPath: "metadata.name", - }, - }, - }, - { - Name: "MYENV", - Value: "myenv", - }, - }, - }, - }, - }, - }, - }, - }, - expectedDeployment: &appsv1.Deployment{ - TypeMeta: metav1.TypeMeta{ - Kind: "Deployment", - APIVersion: "apps/v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "test-deployment", - Namespace: "namespace", - Annotations: map[string]string{ - logicalcluster.AnnotationKey: "root:default:testing", - }, - }, - Spec: appsv1.DeploymentSpec{ - Replicas: new(int32), - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - AutomountServiceAccountToken: utilspointer.Bool(false), - Containers: 
[]corev1.Container{ - { - Name: "test-container", - Image: "test-image", - Env: []corev1.EnvVar{ - { - Name: "DOWNWARDAPI_ENV_NAMESPACE", - Value: "namespace", - }, - { - Name: "DOWNWARDAPI_ENV_NAME", - ValueFrom: &corev1.EnvVarSource{ - FieldRef: &corev1.ObjectFieldSelector{ - FieldPath: "metadata.name", - }, - }, - }, - { - Name: "MYENV", - Value: "myenv", - }, - { - Name: "KUBERNETES_SERVICE_PORT", - Value: "12345", - }, - { - Name: "KUBERNETES_SERVICE_PORT_HTTPS", - Value: "12345", - }, - { - Name: "KUBERNETES_SERVICE_HOST", - Value: "4.5.6.7", - }, - }, - VolumeMounts: []corev1.VolumeMount{ - kcpApiAccessVolumeMount, - }, - }, - }, - DNSPolicy: corev1.DNSNone, - DNSConfig: &corev1.PodDNSConfig{ - Nameservers: []string{"8.8.8.8"}, - Searches: []string{"namespace.svc.cluster.local", "svc.cluster.local", "cluster.local"}, - Options: []corev1.PodDNSConfigOption{{Name: "ndots", Value: utilspointer.String("5")}}, - }, - Volumes: []corev1.Volume{ - kcpApiAccessVolume, - }, - }, - }, - }, - }, - config: &rest.Config{ - Host: "https://4.5.6.7:12345", - }}, - } { - { - t.Run(c.desc, func(t *testing.T) { - secretLister := func(upstreamLogicalCluster logicalcluster.Name, namespace string) ([]runtime.Object, error) { - unstructuredObjects := make([]runtime.Object, 0, len(c.upstreamSecrets)) - for _, obj := range c.upstreamSecrets { - unstObj, err := toUnstructured(obj) - require.NoError(t, err) - unstructuredObjects = append(unstructuredObjects, unstObj) - } - return unstructuredObjects, nil - } - - clusterName := logicalcluster.Name("root:default:testing") - - serviceIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) - - dnsServiceName := shared.GetDNSID(clusterName, "syncTargetUID", "syncTargetName") - err := serviceIndexer.Add(service(dnsServiceName, "dnsNamespace")) - require.NoError(t, err, "Service Add() = %v", err) - svcLister := listerscorev1.NewServiceLister(serviceIndexer) - - dm := &PodSpecableMutator{ - getWorkspaceURL: func(obj *unstructured.Unstructured) (*url.URL, error) { - return url.Parse(c.config.Host) - }, - listSecrets: secretLister, - serviceLister: svcLister, - syncTargetClusterName: clusterName, - syncTargetUID: "syncTargetUID", - syncTargetName: "syncTargetName", - dnsNamespace: "dnsNamespace", - upsyncPods: c.upsyncPods, - } - - unstrOriginalDeployment, err := toUnstructured(c.originalDeployment) - require.NoError(t, err, "toUnstructured() = %v", err) - - err = dm.Mutate(unstrOriginalDeployment) - require.NoError(t, err, "Mutate() = %v", err) - - mutatedOriginalDeployment, err := toDeployment(unstrOriginalDeployment) - require.NoError(t, err, "toDeployment() = %v", err) - - if !apiequality.Semantic.DeepEqual(mutatedOriginalDeployment, c.expectedDeployment) { - t.Errorf("expected deployments are not equal, got:\n %#v \n wanted:\n %#v \n", c.expectedDeployment, mutatedOriginalDeployment) - } - }) - } - } -} - -func toUnstructured(obj runtime.Object) (*unstructured.Unstructured, error) { - bs, err := json.Marshal(obj) - if err != nil { - return nil, fmt.Errorf("Marshal() = %w", err) - } - u := &unstructured.Unstructured{} - if err := json.Unmarshal(bs, u); err != nil { - return nil, fmt.Errorf("Unmarshal() = %w", err) - } - return u, nil -} - -func toDeployment(obj *unstructured.Unstructured) (*appsv1.Deployment, error) { - bs, err := json.Marshal(obj) - if err != nil { - return nil, fmt.Errorf("Marshal() = %w", err) - } - d := &appsv1.Deployment{} - if err := json.Unmarshal(bs, d); err != nil { - return nil, fmt.Errorf("Unmarshal() = %w", err) - } - 
return d, nil -} - -func service(name, namespace string) *corev1.Service { - return &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - Spec: corev1.ServiceSpec{ - ClusterIP: "8.8.8.8", - }, - } -} diff --git a/pkg/syncer/spec/mutators/secrets.go b/pkg/syncer/spec/mutators/secrets.go deleted file mode 100644 index 63cb6ca1ca1..00000000000 --- a/pkg/syncer/spec/mutators/secrets.go +++ /dev/null @@ -1,59 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package mutators - -import ( - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -type SecretMutator struct { -} - -func (sm *SecretMutator) GVRs() []schema.GroupVersionResource { - return []schema.GroupVersionResource{ - { - Group: "", - Version: "v1", - Resource: "secrets", - }, - } -} - -func NewSecretMutator() *SecretMutator { - return &SecretMutator{} -} - -// Mutate applies the mutator changes to the object. -func (sm *SecretMutator) Mutate(obj *unstructured.Unstructured) error { - // We need transform the serviceaccount tokens into an Opaque secret, in order to avoid the pcluster to rewrite them, - // and we remove the annotations that point to the kcp serviceaccount name/uid. - if _, ok := obj.GetAnnotations()[corev1.ServiceAccountNameKey]; ok { - obj.Object["type"] = string(corev1.SecretTypeOpaque) - annotations := obj.GetAnnotations() - delete(annotations, corev1.ServiceAccountNameKey) - delete(annotations, corev1.ServiceAccountUIDKey) - if len(annotations) == 0 { - obj.SetAnnotations(nil) - } else { - obj.SetAnnotations(annotations) - } - } - - return nil -} diff --git a/pkg/syncer/spec/mutators/secrets_test.go b/pkg/syncer/spec/mutators/secrets_test.go deleted file mode 100644 index 70566ce5023..00000000000 --- a/pkg/syncer/spec/mutators/secrets_test.go +++ /dev/null @@ -1,114 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package mutators - -import ( - "testing" - - "github.com/stretchr/testify/require" - - corev1 "k8s.io/api/core/v1" - apiequality "k8s.io/apimachinery/pkg/api/equality" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func TestSecretsMutate(t *testing.T) { - for _, c := range []struct { - desc string - originalSecret, expectedSecret *corev1.Secret - }{{ - desc: "A secret that is not a ServiceAccount token, should not be mutated", - originalSecret: &corev1.Secret{ - TypeMeta: metav1.TypeMeta{ - Kind: "Secret", - APIVersion: "v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "my-super-secret-stuff", - Annotations: map[string]string{ - "testing": "testing", - }, - }, - Data: map[string][]byte{ - "foo": []byte("bar"), - }, - }, - expectedSecret: &corev1.Secret{ - TypeMeta: metav1.TypeMeta{ - Kind: "Secret", - APIVersion: "v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "my-super-secret-stuff", - Annotations: map[string]string{ - "testing": "testing", - }, - }, - Data: map[string][]byte{ - "foo": []byte("bar"), - }, - }, - }, { - desc: "A ServiceAccount secret, should be mutated", - originalSecret: &corev1.Secret{ - TypeMeta: metav1.TypeMeta{ - Kind: "Secret", - APIVersion: "v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "default-token-f8f8f8", - Annotations: map[string]string{ - "kubernetes.io/service-account.name": "default", - "kubernetes.io/service-account.uid": "asdasdasdasdasdsadsadas", - }, - }, - Type: corev1.SecretTypeServiceAccountToken, - Data: map[string][]byte{ - "token": []byte("token"), - "namespace": []byte("namespace"), - }, - }, - expectedSecret: &corev1.Secret{ - TypeMeta: metav1.TypeMeta{ - Kind: "Secret", - APIVersion: "v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "default-token-f8f8f8", - }, - Type: corev1.SecretTypeOpaque, - Data: map[string][]byte{ - "token": []byte("token"), - "namespace": []byte("namespace"), - }, - }, - }} { - t.Run(c.desc, func(t *testing.T) { - sm := SecretMutator{} - unstrOriginalSecret, err := toUnstructured(c.originalSecret) - require.NoError(t, err) - unstrExpectedSecret, err := toUnstructured(c.expectedSecret) - require.NoError(t, err) - // The Secret mutator doesn't use the logical cluster. - err = sm.Mutate(unstrOriginalSecret) - require.NoError(t, err) - if !apiequality.Semantic.DeepEqual(unstrOriginalSecret, unstrExpectedSecret) { - t.Errorf("secret mutated incorrectly, got: %v expected: %v", unstrOriginalSecret.Object, unstrExpectedSecret.Object) - } - }) - } -} diff --git a/pkg/syncer/spec/spec_controller.go b/pkg/syncer/spec/spec_controller.go deleted file mode 100644 index fe33f2b0b90..00000000000 --- a/pkg/syncer/spec/spec_controller.go +++ /dev/null @@ -1,432 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package spec - -import ( - "context" - "encoding/json" - "fmt" - "time" - - "github.com/go-logr/logr" - kcpcache "github.com/kcp-dev/apimachinery/v2/pkg/cache" - kcpdynamic "github.com/kcp-dev/client-go/dynamic" - "github.com/kcp-dev/logicalcluster/v3" - - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/informers" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/util/workqueue" - "k8s.io/klog/v2" - - "github.com/kcp-dev/kcp/pkg/indexers" - ddsif "github.com/kcp-dev/kcp/pkg/informer" - "github.com/kcp-dev/kcp/pkg/logging" - syncerindexers "github.com/kcp-dev/kcp/pkg/syncer/indexers" - "github.com/kcp-dev/kcp/pkg/syncer/shared" - "github.com/kcp-dev/kcp/pkg/syncer/spec/dns" - "github.com/kcp-dev/kcp/pkg/syncer/synctarget" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" - . "github.com/kcp-dev/kcp/tmc/pkg/logging" -) - -const ( - controllerName = "kcp-workload-syncer-spec" -) - -var namespaceGVR schema.GroupVersionResource = corev1.SchemeGroupVersion.WithResource("namespaces") - -type Mutator interface { - GVRs() []schema.GroupVersionResource - Mutate(obj *unstructured.Unstructured) error -} - -type Controller struct { - queue workqueue.RateLimitingInterface - - mutators map[schema.GroupVersionResource]Mutator - dnsProcessor *dns.DNSProcessor - - getUpstreamClient func(clusterName logicalcluster.Name) (dynamic.Interface, error) - downstreamClient dynamic.Interface - - getUpstreamLister func(clusterName logicalcluster.Name, gvr schema.GroupVersionResource) (cache.GenericLister, error) - getDownstreamLister func(gvr schema.GroupVersionResource) (cache.GenericLister, error) - listDownstreamNamespacesByLocator func(jsonLocator string) ([]*unstructured.Unstructured, error) - - downstreamNSCleaner shared.Cleaner - syncTargetName string - syncTargetClusterName logicalcluster.Name - syncTargetUID types.UID - syncTargetKey string - advancedSchedulingEnabled bool -} - -func NewSpecSyncer(syncerLogger logr.Logger, syncTargetClusterName logicalcluster.Name, syncTargetName, syncTargetKey string, - advancedSchedulingEnabled bool, - upstreamClient kcpdynamic.ClusterInterface, downstreamClient dynamic.Interface, downstreamKubeClient kubernetes.Interface, - ddsifForUpstreamSyncer *ddsif.DiscoveringDynamicSharedInformerFactory, - ddsifForDownstream *ddsif.GenericDiscoveringDynamicSharedInformerFactory[cache.SharedIndexInformer, cache.GenericLister, informers.GenericInformer], - downstreamNSCleaner shared.Cleaner, - syncTargetUID types.UID, - dnsNamespace string, - dnsProcessor *dns.DNSProcessor, - dnsImage string, - mutators ...Mutator) (*Controller, error) { - c := Controller{ - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), controllerName), - - getUpstreamClient: func(clusterName logicalcluster.Name) (dynamic.Interface, error) { - return upstreamClient.Cluster(clusterName.Path()), nil - }, - downstreamClient: downstreamClient, - downstreamNSCleaner: downstreamNSCleaner, - - getDownstreamLister: func(gvr schema.GroupVersionResource) (cache.GenericLister, error) { - informers, notSynced := ddsifForDownstream.Informers() - informer, ok := informers[gvr] - if !ok { - 
if shared.ContainsGVR(notSynced, gvr) { - return nil, fmt.Errorf("informer for gvr %v not synced in the downstream informer factory", gvr) - } - return nil, fmt.Errorf("gvr %v should be known in the downstream informer factory", gvr) - } - return informer.Lister(), nil - }, - getUpstreamLister: func(clusterName logicalcluster.Name, gvr schema.GroupVersionResource) (cache.GenericLister, error) { - informers, notSynced := ddsifForUpstreamSyncer.Informers() - informer, ok := informers[gvr] - if !ok { - if shared.ContainsGVR(notSynced, gvr) { - return nil, fmt.Errorf("informer for gvr %v not synced in the upstream informer factory", gvr) - } - return nil, fmt.Errorf("gvr %v should be known in the upstream informer factory", gvr) - } - return informer.Lister().ByCluster(clusterName), nil - }, - - listDownstreamNamespacesByLocator: func(jsonLocator string) ([]*unstructured.Unstructured, error) { - nsInformer, err := ddsifForDownstream.ForResource(namespaceGVR) - if err != nil { - return nil, err - } - return indexers.ByIndex[*unstructured.Unstructured](nsInformer.Informer().GetIndexer(), syncerindexers.ByNamespaceLocatorIndexName, jsonLocator) - }, - syncTargetName: syncTargetName, - syncTargetClusterName: syncTargetClusterName, - syncTargetUID: syncTargetUID, - syncTargetKey: syncTargetKey, - advancedSchedulingEnabled: advancedSchedulingEnabled, - - mutators: make(map[schema.GroupVersionResource]Mutator, 2), - dnsProcessor: dnsProcessor, - } - - for _, mutator := range mutators { - for _, gvr := range mutator.GVRs() { - c.mutators[gvr] = mutator - } - } - - logger := logging.WithReconciler(syncerLogger, controllerName) - - namespaceGVR := corev1.SchemeGroupVersion.WithResource("namespaces") - - ddsifForUpstreamSyncer.AddEventHandler( - ddsif.GVREventHandlerFuncs{ - AddFunc: func(gvr schema.GroupVersionResource, obj interface{}) { - if gvr == namespaceGVR { - return - } - c.AddToQueue(gvr, obj, logger) - }, - UpdateFunc: func(gvr schema.GroupVersionResource, oldObj, newObj interface{}) { - if gvr == namespaceGVR { - return - } - oldUnstrob := oldObj.(*unstructured.Unstructured) - newUnstrob := newObj.(*unstructured.Unstructured) - - if !deepEqualApartFromStatus(logger, oldUnstrob, newUnstrob) { - c.AddToQueue(gvr, newUnstrob, logger) - } - }, - DeleteFunc: func(gvr schema.GroupVersionResource, obj interface{}) { - if gvr == namespaceGVR { - return - } - c.AddToQueue(gvr, obj, logger) - }, - }, - ) - - return &c, nil -} - -func NewSpecSyncerForDownstream(syncerLogger logr.Logger, syncTargetClusterName logicalcluster.Name, syncTargetName, syncTargetKey string, - advancedSchedulingEnabled bool, - getShardAccess synctarget.GetShardAccessFunc, - downstreamClient dynamic.Interface, downstreamKubeClient kubernetes.Interface, - ddsifForDownstream *ddsif.GenericDiscoveringDynamicSharedInformerFactory[cache.SharedIndexInformer, cache.GenericLister, informers.GenericInformer], - downstreamNSCleaner shared.Cleaner, - syncTargetUID types.UID, - dnsNamespace string, - dnsProcessor *dns.DNSProcessor, - dnsImage string, - mutators ...Mutator) (*Controller, error) { - c := Controller{ - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), controllerName), - - getUpstreamClient: func(clusterName logicalcluster.Name) (dynamic.Interface, error) { - shardAccess, ok, err := getShardAccess(clusterName) - if err != nil { - return nil, err - } - if !ok { - return nil, fmt.Errorf("shard-related clients not found for cluster %q", clusterName) - } - return 
shardAccess.SyncerClient.Cluster(clusterName.Path()), nil
-		},
-		downstreamClient:    downstreamClient,
-		downstreamNSCleaner: downstreamNSCleaner,
-
-		getDownstreamLister: func(gvr schema.GroupVersionResource) (cache.GenericLister, error) {
-			informers, notSynced := ddsifForDownstream.Informers()
-			informer, ok := informers[gvr]
-			if !ok {
-				if shared.ContainsGVR(notSynced, gvr) {
-					return nil, fmt.Errorf("informer for gvr %v not synced in the downstream informer factory", gvr)
-				}
-				return nil, fmt.Errorf("gvr %v should be known in the downstream informer factory", gvr)
-			}
-			return informer.Lister(), nil
-		},
-		getUpstreamLister: func(clusterName logicalcluster.Name, gvr schema.GroupVersionResource) (cache.GenericLister, error) {
-			shardAccess, ok, err := getShardAccess(clusterName)
-			if err != nil {
-				return nil, err
-			}
-			if !ok {
-				return nil, fmt.Errorf("shard-related clients not found for cluster %q", clusterName)
-			}
-
-			informers, notSynced := shardAccess.SyncerDDSIF.Informers()
-			informer, ok := informers[gvr]
-			if !ok {
-				if shared.ContainsGVR(notSynced, gvr) {
-					return nil, fmt.Errorf("informer for gvr %v not synced in the upstream informer factory", gvr)
-				}
-				return nil, fmt.Errorf("gvr %v should be known in the upstream informer factory", gvr)
-			}
-			return informer.Lister().ByCluster(clusterName), nil
-		},
-
-		listDownstreamNamespacesByLocator: func(jsonLocator string) ([]*unstructured.Unstructured, error) {
-			nsInformer, err := ddsifForDownstream.ForResource(namespaceGVR)
-			if err != nil {
-				return nil, err
-			}
-			return indexers.ByIndex[*unstructured.Unstructured](nsInformer.Informer().GetIndexer(), syncerindexers.ByNamespaceLocatorIndexName, jsonLocator)
-		},
-		syncTargetName:            syncTargetName,
-		syncTargetClusterName:     syncTargetClusterName,
-		syncTargetUID:             syncTargetUID,
-		syncTargetKey:             syncTargetKey,
-		advancedSchedulingEnabled: advancedSchedulingEnabled,
-
-		mutators: make(map[schema.GroupVersionResource]Mutator, 2),
-
-		dnsProcessor: dnsProcessor,
-	}
-
-	for _, mutator := range mutators {
-		for _, gvr := range mutator.GVRs() {
-			c.mutators[gvr] = mutator
-		}
-	}
-
-	logger := logging.WithReconciler(syncerLogger, controllerName)
-
-	namespaceGVR := corev1.SchemeGroupVersion.WithResource("namespaces")
-
-	ddsifForDownstream.AddEventHandler(ddsif.GVREventHandlerFuncs{
-		DeleteFunc: func(gvr schema.GroupVersionResource, obj interface{}) {
-			if gvr == namespaceGVR {
-				return
-			}
-			if d, ok := obj.(cache.DeletedFinalStateUnknown); ok {
-				obj = d.Obj
-			}
-			unstrObj, ok := obj.(*unstructured.Unstructured)
-			if !ok {
-				utilruntime.HandleError(fmt.Errorf("resource should be an *unstructured.Unstructured, but was %T", obj))
-				return
-			}
-			if unstrObj.GetLabels()[workloadv1alpha1.ClusterResourceStateLabelPrefix+syncTargetKey] == string(workloadv1alpha1.ResourceStateUpsync) {
-				return
-			}
-
-			key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
-			if err != nil {
-				utilruntime.HandleError(fmt.Errorf("error getting key for type %T: %w", obj, err))
-				return
-			}
-			namespace, name, err := cache.SplitMetaNamespaceKey(key)
-			if err != nil {
-				utilruntime.HandleError(fmt.Errorf("error splitting key %q: %w", key, err))
-				return
-			}
-			logger := logging.WithQueueKey(logger, key).WithValues("gvr", gvr, DownstreamNamespace, namespace, DownstreamName, name)
-			logger.V(3).Info("processing delete event")
-
-			var nsLocatorHolder *unstructured.Unstructured
-			// Handle namespaced resources
-			if namespace != "" {
-				// Use namespace lister
-				namespaceLister, err := c.getDownstreamLister(namespaceGVR)
-				if err != nil {
-					utilruntime.HandleError(err)
-					return
-				}
-
-				nsObj, err := namespaceLister.Get(namespace)
-				if apierrors.IsNotFound(err) {
-					return
-				}
-				if err != nil {
-					utilruntime.HandleError(err)
-					return
-				}
-				c.downstreamNSCleaner.PlanCleaning(namespace)
-				nsLocatorHolder, ok = nsObj.(*unstructured.Unstructured)
-				if !ok {
-					utilruntime.HandleError(fmt.Errorf("unexpected object type: %T", nsObj))
-					return
-				}
-			} else {
-				// The nsLocatorHolder is in the resource itself for cluster-scoped resources.
-				nsLocatorHolder = unstrObj
-			}
-			logger = logging.WithObject(logger, nsLocatorHolder)
-
-			locator, ok := nsLocatorHolder.GetAnnotations()[shared.NamespaceLocatorAnnotation]
-			if !ok {
-				utilruntime.HandleError(fmt.Errorf("unable to find the locator annotation in resource %s", nsLocatorHolder.GetName()))
-				return
-			}
-			nsLocator := &shared.NamespaceLocator{}
-			err = json.Unmarshal([]byte(locator), nsLocator)
-			if err != nil {
-				utilruntime.HandleError(err)
-				return
-			}
-			logger.V(4).Info("found", "NamespaceLocator", nsLocator)
-			m := &metav1.ObjectMeta{
-				Annotations: map[string]string{
-					logicalcluster.AnnotationKey: nsLocator.ClusterName.String(),
-				},
-				Namespace: nsLocator.Namespace,
-				Name:      shared.GetUpstreamResourceName(gvr, name),
-			}
-			c.AddToQueue(gvr, m, logger)
-		},
-	})
-
-	return &c, nil
-}
-
-type queueKey struct {
-	gvr schema.GroupVersionResource
-	key string // meta namespace key
-}
-
-func (c *Controller) AddToQueue(gvr schema.GroupVersionResource, obj interface{}, logger logr.Logger) {
-	key, err := kcpcache.DeletionHandlingMetaClusterNamespaceKeyFunc(obj)
-	if err != nil {
-		utilruntime.HandleError(err)
-		return
-	}
-
-	logging.WithQueueKey(logger, key).V(2).Info("queueing GVR", "gvr", gvr.String())
-	c.queue.Add(
-		queueKey{
-			gvr: gvr,
-			key: key,
-		},
-	)
-}
-
-// Start starts N worker processes processing work items.
-func (c *Controller) Start(ctx context.Context, numThreads int) {
-	defer utilruntime.HandleCrash()
-	defer c.queue.ShutDown()
-
-	logger := logging.WithReconciler(klog.FromContext(ctx), controllerName)
-	ctx = klog.NewContext(ctx, logger)
-	logger.Info("Starting syncer workers")
-	defer logger.Info("Stopping syncer workers")
-
-	for i := 0; i < numThreads; i++ {
-		go wait.UntilWithContext(ctx, c.startWorker, time.Second)
-	}
-
-	<-ctx.Done()
-}
-
-// startWorker processes work items until the context is cancelled.
-func (c *Controller) startWorker(ctx context.Context) {
-	for c.processNextWorkItem(ctx) {
-	}
-}
-
-func (c *Controller) processNextWorkItem(ctx context.Context) bool {
-	// Wait until there is a new item in the working queue
-	key, quit := c.queue.Get()
-	if quit {
-		return false
-	}
-	qk := key.(queueKey)
-
-	logger := logging.WithQueueKey(klog.FromContext(ctx), qk.key).WithValues("gvr", qk.gvr)
-	ctx = klog.NewContext(ctx, logger)
-	logger.V(1).Info("processing key")
-
-	// No matter what, tell the queue we're done with this key, to unblock
-	// other workers.
-	defer c.queue.Done(key)
-
-	if retryAfter, err := c.process(ctx, qk.gvr, qk.key); err != nil {
-		utilruntime.HandleError(fmt.Errorf("%s failed to sync %q, err: %w", controllerName, qk.key, err))
-		c.queue.AddRateLimited(key)
-		return true
-	} else if retryAfter != nil {
-		c.queue.AddAfter(key, *retryAfter)
-		return true
-	}
-
-	c.queue.Forget(key)
-
-	return true
-}
diff --git a/pkg/syncer/spec/spec_process.go b/pkg/syncer/spec/spec_process.go
deleted file mode 100644
index 22e159082a5..00000000000
--- a/pkg/syncer/spec/spec_process.go
+++ /dev/null
@@ -1,571 +0,0 @@
-/*
-Copyright 2021 The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package spec
-
-import (
-	"context"
-	"encoding/json"
-	"fmt"
-	"reflect"
-	"strings"
-	"time"
-
-	jsonpatch "github.com/evanphx/json-patch"
-	"github.com/go-logr/logr"
-	kcpcache "github.com/kcp-dev/apimachinery/v2/pkg/cache"
-	"github.com/kcp-dev/logicalcluster/v3"
-
-	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/equality"
-	apierrors "k8s.io/apimachinery/pkg/api/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
-	"k8s.io/apimachinery/pkg/runtime/schema"
-	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/apimachinery/pkg/util/sets"
-	"k8s.io/apiserver/pkg/endpoints/handlers"
-	"k8s.io/klog/v2"
-	"k8s.io/utils/pointer"
-
-	"github.com/kcp-dev/kcp/pkg/logging"
-	"github.com/kcp-dev/kcp/pkg/syncer/shared"
-	workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1"
-	. "github.com/kcp-dev/kcp/tmc/pkg/logging"
-)
-
-const (
-	syncerApplyManager = "syncer"
-)
-
-func deepEqualApartFromStatus(logger logr.Logger, oldUnstrob, newUnstrob *unstructured.Unstructured) bool {
-	// TODO(jmprusi): Remove this after switching to virtual workspaces.
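-	// Two objects are considered equal here when they agree on annotations
-	// (ignoring the syncer's internal status annotations, which are stripped
-	// below), labels, finalizers, deletion state, and every top-level field
-	// other than "metadata" and "status".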
-	// Strip the syncer's internal status annotations from both objects before comparing the remaining annotations.
-	oldAnnotations, _, err := unstructured.NestedStringMap(oldUnstrob.Object, "metadata", "annotations")
-	if err != nil {
-		logger.Error(err, "failed to get annotations from object")
-		return false
-	}
-	for k := range oldAnnotations {
-		if strings.HasPrefix(k, workloadv1alpha1.InternalClusterStatusAnnotationPrefix) {
-			delete(oldAnnotations, k)
-		}
-	}
-
-	newAnnotations, _, err := unstructured.NestedStringMap(newUnstrob.Object, "metadata", "annotations")
-	if err != nil {
-		logger.Error(err, "failed to get annotations from object")
-		return false
-	}
-	for k := range newAnnotations {
-		if strings.HasPrefix(k, workloadv1alpha1.InternalClusterStatusAnnotationPrefix) {
-			delete(newAnnotations, k)
-		}
-	}
-
-	if !equality.Semantic.DeepEqual(oldAnnotations, newAnnotations) {
-		return false
-	}
-	if !equality.Semantic.DeepEqual(oldUnstrob.GetLabels(), newUnstrob.GetLabels()) {
-		return false
-	}
-	if !equality.Semantic.DeepEqual(oldUnstrob.GetFinalizers(), newUnstrob.GetFinalizers()) {
-		return false
-	}
-
-	oldIsBeingDeleted := oldUnstrob.GetDeletionTimestamp() != nil
-	newIsBeingDeleted := newUnstrob.GetDeletionTimestamp() != nil
-	if oldIsBeingDeleted != newIsBeingDeleted {
-		return false
-	}
-
-	oldObjKeys := sets.StringKeySet(oldUnstrob.UnstructuredContent())
-	newObjKeys := sets.StringKeySet(newUnstrob.UnstructuredContent())
-	for _, key := range oldObjKeys.Union(newObjKeys).UnsortedList() {
-		if key == "metadata" || key == "status" {
-			continue
-		}
-		if !equality.Semantic.DeepEqual(oldUnstrob.UnstructuredContent()[key], newUnstrob.UnstructuredContent()[key]) {
-			return false
-		}
-	}
-	return true
-}
-
-func (c *Controller) process(ctx context.Context, gvr schema.GroupVersionResource, key string) (retryAfter *time.Duration, err error) {
-	logger := klog.FromContext(ctx)
-
-	// from upstream
-	clusterName, upstreamNamespace, name, err := kcpcache.SplitMetaClusterNamespaceKey(key)
-	if err != nil {
-		logger.Error(err, "Invalid key")
-		return nil, nil
-	}
-	logger = logger.WithValues(logging.WorkspaceKey, clusterName, logging.NamespaceKey, upstreamNamespace, logging.NameKey, name)
-
-	desiredNSLocator := shared.NewNamespaceLocator(clusterName, c.syncTargetClusterName, c.syncTargetUID, c.syncTargetName, upstreamNamespace)
-	jsonNSLocator, err := json.Marshal(desiredNSLocator)
-	if err != nil {
-		return nil, err
-	}
-
-	var downstreamNamespace string
-	// Only look for the downstream namespace if the resource is namespaced; skip this for cluster-scoped resources.
-	if upstreamNamespace != "" {
-		downstreamNamespaces, err := c.listDownstreamNamespacesByLocator(string(jsonNSLocator))
-		if err != nil {
-			return nil, err
-		}
-
-		if len(downstreamNamespaces) == 1 {
-			namespace := downstreamNamespaces[0]
-			logger.WithValues(DownstreamName, namespace.GetName()).V(4).Info("Found downstream namespace for upstream namespace")
-			downstreamNamespace = namespace.GetName()
-		} else if len(downstreamNamespaces) > 1 {
-			// This should never happen unless there's some namespace collision.
-			var namespacesCollisions []string
-			for _, namespace := range downstreamNamespaces {
-				namespacesCollisions = append(namespacesCollisions, namespace.GetName())
-			}
-			return nil, fmt.Errorf("(namespace collision) found multiple downstream namespaces: %s for upstream namespace %s|%s", strings.Join(namespacesCollisions, ","), clusterName, upstreamNamespace)
-		} else {
-			logger.V(4).Info("No downstream namespaces found")
-			downstreamNamespace, err = shared.PhysicalClusterNamespaceName(desiredNSLocator)
-			if err != nil {
-				logger.Error(err, "Error hashing namespace")
-				return nil, nil
-			}
-		}
-	}
-	logger = logger.WithValues(DownstreamNamespace, downstreamNamespace)
-
-	// get the upstream object
-	upstreamSyncerLister, err := c.getUpstreamLister(clusterName, gvr)
-	if err != nil {
-		return nil, err
-	}
-
-	obj, err := upstreamSyncerLister.ByNamespace(upstreamNamespace).Get(name)
-	if err != nil && !apierrors.IsNotFound(err) {
-		return nil, err
-	}
-	if apierrors.IsNotFound(err) {
-		// deleted upstream => delete downstream
-		logger.Info("Deleting downstream object for upstream object")
-		if downstreamNamespace != "" {
-			err = c.downstreamClient.Resource(gvr).Namespace(downstreamNamespace).Delete(ctx, name, metav1.DeleteOptions{})
-		} else {
-			err = c.downstreamClient.Resource(gvr).Delete(ctx, name, metav1.DeleteOptions{})
-		}
-		if err != nil && !apierrors.IsNotFound(err) {
-			return nil, err
-		}
-		// If the resource is namespaced, plan the cleanup of its namespace.
-		if downstreamNamespace != "" {
-			c.downstreamNSCleaner.PlanCleaning(downstreamNamespace)
-		}
-		return nil, nil
-	}
-
-	// upsert downstream
-	upstreamObj, ok := obj.(*unstructured.Unstructured)
-	if !ok {
-		return nil, fmt.Errorf("object to synchronize is expected to be Unstructured, but is %T", obj)
-	}
-
-	if actualVersion := upstreamObj.GetAnnotations()[handlers.KCPOriginalAPIVersionAnnotation]; actualVersion != "" {
-		actualGV, err := schema.ParseGroupVersion(actualVersion)
-		if err != nil {
-			logger.Error(err, "error parsing original API version annotation", "annotation", actualVersion)
-			// Returning an error and reprocessing will presumably result in the same parse error, so just return
-			// nil here.
-			return nil, nil
-		}
-		gvr.Version = actualGV.Version
-		logger.V(4).Info("using actual API version from annotation", "actual", actualVersion)
-	}
-
-	if downstreamNamespace != "" {
-		if err := c.ensureDownstreamNamespaceExists(ctx, downstreamNamespace, upstreamObj); err != nil {
-			return nil, err
-		}
-	} else {
-		// For cluster-wide resources we also need to check for possible collisions, as the resource could already exist in the pcluster without being owned by this workspace.
-		// TODO(jmprusi): We should indicate the collision somehow (condition/annotation?) to avoid retrying the resource over and over.
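-		// clusterWideCollisionCheck tolerates a pre-existing downstream object
-		// only when its namespace-locator annotation references this sync
-		// target's cluster; anything else is reported as a collision.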
-		if err := c.clusterWideCollisionCheck(ctx, gvr, upstreamObj); err != nil {
-			return nil, err
-		}
-	}
-
-	deploymentGVR := schema.GroupVersionResource{
-		Group:    "apps",
-		Version:  "v1",
-		Resource: "deployments",
-	}
-
-	// Make sure the DNS nameserver for the current workspace is up and running
-	if dnsUpAndReady, err := c.dnsProcessor.EnsureDNSUpAndReady(ctx, desiredNSLocator); err != nil {
-		logger.Error(err, "failed to check DNS nameserver is up and running (retrying)")
-		return nil, err
-	} else if !dnsUpAndReady && gvr == deploymentGVR {
-		logger.Info("DNS server is not ready for the deployment to sync; requeueing the deployment in 500ms")
-		retryDelay := 500 * time.Millisecond
-		return &retryDelay, nil
-	}
-
-	if added, err := c.ensureSyncerFinalizer(ctx, gvr, upstreamObj); added {
-		// The successful update of the upstream resource finalizer will trigger a new reconcile
-		return nil, nil
-	} else if err != nil {
-		return nil, err
-	}
-
-	return nil, c.applyToDownstream(ctx, gvr, downstreamNamespace, upstreamObj)
-}
-
-// TODO: This function is here as a quick and dirty implementation of namespace creation.
-//
-// In fact, we should also be getting notifications about namespaces created upstream and be creating downstream equivalents.
-func (c *Controller) ensureDownstreamNamespaceExists(ctx context.Context, downstreamNamespace string, upstreamObj *unstructured.Unstructured) error {
-	logger := klog.FromContext(ctx)
-
-	// If the namespace was planned for cleanup, cancel that cleanup, as we expect to use the namespace.
-	c.downstreamNSCleaner.CancelCleaning(downstreamNamespace)
-
-	namespaces := c.downstreamClient.Resource(schema.GroupVersionResource{
-		Group:    "",
-		Version:  "v1",
-		Resource: "namespaces",
-	})
-
-	newNamespace := &unstructured.Unstructured{}
-	newNamespace.SetAPIVersion("v1")
-	newNamespace.SetKind("Namespace")
-	newNamespace.SetName(downstreamNamespace)
-
-	// TODO: if the downstream namespace loses these annotations/labels after creation,
-	// we don't have anything in place currently that will put them back.
-	upstreamLogicalCluster := logicalcluster.From(upstreamObj)
-	desiredNSLocator := shared.NewNamespaceLocator(upstreamLogicalCluster, c.syncTargetClusterName, c.syncTargetUID, c.syncTargetName, upstreamObj.GetNamespace())
-	b, err := json.Marshal(desiredNSLocator)
-	if err != nil {
-		return err
-	}
-	newNamespace.SetAnnotations(map[string]string{
-		shared.NamespaceLocatorAnnotation: string(b),
-	})
-
-	desiredTenantID, err := shared.GetTenantID(desiredNSLocator)
-	if err != nil {
-		return err
-	}
-
-	newNamespace.SetLabels(map[string]string{
-		// TODO: this should be set once at syncer startup and propagated around everywhere.
-		workloadv1alpha1.InternalDownstreamClusterLabel: c.syncTargetKey,
-		shared.TenantIDLabel:                            desiredTenantID,
-	})
-
-	namespaceLister, err := c.getDownstreamLister(namespaceGVR)
-	if err != nil {
-		return err
-	}
-
-	// Check if the namespace already exists; if not, create it.
-	namespace, err := namespaceLister.Get(newNamespace.GetName())
-	if apierrors.IsNotFound(err) {
-		_, err = namespaces.Create(ctx, newNamespace, metav1.CreateOptions{})
-		if err == nil {
-			logger.Info("Created downstream namespace for upstream namespace")
-			return nil
-		}
-	}
-	if apierrors.IsAlreadyExists(err) {
-		namespace, err = namespaces.Get(ctx, newNamespace.GetName(), metav1.GetOptions{})
-	}
-	if err != nil {
-		return err
-	}
-
-	// The namespace exists, so check if it has the correct namespace locator.
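-	// A missing locator, an undecodable locator, or a locator that differs from
-	// the desired one all mean the namespace belongs to someone else, i.e. a
-	// namespace collision that must not be adopted.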
-	unstrNamespace := namespace.(*unstructured.Unstructured)
-	nsLocator, exists, err := shared.LocatorFromAnnotations(unstrNamespace.GetAnnotations())
-	if err != nil {
-		return fmt.Errorf("(possible namespace collision) namespace %s already exists, but found an error when trying to decode the annotation: %w", newNamespace.GetName(), err)
-	}
-	if !exists {
-		return fmt.Errorf("(namespace collision) namespace %s has no namespace locator", unstrNamespace.GetName())
-	}
-	if !reflect.DeepEqual(desiredNSLocator, *nsLocator) {
-		return fmt.Errorf("(namespace collision) namespace %s already exists, but has a different namespace locator annotation: %+v vs %+v", newNamespace.GetName(), nsLocator, desiredNSLocator)
-	}
-
-	// Handle kcp upgrades by checking that the tenant ID is set and correct.
-	if tenantID, ok := unstrNamespace.GetLabels()[shared.TenantIDLabel]; !ok || tenantID != desiredTenantID {
-		labels := unstrNamespace.GetLabels()
-		labels[shared.TenantIDLabel] = desiredTenantID
-		unstrNamespace.SetLabels(labels)
-		_, err := namespaces.Update(ctx, unstrNamespace, metav1.UpdateOptions{})
-		return err
-	}
-
-	return nil
-}
-
-// TODO(jmprusi): merge with ensureDownstreamNamespaceExists and make it more generic.
-func (c *Controller) clusterWideCollisionCheck(ctx context.Context, gvr schema.GroupVersionResource, upstreamObj *unstructured.Unstructured) error {
-	// Check if the resource already exists; if so, check whether it has the correct namespace locator.
-	downstreamLister, err := c.getDownstreamLister(gvr)
-	if err != nil {
-		return err
-	}
-	resource, err := downstreamLister.Get(upstreamObj.GetName())
-	if apierrors.IsNotFound(err) {
-		return nil
-	} else if err != nil {
-		return err
-	}
-
-	// The resource exists, so check if it has the correct namespace locator.
-	unstrResource := resource.(*unstructured.Unstructured)
-	nsLocator, exists, err := shared.LocatorFromAnnotations(unstrResource.GetAnnotations())
-	if err != nil {
-		return fmt.Errorf("(possible cluster-wide resource collision) resource %s already exists, but found an error when trying to decode the annotation: %w", unstrResource.GetName(), err)
-	}
-	if !exists {
-		return fmt.Errorf("(cluster-wide resource collision) resource %s has no namespace locator", unstrResource.GetName())
-	}
-	if nsLocator.ClusterName != c.syncTargetClusterName {
-		return fmt.Errorf("(cluster-wide resource collision) resource %s already exists, but has a different namespace locator annotation: %+v", unstrResource.GetName(), nsLocator)
-	}
-
-	return nil
-}
-
-func (c *Controller) ensureSyncerFinalizer(ctx context.Context, gvr schema.GroupVersionResource, upstreamObj *unstructured.Unstructured) (bool, error) {
-	logger := klog.FromContext(ctx)
-
-	upstreamFinalizers := upstreamObj.GetFinalizers()
-	hasFinalizer := false
-	for _, finalizer := range upstreamFinalizers {
-		if finalizer == shared.SyncerFinalizerNamePrefix+c.syncTargetKey {
-			hasFinalizer = true
-			break
-		}
-	}
-
-	// TODO(davidfestal): When using the syncer virtual workspace we would check the DeletionTimestamp on the upstream object, instead of the DeletionTimestamp annotation,
-	// as the virtual workspace will set the deletion timestamp on the location view via a transformation.
-	intendedToBeRemovedFromLocation := upstreamObj.GetAnnotations()[workloadv1alpha1.InternalClusterDeletionTimestampAnnotationPrefix+c.syncTargetKey] != ""
-
-	// TODO(davidfestal): When using the syncer virtual workspace this condition would not be necessary anymore, since it would be tested directly on the virtual workspace side.
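-	// An external actor can keep the object pinned to this location via the
-	// cluster finalizer annotation; while that annotation is set, the syncer
-	// finalizer is kept (or re-added) even if the object is already marked for
-	// removal from the location.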
-	stillOwnedByExternalActorForLocation := upstreamObj.GetAnnotations()[workloadv1alpha1.ClusterFinalizerAnnotationPrefix+c.syncTargetKey] != ""
-
-	if !hasFinalizer && (!intendedToBeRemovedFromLocation || stillOwnedByExternalActorForLocation) {
-		upstreamObjCopy := upstreamObj.DeepCopy()
-		namespace := upstreamObjCopy.GetNamespace()
-		clusterName := logicalcluster.From(upstreamObjCopy)
-
-		upstreamFinalizers = append(upstreamFinalizers, shared.SyncerFinalizerNamePrefix+c.syncTargetKey)
-		upstreamObjCopy.SetFinalizers(upstreamFinalizers)
-		upstreamClient, err := c.getUpstreamClient(clusterName)
-		if err != nil {
-			return false, err
-		}
-		if _, err := upstreamClient.Resource(gvr).Namespace(namespace).Update(ctx, upstreamObjCopy, metav1.UpdateOptions{}); err != nil {
-			logger.Error(err, "Failed adding finalizer on upstream resource")
-			return false, err
-		}
-		logger.Info("Updated upstream resource with syncer finalizer")
-		return true, nil
-	}
-
-	return false, nil
-}
-
-func (c *Controller) applyToDownstream(ctx context.Context, gvr schema.GroupVersionResource, downstreamNamespace string, upstreamObj *unstructured.Unstructured) error {
-	logger := klog.FromContext(ctx)
-
-	upstreamObjLogicalCluster := logicalcluster.From(upstreamObj)
-	downstreamObj := upstreamObj.DeepCopy()
-
-	// Run name transformations on the downstreamObj.
-	transformedName := getTransformedName(downstreamObj)
-
-	// TODO(jmprusi): When using the syncer virtual workspace we would check the DeletionTimestamp on the upstream object, instead of the DeletionTimestamp annotation,
-	// as the virtual workspace will set the deletion timestamp on the location view via a transformation.
-	intendedToBeRemovedFromLocation := upstreamObj.GetAnnotations()[workloadv1alpha1.InternalClusterDeletionTimestampAnnotationPrefix+c.syncTargetKey] != ""
-
-	// TODO(jmprusi): When using the syncer virtual workspace this condition would not be necessary anymore, since it would be tested directly on the virtual workspace side.
-	stillOwnedByExternalActorForLocation := upstreamObj.GetAnnotations()[workloadv1alpha1.ClusterFinalizerAnnotationPrefix+c.syncTargetKey] != ""
-
-	upstreamSyncerLister, err := c.getUpstreamLister(upstreamObjLogicalCluster, gvr)
-	if err != nil {
-		return err
-	}
-
-	logger = logger.WithValues(DownstreamName, transformedName)
-	ctx = klog.NewContext(ctx, logger)
-
-	logger.V(4).Info("Checked upstream object removal state", "intendedToBeRemovedFromLocation", intendedToBeRemovedFromLocation, "stillOwnedByExternalActorForLocation", stillOwnedByExternalActorForLocation)
-	if intendedToBeRemovedFromLocation && !stillOwnedByExternalActorForLocation {
-		var err error
-		if downstreamNamespace != "" {
-			err = c.downstreamClient.Resource(gvr).Namespace(downstreamNamespace).Delete(ctx, transformedName, metav1.DeleteOptions{})
-		} else {
-			err = c.downstreamClient.Resource(gvr).Delete(ctx, transformedName, metav1.DeleteOptions{})
-		}
-		if err != nil {
-			if apierrors.IsNotFound(err) {
-				// That's not an error.
-				// Just make sure the syncer finalizer is removed from the KCP location-specific resource:
-				upstreamClient, err := c.getUpstreamClient(upstreamObjLogicalCluster)
-				if err != nil {
-					return err
-				}
-				return shared.EnsureUpstreamFinalizerRemoved(ctx, gvr, upstreamSyncerLister, upstreamClient, upstreamObj.GetNamespace(), c.syncTargetKey, upstreamObj.GetName())
-			}
-			logger.Error(err, "Error deleting upstream resource from downstream")
-			return err
-		}
-		if downstreamNamespace != "" {
-			c.downstreamNSCleaner.PlanCleaning(downstreamNamespace)
-		}
-		logger.V(2).Info("Deleted upstream resource from downstream")
-		return nil
-	}
-
-	// Run any transformations on the object before we apply it to the downstream cluster.
-	if mutator, ok := c.mutators[gvr]; ok {
-		if err := mutator.Mutate(downstreamObj); err != nil {
-			return err
-		}
-	}
-
-	downstreamObj.SetName(transformedName)
-	downstreamObj.SetUID("")
-	downstreamObj.SetResourceVersion("")
-	downstreamObj.SetNamespace(downstreamNamespace)
-	downstreamObj.SetManagedFields(nil)
-
-	// Strip the cluster name annotation.
-	downstreamAnnotations := downstreamObj.GetAnnotations()
-	delete(downstreamAnnotations, logicalcluster.AnnotationKey)
-	// TODO(jmprusi): To be removed when switching to the syncer Virtual Workspace transformations.
-	delete(downstreamAnnotations, workloadv1alpha1.InternalClusterStatusAnnotationPrefix+c.syncTargetKey)
-	// If the resource is cluster-scoped, we need to add the namespaceLocator annotation to be able to
-	// find the upstream resource from the downstream resource.
-	if downstreamNamespace == "" {
-		namespaceLocator := shared.NewNamespaceLocator(upstreamObjLogicalCluster, c.syncTargetClusterName, c.syncTargetUID, c.syncTargetName, "")
-		namespaceLocatorJSONBytes, err := json.Marshal(namespaceLocator)
-		if err != nil {
-			return err
-		}
-		downstreamAnnotations[shared.NamespaceLocatorAnnotation] = string(namespaceLocatorJSONBytes)
-	}
-
-	// If we're left with 0 annotations, nil out the map so it's not included in the patch.
-	if len(downstreamAnnotations) == 0 {
-		downstreamAnnotations = nil
-	}
-	downstreamObj.SetAnnotations(downstreamAnnotations)
-
-	// Deletion fields are immutable and set by the downstream API server.
-	downstreamObj.SetDeletionTimestamp(nil)
-	downstreamObj.SetDeletionGracePeriodSeconds(nil)
-	// Strip owner references, to avoid orphaning by broken references,
-	// and make sure cascading deletion is only performed once upstream.
-	downstreamObj.SetOwnerReferences(nil)
-	// Strip finalizers to avoid the deletion of the downstream resource from being blocked.
-	downstreamObj.SetFinalizers(nil)
-
-	// Replace the upstream state label with the downstream cluster label. We don't want to leak upstream state machine
-	// state to downstream, and we also don't need downstream updates every time the upstream state machine changes.
-	labels := downstreamObj.GetLabels()
-	delete(labels, workloadv1alpha1.ClusterResourceStateLabelPrefix+c.syncTargetKey)
-	labels[workloadv1alpha1.InternalDownstreamClusterLabel] = c.syncTargetKey
-	downstreamObj.SetLabels(labels)
-
-	if c.advancedSchedulingEnabled {
-		specDiffPatch := upstreamObj.GetAnnotations()[workloadv1alpha1.ClusterSpecDiffAnnotationPrefix+c.syncTargetKey]
-		if specDiffPatch != "" {
-			upstreamSpec, specExists, err := unstructured.NestedFieldCopy(upstreamObj.UnstructuredContent(), "spec")
-			if err != nil {
-				return err
-			}
-			if specExists {
-				// TODO(jmprusi): Surface those errors to the user.
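-				// The annotation value is a JSON patch (RFC 6902): decode it, apply
-				// it to the JSON-serialized upstream spec, and graft the patched
-				// spec back onto the object that will be applied downstream.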
-				patch, err := jsonpatch.DecodePatch([]byte(specDiffPatch))
-				if err != nil {
-					logger.Error(err, "Failed to decode spec diff patch")
-					return err
-				}
-				upstreamSpecJSON, err := json.Marshal(upstreamSpec)
-				if err != nil {
-					return err
-				}
-				patchedUpstreamSpecJSON, err := patch.Apply(upstreamSpecJSON)
-				if err != nil {
-					return err
-				}
-				var newSpec map[string]interface{}
-				if err := json.Unmarshal(patchedUpstreamSpecJSON, &newSpec); err != nil {
-					return err
-				}
-				if err := unstructured.SetNestedMap(downstreamObj.UnstructuredContent(), newSpec, "spec"); err != nil {
-					return err
-				}
-			}
-		}
-	}
-
-	// Marshalling the unstructured object is good enough as an SSA apply patch.
-	data, err := json.Marshal(downstreamObj)
-	if err != nil {
-		return err
-	}
-
-	// Check if the resource is cluster-wide or namespaced and patch it appropriately.
-	if downstreamNamespace != "" {
-		_, err = c.downstreamClient.Resource(gvr).Namespace(downstreamNamespace).Patch(ctx, downstreamObj.GetName(), types.ApplyPatchType, data, metav1.PatchOptions{FieldManager: syncerApplyManager, Force: pointer.Bool(true)})
-	} else {
-		_, err = c.downstreamClient.Resource(gvr).Patch(ctx, downstreamObj.GetName(), types.ApplyPatchType, data, metav1.PatchOptions{FieldManager: syncerApplyManager, Force: pointer.Bool(true)})
-	}
-
-	if err != nil {
-		logger.Error(err, "Error upserting upstream resource to downstream")
-		return err
-	}
-	logger.Info("Upserted upstream resource to downstream")
-
-	return nil
-}
-
-// getTransformedName returns the downstream name for the synced object.
-func getTransformedName(syncedObject *unstructured.Unstructured) string {
-	configMapGVK := schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ConfigMap"}
-	secretGVK := schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Secret"}
-
-	if syncedObject.GroupVersionKind() == configMapGVK && syncedObject.GetName() == "kube-root-ca.crt" {
-		return "kcp-root-ca.crt"
-	}
-	// Only rename the default-token-* secrets that are owned by the default SA.
-	if syncedObject.GroupVersionKind() == secretGVK && strings.HasPrefix(syncedObject.GetName(), "default-token-") {
-		if saName, ok := syncedObject.GetAnnotations()[corev1.ServiceAccountNameKey]; ok && saName == "default" {
-			return "kcp-" + syncedObject.GetName()
-		}
-	}
-	return syncedObject.GetName()
-}
diff --git a/pkg/syncer/spec/spec_process_test.go b/pkg/syncer/spec/spec_process_test.go
deleted file mode 100644
index bd1a6b7343c..00000000000
--- a/pkg/syncer/spec/spec_process_test.go
+++ /dev/null
@@ -1,1612 +0,0 @@
-/*
-Copyright 2021 The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/ - -package spec - -import ( - "context" - "encoding/json" - "strings" - "sync" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - kcpcache "github.com/kcp-dev/apimachinery/v2/pkg/cache" - "github.com/kcp-dev/logicalcluster/v3" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - kcpfakedynamic "github.com/kcp-dev/client-go/third_party/k8s.io/client-go/dynamic/fake" - kcptesting "github.com/kcp-dev/client-go/third_party/k8s.io/client-go/testing" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/apimachinery/pkg/watch" - dynamicfake "k8s.io/client-go/dynamic/fake" - "k8s.io/client-go/informers" - kubefake "k8s.io/client-go/kubernetes/fake" - clienttesting "k8s.io/client-go/testing" - "k8s.io/client-go/tools/cache" - "k8s.io/klog/v2" - - ddsif "github.com/kcp-dev/kcp/pkg/informer" - "github.com/kcp-dev/kcp/pkg/syncer/indexers" - "github.com/kcp-dev/kcp/pkg/syncer/spec/dns" - "github.com/kcp-dev/kcp/pkg/syncer/spec/mutators" - kcpcorev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" -) - -var scheme *runtime.Scheme - -func init() { - scheme = runtime.NewScheme() - _ = corev1.AddToScheme(scheme) - _ = appsv1.AddToScheme(scheme) - _ = kcpcorev1alpha1.AddToScheme(scheme) -} - -type mockedCleaner struct { - lock sync.Mutex - toClean sets.Set[string] -} - -func (c *mockedCleaner) PlanCleaning(key string) { - c.lock.Lock() - defer c.lock.Unlock() - c.toClean.Insert(key) -} - -// CancelCleaning removes the key from the list of keys to be cleaned up. -// If it wasn't planned for deletion, it does nothing. 
-func (c *mockedCleaner) CancelCleaning(key string) {
-	c.lock.Lock()
-	defer c.lock.Unlock()
-	c.toClean.Delete(key)
-}
-
-func TestDeepEqualApartFromStatus(t *testing.T) {
-	type args struct {
-		a, b *unstructured.Unstructured
-	}
-	tests := []struct {
-		name string
-		args args
-		want bool
-	}{
-		{
-			name: "both objects are equal",
-			args: args{
-				a: &unstructured.Unstructured{
-					Object: map[string]interface{}{
-						"kind":       "test_kind",
-						"apiVersion": "test_version",
-						"metadata": map[string]interface{}{
-							"name":      "test_name",
-							"namespace": "test_namespace",
-							"labels": map[string]interface{}{
-								"test_label": "test_value",
-							},
-						},
-					},
-				},
-				b: &unstructured.Unstructured{
-					Object: map[string]interface{}{
-						"kind":       "test_kind",
-						"apiVersion": "test_version",
-						"metadata": map[string]interface{}{
-							"name":      "test_name",
-							"namespace": "test_namespace",
-							"labels": map[string]interface{}{
-								"test_label": "test_value",
-							},
-						},
-					},
-				},
-			},
-			want: true,
-		},
-		{
-			name: "both objects are equal while both are being deleted",
-			args: args{
-				a: &unstructured.Unstructured{
-					Object: map[string]interface{}{
-						"kind":              "test_kind",
-						"apiVersion":        "test_version",
-						"deletionTimestamp": "2010-11-10T23:00:00Z",
-						"metadata": map[string]interface{}{
-							"name":      "test_name",
-							"namespace": "test_namespace",
-						},
-					},
-				},
-				b: &unstructured.Unstructured{
-					Object: map[string]interface{}{
-						"kind":              "test_kind",
-						"apiVersion":        "test_version",
-						"deletionTimestamp": "2010-11-10T23:00:00Z",
-						"metadata": map[string]interface{}{
-							"name":      "test_name",
-							"namespace": "test_namespace",
-						},
-					},
-				},
-			},
-			want: true,
-		},
-		{
-			name: "both objects are equal even though they have different statuses",
-			args: args{
-				a: &unstructured.Unstructured{
-					Object: map[string]interface{}{
-						"kind":              "test_kind",
-						"apiVersion":        "test_version",
-						"deletionTimestamp": "2010-11-10T23:00:00Z",
-						"metadata": map[string]interface{}{
-							"name":      "test_name",
-							"namespace": "test_namespace",
-						},
-						"status": map[string]interface{}{},
-					},
-				},
-				b: &unstructured.Unstructured{
-					Object: map[string]interface{}{
-						"kind":              "test_kind",
-						"apiVersion":        "test_version",
-						"deletionTimestamp": "2010-11-10T23:00:00Z",
-						"metadata": map[string]interface{}{
-							"name":      "test_name",
-							"namespace": "test_namespace",
-						},
-						"status": map[string]interface{}{
-							"phase": "Failed",
-						},
-					},
-				},
-			},
-			want: true,
-		},
-		{
-			name: "both objects are equal even though they have different values in the InternalClusterStatusAnnotationPrefix annotation",
-			args: args{
-				a: &unstructured.Unstructured{
-					Object: map[string]interface{}{
-						"kind":       "test_kind",
-						"apiVersion": "test_version",
-						"metadata": map[string]interface{}{
-							"name":      "test_name",
-							"namespace": "test_namespace",
-							"labels": map[string]interface{}{
-								"test_label": "test_value",
-							},
-							"annotations": map[string]interface{}{
-								workloadv1alpha1.InternalClusterStatusAnnotationPrefix: "2",
-							},
-						},
-					},
-				},
-				b: &unstructured.Unstructured{
-					Object: map[string]interface{}{
-						"kind":       "test_kind",
-						"apiVersion": "test_version",
-						"metadata": map[string]interface{}{
-							"name":      "test_name",
-							"namespace": "test_namespace",
-							"labels": map[string]interface{}{
-								"test_label": "test_value",
-							},
-							"annotations": map[string]interface{}{
-								workloadv1alpha1.InternalClusterStatusAnnotationPrefix: "1",
-							},
-						},
-					},
-				},
-			},
-			want: true,
-		},
-		{
-			name: "not equal as b is missing labels",
-			args: args{
-				a: &unstructured.Unstructured{
-					Object: map[string]interface{}{
-						"kind":       "test_kind",
-						"apiVersion": "test_version",
-						"metadata": map[string]interface{}{
-							"name":      "test_name",
-							"namespace": "test_namespace",
-							"labels": map[string]interface{}{
-								"test_label": "test_value",
-							},
-						},
-					},
-				},
-				b: &unstructured.Unstructured{
-					Object: map[string]interface{}{
-						"kind":       "test_kind",
-						"apiVersion": "test_version",
-						"metadata": map[string]interface{}{
-							"name":      "test_name",
-							"namespace": "test_namespace",
-						},
-					},
-				},
-			},
-			want: false,
-		},
-		{
-			name: "not equal as b has different label values",
-			args: args{
-				a: &unstructured.Unstructured{
-					Object: map[string]interface{}{
-						"kind":       "test_kind",
-						"apiVersion": "test_version",
-						"metadata": map[string]interface{}{
-							"name":      "test_name",
-							"namespace": "test_namespace",
-							"labels": map[string]interface{}{
-								"test_label": "test_value",
-							},
-						},
-					},
-				},
-				b: &unstructured.Unstructured{
-					Object: map[string]interface{}{
-						"kind":       "test_kind",
-						"apiVersion": "test_version",
-						"metadata": map[string]interface{}{
-							"name":      "test_name",
-							"namespace": "test_namespace",
-							"labels": map[string]interface{}{
-								"test_label": "another_test_value",
-							},
-						},
-					},
-				},
-			},
-			want: false,
-		},
-		{
-			name: "not equal as the b resource is missing annotations",
-			args: args{
-				a: &unstructured.Unstructured{
-					Object: map[string]interface{}{
-						"kind":       "test_kind",
-						"apiVersion": "test_version",
-						"metadata": map[string]interface{}{
-							"name":      "test_name",
-							"namespace": "test_namespace",
-							"labels": map[string]interface{}{
-								"test_label": "test_value",
-							},
-							"annotations": map[string]interface{}{
-								"annotation": "this is an annotation",
-							},
-						},
-					},
-				},
-				b: &unstructured.Unstructured{
-					Object: map[string]interface{}{
-						"kind":       "test_kind",
-						"apiVersion": "test_version",
-						"metadata": map[string]interface{}{
-							"name":      "test_name",
-							"namespace": "test_namespace",
-							"labels": map[string]interface{}{
-								"test_label": "test_value",
-							},
-						},
-					},
-				},
-			},
-			want: false,
-		},
-		{
-			name: "not equal as the b resource has different annotations",
-			args: args{
-				a: &unstructured.Unstructured{
-					Object: map[string]interface{}{
-						"kind":       "test_kind",
-						"apiVersion": "test_version",
-						"metadata": map[string]interface{}{
-							"name":      "test_name",
-							"namespace": "test_namespace",
-							"labels": map[string]interface{}{
-								"test_label": "test_value",
-							},
-							"annotations": map[string]interface{}{
-								"annotation": "this is an annotation",
-							},
-						},
-					},
-				},
-				b: &unstructured.Unstructured{
-					Object: map[string]interface{}{
-						"kind":       "test_kind",
-						"apiVersion": "test_version",
-						"metadata": map[string]interface{}{
-							"name":      "test_name",
-							"namespace": "test_namespace",
-							"labels": map[string]interface{}{
-								"test_label": "test_value",
-							},
-							"annotations": map[string]interface{}{
-								"annotation":  "this is an annotation",
-								"annotation2": "this is another annotation",
-							},
-						},
-					},
-				},
-			},
-			want: false,
-		},
-		{
-			name: "not equal as the b resource has finalizers",
-			args: args{
-				a: &unstructured.Unstructured{
-					Object: map[string]interface{}{
-						"kind":       "test_kind",
-						"apiVersion": "test_version",
-						"metadata": map[string]interface{}{
-							"name":      "test_name",
-							"namespace": "test_namespace",
-							"labels": map[string]interface{}{
-								"test_label": "test_value",
-							},
-						},
-					},
-				},
-				b: &unstructured.Unstructured{
-					Object: map[string]interface{}{
-						"kind":       "test_kind",
-						"apiVersion": "test_version",
-						"metadata": map[string]interface{}{
-							"name":      "test_name",
-							"namespace": "test_namespace",
-							"labels": map[string]interface{}{
-								"test_label": "test_value",
-							},
-							"finalizers": []interface{}{
-								"finalizer.1",
-								"finalizer.2",
-							},
-						},
-					},
-				},
-			},
-			want: false,
-		},
-		{
-			name: "not equal, even though otherwise equal, as only A is being deleted",
-			args: args{
-				a: &unstructured.Unstructured{
-					Object: map[string]interface{}{
-						"kind":       "test_kind",
-						"apiVersion": "test_version",
-						"metadata": map[string]interface{}{
-							"name":              "test_name",
-							"namespace":         "test_namespace",
-							"deletionTimestamp": "2010-11-10T23:00:00Z",
-							"labels": map[string]interface{}{
-								"test_label": "test_value",
-							},
-							"annotations": map[string]interface{}{
-								"test_annotation": "test_value",
-							},
-							"finalizers": []interface{}{
-								"finalizer.1",
-								"finalizer.2",
-							},
-						},
-					},
-				},
-				b: &unstructured.Unstructured{
-					Object: map[string]interface{}{
-						"kind":       "test_kind",
-						"apiVersion": "test_version",
-						"metadata": map[string]interface{}{
-							"name":      "test_name",
-							"namespace": "test_namespace",
-							"labels": map[string]interface{}{
-								"test_label": "test_value",
-							},
-							"annotations": map[string]interface{}{
-								"test_annotation": "test_value",
-							},
-							"finalizers": []interface{}{
-								"finalizer.1",
-								"finalizer.2",
-							},
-						},
-					},
-				},
-			},
-			want: false,
-		},
-		{
-			name: "not equal as b has additional fields compared to a",
-			args: args{
-				a: &unstructured.Unstructured{
-					Object: map[string]interface{}{
-						"kind":       "test_kind",
-						"apiVersion": "test_version",
-						"metadata": map[string]interface{}{
-							"name":      "test_name",
-							"namespace": "test_namespace",
-						},
-					},
-				},
-				b: &unstructured.Unstructured{
-					Object: map[string]interface{}{
-						"kind":       "test_kind",
-						"apiVersion": "test_version",
-						"metadata": map[string]interface{}{
-							"name":      "test_name",
-							"namespace": "test_namespace",
-						},
-						"other_key": "other_value",
-					},
-				},
-			},
-			want: false,
-		},
-	}
-	logger := klog.Background()
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			if got := deepEqualApartFromStatus(logger, tt.args.a, tt.args.b); got != tt.want {
-				t.Errorf("deepEqualApartFromStatus() = %v, want %v", got, tt.want)
-			}
-		})
-	}
-}
-
-var _ ddsif.GVRSource = (*mockedGVRSource)(nil)
-
-type mockedGVRSource struct {
-}
-
-func (s *mockedGVRSource) GVRs() map[schema.GroupVersionResource]ddsif.GVRPartialMetadata {
-	return map[schema.GroupVersionResource]ddsif.GVRPartialMetadata{
-		appsv1.SchemeGroupVersion.WithResource("deployments"): {
-			Names: apiextensionsv1.CustomResourceDefinitionNames{
-				Singular: "deployment",
-				Kind:     "Deployment",
-			},
-		},
-		{
-			Version:  "v1",
-			Resource: "namespaces",
-		}: {
-			Scope: apiextensionsv1.ClusterScoped,
-			Names: apiextensionsv1.CustomResourceDefinitionNames{
-				Singular: "namespace",
-				Kind:     "Namespace",
-			},
-		},
-		{
-			Version:  "v1",
-			Resource: "configmaps",
-		}: {
-			Scope: apiextensionsv1.NamespaceScoped,
-			Names: apiextensionsv1.CustomResourceDefinitionNames{
-				Singular: "configmap",
-				Kind:     "ConfigMap",
-			},
-		},
-		{
-			Version:  "v1",
-			Resource: "secrets",
-		}: {
-			Scope: apiextensionsv1.NamespaceScoped,
-			Names: apiextensionsv1.CustomResourceDefinitionNames{
-				Singular: "secret",
-				Kind:     "Secret",
-			},
-		},
-	}
-}
-
-func (s *mockedGVRSource) Ready() bool {
-	return true
-}
-
-func (s *mockedGVRSource) Subscribe() <-chan struct{} {
-	return make(<-chan struct{})
-}
-
-func TestSpecSyncerProcess(t *testing.T) {
-	tests := map[string]struct {
-		fromNamespace *corev1.Namespace
-		gvr           schema.GroupVersionResource
-		fromResources []runtime.Object
-		toResources   []runtime.Object
-
-		resourceToProcessName               string
-		resourceToProcessLogicalClusterName string
-
-		upstreamURL            string
-		upstreamLogicalCluster
logicalcluster.Name - syncTargetName string - syncTargetClusterName logicalcluster.Name - syncTargetUID types.UID - advancedSchedulingEnabled bool - - expectError bool - expectActionsOnFrom []kcptesting.Action - expectActionsOnTo []clienttesting.Action - expectNSCleaningPlanned []string - }{ - "SpecSyncer sync deployment to downstream, upstream gets patched with the finalizer and the object is not created downstream (will be in the next reconciliation)": { - upstreamLogicalCluster: "root:org:ws", - fromNamespace: namespace("test", "root:org:ws", map[string]string{ - "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", - }, nil), - gvr: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}, - fromResources: []runtime.Object{ - secret("default-token-abc", "test", "root:org:ws", - map[string]string{"state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Sync"}, - map[string]string{"kubernetes.io/service-account.name": "default"}, - map[string][]byte{ - "token": []byte("token"), - "namespace": []byte("namespace"), - }), - deployment("theDeployment", "test", "root:org:ws", map[string]string{ - "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Sync", - }, map[string]string{ - "internal.workload.kcp.io/workspace-url": "https://kcp.io/clusters/clusterName", - }, nil), - }, - toResources: []runtime.Object{ - dns.MakeServiceAccount("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n"), - dns.MakeRole("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n"), - dns.MakeRoleBinding("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n"), - dns.MakeDeployment("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n", "dnsimage"), - service("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n"), - endpoints("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n"), - }, - resourceToProcessLogicalClusterName: "root:org:ws", - resourceToProcessName: "theDeployment", - syncTargetName: "us-west1", - - expectActionsOnFrom: []kcptesting.Action{ - updateDeploymentAction("test", - toUnstructured(t, changeDeployment( - deployment("theDeployment", "test", "root:org:ws", map[string]string{ - "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Sync", - }, map[string]string{ - "internal.workload.kcp.io/workspace-url": "https://kcp.io/clusters/clusterName", - }, []string{"workload.kcp.io/syncer-6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g"}), - ))), - }, - expectActionsOnTo: []clienttesting.Action{ - createNamespaceSingleClusterAction( - "", - changeUnstructured( - toUnstructured(t, namespace("kcp-33jbiactwhg0", "", - map[string]string{ - "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", - "kcp.io/tenant-id": "9Fn3Q4y5UDPmCOrYCujwdgCbD9SwOcKdcefYE7", - }, - map[string]string{ - "kcp.io/namespace-locator": `{"syncTarget":{"cluster":"root:org:ws","name":"us-west1","uid":"syncTargetUID"},"cluster":"root:org:ws","namespace":"test"}`, - })), - removeNilOrEmptyFields, - ), - ), - }, - }, - "SpecSyncer sync to downstream, syncer finalizer already there": { - upstreamLogicalCluster: "root:org:ws", - fromNamespace: namespace("test", "root:org:ws", map[string]string{ - "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Sync", - }, nil), - gvr: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}, - fromResources: []runtime.Object{ - secret("default-token-abc", "test", "root:org:ws", - 
map[string]string{"state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Sync"}, - map[string]string{"kubernetes.io/service-account.name": "default"}, - map[string][]byte{ - "token": []byte("token"), - "namespace": []byte("namespace"), - }), - deployment("theDeployment", "test", "root:org:ws", map[string]string{ - "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Sync", - }, map[string]string{ - "internal.workload.kcp.io/workspace-url": "https://kcp.io/clusters/clusterName", - }, []string{"workload.kcp.io/syncer-6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g"}), - }, - toResources: []runtime.Object{ - dns.MakeServiceAccount("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n"), - dns.MakeRole("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n"), - dns.MakeRoleBinding("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n"), - dns.MakeDeployment("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n", "dnsimage"), - service("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n"), - endpoints("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n"), - }, - resourceToProcessLogicalClusterName: "root:org:ws", - resourceToProcessName: "theDeployment", - syncTargetName: "us-west1", - - expectActionsOnFrom: []kcptesting.Action{}, - expectActionsOnTo: []clienttesting.Action{ - createNamespaceSingleClusterAction( - "", - changeUnstructured( - toUnstructured(t, namespace("kcp-33jbiactwhg0", "", - map[string]string{ - "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", - "kcp.io/tenant-id": "9Fn3Q4y5UDPmCOrYCujwdgCbD9SwOcKdcefYE7", - }, - map[string]string{ - "kcp.io/namespace-locator": `{"syncTarget":{"cluster":"root:org:ws","name":"us-west1","uid":"syncTargetUID"},"cluster":"root:org:ws","namespace":"test"}`, - })), - removeNilOrEmptyFields, - ), - ), - patchDeploymentSingleClusterAction( - "theDeployment", - "kcp-33jbiactwhg0", - types.ApplyPatchType, - toJson(t, - changeUnstructured( - toUnstructured(t, deployment("theDeployment", "kcp-33jbiactwhg0", "", map[string]string{ - "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", - }, map[string]string{ - "internal.workload.kcp.io/workspace-url": "https://kcp.io/clusters/clusterName", - }, nil)), - setNestedField(map[string]interface{}{}, "status"), - setPodSpec("spec", "template", "spec"), - ), - ), - ), - }, - }, - "SpecSyncer upstream resource has been removed, expect deletion downstream": { - upstreamLogicalCluster: "root:org:ws", - fromNamespace: namespace("test", "root:org:ws", map[string]string{ - "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Sync", - }, nil), - gvr: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}, - fromResources: []runtime.Object{ - secret("default-token-abc", "test", "root:org:ws", - map[string]string{"state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Sync"}, - map[string]string{"kubernetes.io/service-account.name": "default"}, - map[string][]byte{ - "token": []byte("token"), - "namespace": []byte("namespace"), - }), - }, - toResources: []runtime.Object{ - namespace("kcp-33jbiactwhg0", "", map[string]string{ - "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", - }, - map[string]string{ - "kcp.io/namespace-locator": `{"syncTarget":{"cluster":"root:org:ws","name":"us-west1","uid":"syncTargetUID"},"cluster":"root:org:ws","namespace":"test"}`, - }), - deployment("theDeployment", "kcp-33jbiactwhg0", "root:org:ws", map[string]string{ - 
"internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", - }, map[string]string{ - "internal.workload.kcp.io/workspace-url": "https://kcp.io/clusters/clusterName", - }, []string{"workload.kcp.io/syncer-6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g"}), - dns.MakeServiceAccount("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n"), - dns.MakeRole("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n"), - dns.MakeRoleBinding("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n"), - dns.MakeDeployment("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n", "dnsimage"), - service("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n"), - endpoints("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n"), - }, - resourceToProcessLogicalClusterName: "root:org:ws", - resourceToProcessName: "theDeployment", - syncTargetName: "us-west1", - - expectActionsOnFrom: []kcptesting.Action{}, - expectActionsOnTo: []clienttesting.Action{ - deleteDeploymentSingleClusterAction( - "theDeployment", - "kcp-33jbiactwhg0", - ), - }, - expectNSCleaningPlanned: []string{"kcp-33jbiactwhg0"}, - }, - "SpecSyncer deletion: object exist downstream": { - upstreamLogicalCluster: "root:org:ws", - fromNamespace: namespace("test", "root:org:ws", map[string]string{ - "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Sync", - }, nil), - gvr: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}, - toResources: []runtime.Object{ - namespace("kcp-33jbiactwhg0", "", map[string]string{ - "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", - "kcp.io/tenant-id": "9Fn3Q4y5UDPmCOrYCujwdgCbD9SwOcKdcefYE7", - }, - map[string]string{ - "kcp.io/namespace-locator": `{"syncTarget":{"cluster":"root:org:ws","name":"us-west1","uid":"syncTargetUID"},"cluster":"root:org:ws","namespace":"test"}`, - }), - deployment("theDeployment", "kcp-33jbiactwhg0", "root:org:ws", map[string]string{ - "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", - }, nil, []string{"workload.kcp.io/syncer-6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g"}), - dns.MakeServiceAccount("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n"), - dns.MakeRole("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n"), - dns.MakeRoleBinding("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n"), - dns.MakeDeployment("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n", "dnsimage"), - service("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n"), - endpoints("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n"), - }, - fromResources: []runtime.Object{ - secret("default-token-abc", "test", "root:org:ws", - map[string]string{"state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Sync"}, - map[string]string{"kubernetes.io/service-account.name": "default"}, - map[string][]byte{ - "token": []byte("token"), - "namespace": []byte("namespace"), - }), - deployment("theDeployment", "test", "root:org:ws", - map[string]string{"state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Sync"}, - map[string]string{ - "internal.workload.kcp.io/workspace-url": "https://kcp.io/clusters/clusterName", - "deletion.internal.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": time.Now().Format(time.RFC3339), - }, - []string{"workload.kcp.io/syncer-6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g"}), - }, - resourceToProcessLogicalClusterName: "root:org:ws", - resourceToProcessName: "theDeployment", - syncTargetName: "us-west1", - - expectActionsOnFrom: 
[]kcptesting.Action{}, - expectActionsOnTo: []clienttesting.Action{ - deleteDeploymentSingleClusterAction( - "theDeployment", - "kcp-33jbiactwhg0", - ), - }, - expectNSCleaningPlanned: []string{"kcp-33jbiactwhg0"}, - }, - "SpecSyncer deletion: object does not exists downstream, upstream finalizer should be removed": { - upstreamLogicalCluster: "root:org:ws", - fromNamespace: namespace("test", "root:org:ws", map[string]string{ - "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", - }, nil), - gvr: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}, - toResources: []runtime.Object{ - namespace("kcp-33jbiactwhg0", "", map[string]string{ - "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", - "kcp.io/tenant-id": "9Fn3Q4y5UDPmCOrYCujwdgCbD9SwOcKdcefYE7", - }, - map[string]string{ - "kcp.io/namespace-locator": `{"syncTarget":{"cluster":"root:org:ws","name":"us-west1","uid":"syncTargetUID"},"cluster":"root:org:ws","namespace":"test"}`, - }), - dns.MakeServiceAccount("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n"), - dns.MakeRole("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n"), - dns.MakeRoleBinding("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n"), - dns.MakeDeployment("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n", "dnsimage"), - service("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n"), - endpoints("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n"), - }, - fromResources: []runtime.Object{ - secret("default-token-abc", "test", "root:org:ws", - map[string]string{"state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Sync"}, - map[string]string{"kubernetes.io/service-account.name": "default"}, - map[string][]byte{ - "token": []byte("token"), - "namespace": []byte("namespace"), - }), - deployment("theDeployment", "test", "root:org:ws", - map[string]string{"state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Sync"}, - map[string]string{ - "another.valid.annotation/this": "value", - "internal.workload.kcp.io/workspace-url": "https://kcp.io/clusters/clusterName", - "deletion.internal.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": time.Now().Format(time.RFC3339), - }, - []string{"workload.kcp.io/syncer-6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g"}), - }, - resourceToProcessLogicalClusterName: "root:org:ws", - resourceToProcessName: "theDeployment", - syncTargetName: "us-west1", - expectActionsOnFrom: []kcptesting.Action{ - updateDeploymentAction("test", - changeUnstructured( - toUnstructured(t, changeDeployment( - deployment("theDeployment", "test", "root:org:ws", nil, map[string]string{ - "another.valid.annotation/this": "value", - "internal.workload.kcp.io/workspace-url": "https://kcp.io/clusters/clusterName", - }, nil), - ), - ), - // TODO(jmprusi): Those next changes do "nothing", it's just for the test to pass - // as the test expects some null fields to be there... 
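- // (Semantically, the expected update above removes the syncer finalizer, the state label, and - // the deletion annotation while keeping unrelated annotations; the setNestedField calls that - // follow only normalize empty or nil fields so the comparison is exact.)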
- setNestedField(map[string]interface{}{}, "metadata", "labels"), - setNestedField([]interface{}{}, "metadata", "finalizers"), - setNestedField(nil, "spec", "selector"), - )), - }, - expectActionsOnTo: []clienttesting.Action{ - deleteDeploymentSingleClusterAction( - "theDeployment", - "kcp-33jbiactwhg0", - ), - }, - }, - "SpecSyncer deletion: upstream object has external finalizer, the object shouldn't be deleted": { - upstreamLogicalCluster: "root:org:ws", - fromNamespace: namespace("test", "root:org:ws", map[string]string{ - "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Sync", - }, nil), - gvr: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}, - toResources: []runtime.Object{ - namespace("kcp-33jbiactwhg0", "", map[string]string{ - "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", - "kcp.io/tenant-id": "9Fn3Q4y5UDPmCOrYCujwdgCbD9SwOcKdcefYE7", - }, - map[string]string{ - "kcp.io/namespace-locator": `{"syncTarget":{"cluster":"root:org:ws","name":"us-west1","uid":"syncTargetUID"},"cluster":"root:org:ws","namespace":"test"}`, - }), - deployment("theDeployment", "kcp-33jbiactwhg0", "", map[string]string{ - "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", - }, nil, nil), - dns.MakeServiceAccount("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n"), - dns.MakeRole("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n"), - dns.MakeRoleBinding("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n"), - dns.MakeDeployment("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n", "dnsimage"), - service("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n"), - endpoints("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n"), - }, - fromResources: []runtime.Object{ - secret("default-token-abc", "test", "root:org:ws", - map[string]string{"state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Sync"}, - map[string]string{"kubernetes.io/service-account.name": "default"}, - map[string][]byte{ - "token": []byte("token"), - "namespace": []byte("namespace"), - }), - deployment("theDeployment", "test", "root:org:ws", - map[string]string{"state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Sync"}, - map[string]string{ - "deletion.internal.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": time.Now().Format(time.RFC3339), - "internal.workload.kcp.io/workspace-url": "https://kcp.io/clusters/clusterName", - "finalizers.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "another-controller-finalizer", - }, - []string{"workload.kcp.io/syncer-6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g"}), - }, - resourceToProcessLogicalClusterName: "root:org:ws", - resourceToProcessName: "theDeployment", - syncTargetName: "us-west1", - - expectActionsOnFrom: []kcptesting.Action{}, - expectActionsOnTo: []clienttesting.Action{ - patchDeploymentSingleClusterAction( - "theDeployment", - "kcp-33jbiactwhg0", - types.ApplyPatchType, - toJson(t, - changeUnstructured( - toUnstructured(t, deployment("theDeployment", "kcp-33jbiactwhg0", "", map[string]string{ - "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", - }, map[string]string{ - "deletion.internal.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": time.Now().Format(time.RFC3339), - "internal.workload.kcp.io/workspace-url": "https://kcp.io/clusters/clusterName", - "finalizers.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "another-controller-finalizer", - }, nil)), - // TODO(jmprusi): 
Those next changes do "nothing", it's just for the test to pass - // as the test expects some null fields to be there... - setNestedField(nil, "spec", "selector"), - setNestedField(map[string]interface{}{}, "spec", "strategy"), - setNestedField(map[string]interface{}{ - "metadata": map[string]interface{}{ - "creationTimestamp": nil, - }, - "spec": map[string]interface{}{ - "containers": nil, - }, - }, "spec", "template"), - setNestedField(map[string]interface{}{}, "status"), - setPodSpec("spec", "template", "spec"), - ), - ), - ), - }, - }, - "SpecSyncer with AdvancedScheduling, sync deployment to downstream and apply SpecDiff": { - upstreamLogicalCluster: "root:org:ws", - fromNamespace: namespace("test", "root:org:ws", map[string]string{ - "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", - }, nil), - gvr: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}, - fromResources: []runtime.Object{ - secret("default-token-abc", "test", "root:org:ws", - map[string]string{"state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Sync"}, - map[string]string{"kubernetes.io/service-account.name": "default"}, - map[string][]byte{ - "token": []byte("token"), - "namespace": []byte("namespace"), - }), - deployment("theDeployment", "test", "root:org:ws", - map[string]string{ - "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Sync", - }, - map[string]string{ - "experimental.spec-diff.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "[{\"op\":\"replace\",\"path\":\"/replicas\",\"value\":3}]", - "internal.workload.kcp.io/workspace-url": "https://kcp.io/clusters/clusterName", - }, - []string{"workload.kcp.io/syncer-6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g"}), - }, - toResources: []runtime.Object{ - dns.MakeServiceAccount("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n"), - dns.MakeRole("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n"), - dns.MakeRoleBinding("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n"), - dns.MakeDeployment("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n", "dnsimage"), - service("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n"), - endpoints("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n"), - }, - resourceToProcessLogicalClusterName: "root:org:ws", - resourceToProcessName: "theDeployment", - syncTargetName: "us-west1", - advancedSchedulingEnabled: true, - - expectActionsOnFrom: []kcptesting.Action{}, - expectActionsOnTo: []clienttesting.Action{ - createNamespaceSingleClusterAction( - "", - changeUnstructured( - toUnstructured(t, namespace("kcp-33jbiactwhg0", "", - map[string]string{ - "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", - "kcp.io/tenant-id": "9Fn3Q4y5UDPmCOrYCujwdgCbD9SwOcKdcefYE7", - }, - map[string]string{ - "kcp.io/namespace-locator": `{"syncTarget":{"cluster":"root:org:ws","name":"us-west1","uid":"syncTargetUID"},"cluster":"root:org:ws","namespace":"test"}`, - })), - removeNilOrEmptyFields, - ), - ), - patchDeploymentSingleClusterAction( - "theDeployment", - "kcp-33jbiactwhg0", - types.ApplyPatchType, - toJson(t, - changeUnstructured( - toUnstructured(t, deployment("theDeployment", "kcp-33jbiactwhg0", "", map[string]string{ - "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", - }, map[string]string{ - "experimental.spec-diff.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "[{\"op\":\"replace\",\"path\":\"/replicas\",\"value\":3}]", - 
"internal.workload.kcp.io/workspace-url": "https://kcp.io/clusters/clusterName", - }, nil)), - setNestedField(map[string]interface{}{ - "replicas": int64(3), - }, "spec"), - // TODO(jmprusi): Those next changes do "nothing", it's just for the test to pass - // as the test expects some null fields to be there... - setNestedField(nil, "spec", "selector"), - setNestedField(map[string]interface{}{}, "spec", "strategy"), - setNestedField(map[string]interface{}{ - "metadata": map[string]interface{}{ - "creationTimestamp": nil, - }, - "spec": map[string]interface{}{ - "containers": nil, - }, - }, "spec", "template"), - setNestedField(map[string]interface{}{}, "status"), - ), - ), - ), - }, - }, - "SpecSyncer namespace conflict: try to sync to an already existing namespace with a different namespace-locator, expect error": { - upstreamLogicalCluster: "root:org:ws", - fromNamespace: namespace("test", "root:org:ws", map[string]string{ - "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", - }, nil), - gvr: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}, - fromResources: []runtime.Object{ - secret("default-token-abc", "test", "root:org:ws", - map[string]string{"state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Sync"}, - map[string]string{"kubernetes.io/service-account.name": "default"}, - map[string][]byte{ - "token": []byte("token"), - "namespace": []byte("namespace"), - }), - deployment("theDeployment", "test", "root:org:ws", map[string]string{ - "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Sync", - }, map[string]string{ - "internal.workload.kcp.io/workspace-url": "https://kcp.io/clusters/clusterName", - }, []string{"workload.kcp.io/syncer-6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g"}), - }, - toResources: []runtime.Object{ - namespace("kcp-33jbiactwhg0", "", map[string]string{ - "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", - }, map[string]string{ - "kcp.io/namespace-locator": `{"syncTarget":{"cluster":"root:org:ws","name":"us-west1","uid":"syncTargetUID"},"cluster":"root:org:ws","namespace":"ANOTHERNAMESPACE"}`, - }), - }, - resourceToProcessLogicalClusterName: "root:org:ws", - resourceToProcessName: "theDeployment", - syncTargetName: "us-west1", - expectError: true, - expectActionsOnFrom: []kcptesting.Action{}, - expectActionsOnTo: []clienttesting.Action{}, - }, - "SpecSyncer namespace conflict: try to sync to an already existing namespace without a namespace-locator, expect error": { - upstreamLogicalCluster: "root:org:ws", - fromNamespace: namespace("test", "root:org:ws", map[string]string{ - "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Sync", - }, nil), - gvr: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}, - fromResources: []runtime.Object{ - secret("default-token-abc", "test", "root:org:ws", - map[string]string{"state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Sync"}, - map[string]string{"kubernetes.io/service-account.name": "default"}, - map[string][]byte{ - "token": []byte("token"), - "namespace": []byte("namespace"), - }), - deployment("theDeployment", "test", "root:org:ws", map[string]string{ - "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Sync", - }, map[string]string{ - "internal.workload.kcp.io/workspace-url": "https://kcp.io/clusters/clusterName", - }, []string{"workload.kcp.io/syncer-6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g"}), - }, - toResources: []runtime.Object{ - 
namespace("kcp-33jbiactwhg0", "", map[string]string{ - "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", - }, map[string]string{}, - ), - }, - resourceToProcessLogicalClusterName: "root:org:ws", - resourceToProcessName: "theDeployment", - syncTargetName: "us-west1", - expectError: true, - expectActionsOnFrom: []kcptesting.Action{}, - expectActionsOnTo: []clienttesting.Action{}, - }, - "old v0.6.0 namespace locator exists downstream": { - upstreamLogicalCluster: "root:org:ws", - fromNamespace: namespace("test", "root:org:ws", map[string]string{ - "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Sync", - }, nil), - gvr: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "secrets"}, - fromResources: []runtime.Object{ - secretWithFinalizers("foo", "test", "root:org:ws", - map[string]string{ - "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Sync", - "something": "else", - }, - map[string]string{ - "internal.workload.kcp.io/workspace-url": "https://kcp.io/clusters/clusterName", - }, - []string{"workload.kcp.io/syncer-6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g"}, - map[string][]byte{ - "a": []byte("b"), - }), - }, - toResources: []runtime.Object{ - namespace("kcp-01c0zzvlqsi7n", "", map[string]string{ - "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", - "kcp.io/tenant-id": "9Fn3Q4y5UDPmCOrYCujwdgCbD9SwOcKdcefYE7"}, - map[string]string{ - "kcp.io/namespace-locator": `{"syncTarget":{"path":"root:org:ws","name":"us-west1","uid":"syncTargetUID"},"cluster":"root:org:ws","namespace":"test"}`, - }), - secret("foo", "test", "root:org:ws", - map[string]string{"state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Sync"}, - nil, - map[string][]byte{ - "a": []byte("b"), - }), - dns.MakeServiceAccount("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n"), - dns.MakeRole("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n"), - dns.MakeRoleBinding("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n"), - dns.MakeDeployment("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n", "dnsimage"), - service("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n"), - endpoints("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n"), - }, - resourceToProcessLogicalClusterName: "root:org:ws", - resourceToProcessName: "foo", - syncTargetName: "us-west1", - - expectActionsOnFrom: []kcptesting.Action{}, - expectActionsOnTo: []clienttesting.Action{ - patchSecretSingleClusterAction( - "foo", - "kcp-01c0zzvlqsi7n", - types.ApplyPatchType, - []byte(`{"apiVersion":"v1","data":{"a":"Yg=="},"kind":"Secret","metadata":{"annotations":{"internal.workload.kcp.io/workspace-url":"https://kcp.io/clusters/clusterName"},"creationTimestamp":null,"labels":{"internal.workload.kcp.io/cluster":"6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g","something":"else"},"name":"foo","namespace":"kcp-01c0zzvlqsi7n"},"type":"kubernetes.io/service-account-token"}`), - ), - }, - }, - "tenant label is added to existing downstream namespaces": { - upstreamLogicalCluster: "root:org:ws", - fromNamespace: namespace("test", "root:org:ws", map[string]string{ - "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Sync", - }, nil), - gvr: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}, - fromResources: []runtime.Object{ - secret("default-token-abc", "test", "root:org:ws", - map[string]string{"state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Sync"}, - 
map[string]string{"kubernetes.io/service-account.name": "default"}, - map[string][]byte{ - "token": []byte("token"), - "namespace": []byte("namespace"), - }), - deployment("theDeployment", "test", "root:org:ws", map[string]string{ - "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Sync", - }, map[string]string{ - "internal.workload.kcp.io/workspace-url": "https://kcp.io/clusters/clusterName", - }, []string{"workload.kcp.io/syncer-6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g"}), - }, - toResources: []runtime.Object{ - namespace("kcp-01c0zzvlqsi7n", "", map[string]string{ - "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g"}, - map[string]string{ - "kcp.io/namespace-locator": `{"syncTarget":{"path":"root:org:ws","name":"us-west1","uid":"syncTargetUID"},"cluster":"root:org:ws","namespace":"test"}`, - }), - dns.MakeServiceAccount("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n"), - dns.MakeRole("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n"), - dns.MakeRoleBinding("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n"), - dns.MakeDeployment("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n", "dnsimage"), - service("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n"), - endpoints("kcp-dns-us-west1-1nuzj7pw-2fcy2vpi", "kcp-01c0zzvlqsi7n"), - }, - resourceToProcessLogicalClusterName: "root:org:ws", - resourceToProcessName: "theDeployment", - syncTargetName: "us-west1", - - expectActionsOnFrom: []kcptesting.Action{}, - expectActionsOnTo: []clienttesting.Action{ - updateNamespaceAction("kcp-01c0zzvlqsi7n", - toUnstructured(t, namespace("kcp-01c0zzvlqsi7n", "", map[string]string{ - "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", - "kcp.io/tenant-id": "9Fn3Q4y5UDPmCOrYCujwdgCbD9SwOcKdcefYE7"}, - map[string]string{ - "kcp.io/namespace-locator": `{"syncTarget":{"path":"root:org:ws","name":"us-west1","uid":"syncTargetUID"},"cluster":"root:org:ws","namespace":"test"}`, - }))), - patchDeploymentSingleClusterAction( - "theDeployment", - "kcp-01c0zzvlqsi7n", - types.ApplyPatchType, - toJson(t, - changeUnstructured( - toUnstructured(t, deployment("theDeployment", "kcp-01c0zzvlqsi7n", "", map[string]string{ - "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", - }, map[string]string{ - "internal.workload.kcp.io/workspace-url": "https://kcp.io/clusters/clusterName", - }, nil)), - setNestedField(map[string]interface{}{}, "status"), - setPodSpec("spec", "template", "spec"), - ), - ), - ), - }, - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - logger := klog.FromContext(ctx) - - kcpLogicalCluster := tc.upstreamLogicalCluster - syncTargetUID := tc.syncTargetUID - if tc.syncTargetUID == "" { - syncTargetUID = types.UID("syncTargetUID") - } - - if tc.syncTargetClusterName.Empty() { - tc.syncTargetClusterName = "root:org:ws" - } - - var allFromResources []runtime.Object - allFromResources = append(allFromResources, tc.fromNamespace) - if tc.fromResources != nil { - allFromResources = append(allFromResources, tc.fromResources...) - } - - fromClusterClient := kcpfakedynamic.NewSimpleDynamicClient(scheme, allFromResources...) - - syncTargetKey := workloadv1alpha1.ToSyncTargetKey(tc.syncTargetClusterName, tc.syncTargetName) - - toClient := dynamicfake.NewSimpleDynamicClient(scheme, tc.toResources...) - toKubeClient := kubefake.NewSimpleClientset(tc.toResources...) 
- - ddsifForUpstreamSyncer, err := ddsif.NewDiscoveringDynamicSharedInformerFactory(fromClusterClient, nil, nil, &mockedGVRSource{}, cache.Indexers{}) - require.NoError(t, err) - - ddsifForDownstream, err := ddsif.NewScopedDiscoveringDynamicSharedInformerFactory(toClient, nil, - func(o *metav1.ListOptions) { - o.LabelSelector = workloadv1alpha1.InternalDownstreamClusterLabel + "=" + syncTargetKey - }, - &mockedGVRSource{}, - cache.Indexers{ - indexers.ByNamespaceLocatorIndexName: indexers.IndexByNamespaceLocator, - }, - ) - require.NoError(t, err) - - setupServersideApplyPatchReactor(toClient) - resourceWatcherStarted := setupWatchReactor(tc.gvr.Resource, fromClusterClient) - - // toInformerFactory to watch some DNS-related resources in the dns namespace - toInformerFactory := informers.NewSharedInformerFactoryWithOptions(toKubeClient, time.Hour, - informers.WithNamespace("kcp-01c0zzvlqsi7n")) - - require.NoError(t, err) - - mockedCleaner := &mockedCleaner{ - toClean: sets.New[string](), - } - - secretMutator := mutators.NewSecretMutator() - podspecableMutator := mutators.NewPodspecableMutator( - func(clusterName logicalcluster.Name) (*ddsif.DiscoveringDynamicSharedInformerFactory, error) { - return ddsifForUpstreamSyncer, nil - }, toInformerFactory.Core().V1().Services().Lister(), tc.syncTargetClusterName, tc.syncTargetName, syncTargetUID, "kcp-01c0zzvlqsi7n", false) - - dnsProcessor := dns.NewDNSProcessor(toKubeClient, toInformerFactory, tc.syncTargetName, syncTargetUID, "kcp-01c0zzvlqsi7n", "dnsimage") - controller, err := NewSpecSyncer(logger, kcpLogicalCluster, tc.syncTargetName, syncTargetKey, tc.advancedSchedulingEnabled, - fromClusterClient, toClient, toKubeClient, ddsifForUpstreamSyncer, ddsifForDownstream, mockedCleaner, syncTargetUID, - "kcp-01c0zzvlqsi7n", dnsProcessor, "dnsimage", secretMutator, podspecableMutator) - require.NoError(t, err) - - toInformerFactory.Start(ctx.Done()) - toInformerFactory.WaitForCacheSync(ctx.Done()) - - ddsifForUpstreamSyncer.Start(ctx.Done()) - ddsifForDownstream.Start(ctx.Done()) - - go ddsifForUpstreamSyncer.StartWorker(ctx) - go ddsifForDownstream.StartWorker(ctx) - - <-resourceWatcherStarted - - // The only GVRs we care about are the 4 listed below - t.Logf("waiting for upstream and downstream dynamic informer factories to be synced") - gvrs := sets.New[string]( - schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"}.String(), - schema.GroupVersionResource{Group: "", Version: "v1", Resource: "configmaps"}.String(), - schema.GroupVersionResource{Group: "", Version: "v1", Resource: "secrets"}.String(), - schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}.String(), - ) - require.Eventually(t, func() bool { - syncedUpstream, _ := ddsifForUpstreamSyncer.Informers() - foundUpstream := sets.New[string]() - for gvr := range syncedUpstream { - foundUpstream.Insert(gvr.String()) - } - - syncedDownstream, _ := ddsifForDownstream.Informers() - foundDownstream := sets.New[string]() - for gvr := range syncedDownstream { - foundDownstream.Insert(gvr.String()) - } - return foundUpstream.IsSuperset(gvrs) && foundDownstream.IsSuperset(gvrs) - }, wait.ForeverTestTimeout, 100*time.Millisecond) - t.Logf("upstream and downstream dynamic informer factories are synced") - - // Now that we know the informer factories have the GVRs we care about synced, we need to clear the - // actions so our expectations will be accurate. 
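- // Only the actions triggered by the controller.process call below should be asserted, not the - // LIST/WATCH traffic recorded while the informer caches warmed up.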
- fromClusterClient.ClearActions() - toClient.ClearActions() - - key := kcpcache.ToClusterAwareKey(tc.resourceToProcessLogicalClusterName, tc.fromNamespace.Name, tc.resourceToProcessName) - _, err = controller.process(context.Background(), - tc.gvr, - key, - ) - if tc.expectError { - require.Error(t, err) - } else { - require.NoError(t, err) - } - assert.Empty(t, cmp.Diff(tc.expectActionsOnFrom, fromClusterClient.Actions(), cmp.AllowUnexported(logicalcluster.Path{}))) - assert.Empty(t, cmp.Diff(tc.expectActionsOnTo, toClient.Actions())) - mockedCleaner.lock.Lock() - defer mockedCleaner.lock.Unlock() - if tc.expectNSCleaningPlanned != nil { - assert.Equal(t, tc.expectNSCleaningPlanned, sets.List[string](mockedCleaner.toClean)) - } else { - assert.Equal(t, []string{}, sets.List[string](mockedCleaner.toClean)) - } - }) - } -} - -func setupServersideApplyPatchReactor(toClient *dynamicfake.FakeDynamicClient) { - toClient.PrependReactor("patch", "*", func(action clienttesting.Action) (handled bool, ret runtime.Object, err error) { - patchAction := action.(clienttesting.PatchAction) - if patchAction.GetPatchType() != types.ApplyPatchType { - return false, nil, nil - } - return true, nil, err - }) -} - -func setupWatchReactor(resource string, fromClient *kcpfakedynamic.FakeDynamicClusterClientset) chan struct{} { - watcherStarted := make(chan struct{}) - fromClient.PrependWatchReactor(resource, func(action kcptesting.Action) (bool, watch.Interface, error) { - cluster := action.GetCluster() - gvr := action.GetResource() - ns := action.GetNamespace() - var watcher watch.Interface - var err error - switch cluster { - case logicalcluster.Wildcard: - watcher, err = fromClient.Tracker().Watch(gvr, ns) - default: - watcher, err = fromClient.Tracker().Cluster(cluster).Watch(gvr, ns) - } - close(watcherStarted) - return true, watcher, err - }) - return watcherStarted -} - -func namespace(name, clusterName string, labels, annotations map[string]string) *corev1.Namespace { - if clusterName != "" { - if annotations == nil { - annotations = make(map[string]string) - } - annotations[logicalcluster.AnnotationKey] = clusterName - } - - return &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Labels: labels, - Annotations: annotations, - }, - } -} - -func deployment(name, namespace, clusterName string, labels, annotations map[string]string, finalizers []string) *appsv1.Deployment { - if clusterName != "" { - if annotations == nil { - annotations = make(map[string]string) - } - annotations[logicalcluster.AnnotationKey] = clusterName - } - - return &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Labels: labels, - Annotations: annotations, - Finalizers: finalizers, - }, - } -} - -func endpoints(name, namespace string) *corev1.Endpoints { - return &corev1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - Subsets: []corev1.EndpointSubset{ - {Addresses: []corev1.EndpointAddress{ - { - IP: "8.8.8.8", - }}}, - }, - } -} - -func service(name, namespace string) *corev1.Service { - svc := dns.MakeService(name, namespace) - svc.Spec.ClusterIP = "8.8.8.8" - return svc -} - -func secret(name, namespace, clusterName string, labels, annotations map[string]string, data map[string][]byte) *corev1.Secret { - return secretWithFinalizers(name, namespace, clusterName, labels, annotations, nil, data) -} - -func secretWithFinalizers(name, namespace, clusterName string, labels, annotations map[string]string, finalizers []string, data 
map[string][]byte) *corev1.Secret { - if annotations == nil { - annotations = make(map[string]string) - } - annotations[logicalcluster.AnnotationKey] = clusterName - - return &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Labels: labels, - Annotations: annotations, - Finalizers: finalizers, - }, - Data: data, - StringData: nil, - Type: corev1.SecretTypeServiceAccountToken, - } -} - -type deploymentChange func(*appsv1.Deployment) - -func changeDeployment(in *appsv1.Deployment, changes ...deploymentChange) *appsv1.Deployment { - for _, change := range changes { - change(in) - } - return in -} - -func toJson(t require.TestingT, object runtime.Object) []byte { - result, err := json.Marshal(object) - require.NoError(t, err) - return result -} - -func toUnstructured(t require.TestingT, obj metav1.Object) *unstructured.Unstructured { - var result unstructured.Unstructured - err := scheme.Convert(obj, &result, nil) - require.NoError(t, err) - - return &result -} - -type unstructuredChange func(d *unstructured.Unstructured) - -func changeUnstructured(in *unstructured.Unstructured, changes ...unstructuredChange) *unstructured.Unstructured { - for _, change := range changes { - change(in) - } - return in -} - -func removeNilOrEmptyFields(in *unstructured.Unstructured) { - if val, exists, _ := unstructured.NestedFieldNoCopy(in.UnstructuredContent(), "metadata", "creationTimestamp"); val == nil && exists { - unstructured.RemoveNestedField(in.UnstructuredContent(), "metadata", "creationTimestamp") - } - if val, exists, _ := unstructured.NestedMap(in.UnstructuredContent(), "spec"); len(val) == 0 && exists { - delete(in.Object, "spec") - } - if val, exists, _ := unstructured.NestedMap(in.UnstructuredContent(), "status"); len(val) == 0 && exists { - delete(in.Object, "status") - } -} - -func setNestedField(value interface{}, fields ...string) unstructuredChange { - return func(d *unstructured.Unstructured) { - _ = unstructured.SetNestedField(d.UnstructuredContent(), value, fields...) - } -} - -func setPodSpec(fields ...string) unstructuredChange { - var j interface{} - err := json.Unmarshal([]byte(`{ - "dnsConfig": { - "nameservers": [ "8.8.8.8" ], - "options": [{ "name": "ndots", "value": "5"}], - "searches": ["test.svc.cluster.local", "svc.cluster.local", "cluster.local"] - }, - "dnsPolicy": "None", - "automountServiceAccountToken":false, - "containers":null, - "volumes":[ - {"name":"kcp-api-access","projected":{ - "defaultMode":420, - "sources":[ - {"secret":{"items":[{"key":"token","path":"token"},{"key":"namespace","path":"namespace"}],"name": "kcp-default-token-abc"}}, - {"configMap":{"items":[{"key":"ca.crt","path":"ca.crt"}],"name":"kcp-root-ca.crt"}} - ] - }} - ] -}`), &j) - if err != nil { - panic(err) - } - return setNestedField(j, fields...) 
-} - -func deploymentAction(verb, namespace string, subresources ...string) kcptesting.ActionImpl { - return kcptesting.ActionImpl{ - Namespace: namespace, - ClusterPath: logicalcluster.NewPath("root:org:ws"), - Verb: verb, - Resource: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}, - Subresource: strings.Join(subresources, "/"), - } -} - -func updateDeploymentAction(namespace string, object runtime.Object, subresources ...string) kcptesting.UpdateActionImpl { - return kcptesting.UpdateActionImpl{ - ActionImpl: deploymentAction("update", namespace, subresources...), - Object: object, - } -} - -func deploymentSingleClusterAction(verb, namespace string, subresources ...string) clienttesting.ActionImpl { - return clienttesting.ActionImpl{ - Namespace: namespace, - Verb: verb, - Resource: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}, - Subresource: strings.Join(subresources, "/"), - } -} - -func namespaceSingleClusterAction(verb string, subresources ...string) clienttesting.ActionImpl { - return clienttesting.ActionImpl{ - Namespace: "", - Verb: verb, - Resource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"}, - Subresource: strings.Join(subresources, "/"), - } -} - -func createNamespaceSingleClusterAction(name string, object runtime.Object) clienttesting.CreateActionImpl { - return clienttesting.CreateActionImpl{ - ActionImpl: namespaceSingleClusterAction("create"), - Name: name, - Object: object, - } -} - -func patchDeploymentSingleClusterAction(name, namespace string, patchType types.PatchType, patch []byte, subresources ...string) clienttesting.PatchActionImpl { - return clienttesting.PatchActionImpl{ - ActionImpl: deploymentSingleClusterAction("patch", namespace, subresources...), - Name: name, - PatchType: patchType, - Patch: patch, - } -} - -func deleteDeploymentSingleClusterAction(name, namespace string, subresources ...string) clienttesting.DeleteActionImpl { - return clienttesting.DeleteActionImpl{ - ActionImpl: deploymentSingleClusterAction("delete", namespace, subresources...), - Name: name, - DeleteOptions: metav1.DeleteOptions{}, - } -} - -func secretSingleClusterAction(verb, namespace string, subresources ...string) clienttesting.ActionImpl { - return clienttesting.ActionImpl{ - Namespace: namespace, - Verb: verb, - Resource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "secrets"}, - Subresource: strings.Join(subresources, "/"), - } -} - -func patchSecretSingleClusterAction(name, namespace string, patchType types.PatchType, patch []byte, subresources ...string) clienttesting.PatchActionImpl { - return clienttesting.PatchActionImpl{ - ActionImpl: secretSingleClusterAction("patch", namespace, subresources...), - Name: name, - PatchType: patchType, - Patch: patch, - } -} - -func namespaceAction(verb, namespace string, subresources ...string) clienttesting.ActionImpl { - return clienttesting.ActionImpl{ - Verb: verb, - Resource: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"}, - Subresource: strings.Join(subresources, "/"), - } -} - -func updateNamespaceAction(namespace string, object runtime.Object, subresources ...string) clienttesting.UpdateActionImpl { - return clienttesting.UpdateActionImpl{ - ActionImpl: namespaceAction("update", namespace, subresources...), - Object: object, - } -} diff --git a/pkg/syncer/status/status_controller.go b/pkg/syncer/status/status_controller.go deleted file mode 100644 index 
79f0ef07017..00000000000 --- a/pkg/syncer/status/status_controller.go +++ /dev/null @@ -1,252 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package status - -import ( - "context" - "fmt" - "time" - - "github.com/go-logr/logr" - "github.com/kcp-dev/logicalcluster/v3" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/informers" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/util/workqueue" - "k8s.io/klog/v2" - - ddsif "github.com/kcp-dev/kcp/pkg/informer" - "github.com/kcp-dev/kcp/pkg/logging" - "github.com/kcp-dev/kcp/pkg/syncer/shared" - "github.com/kcp-dev/kcp/pkg/syncer/synctarget" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" -) - -const ( - controllerName = "kcp-workload-syncer-status" -) - -var namespaceGVR schema.GroupVersionResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"} - -type Controller struct { - queue workqueue.RateLimitingInterface - - getUpstreamClient func(clusterName logicalcluster.Name) (dynamic.Interface, error) - downstreamClient dynamic.Interface - - getUpstreamLister func(clusterName logicalcluster.Name, gvr schema.GroupVersionResource) (cache.GenericLister, error) - getDownstreamLister func(gvr schema.GroupVersionResource) (cache.GenericLister, error) - - syncTargetName string - syncTargetWorkspace logicalcluster.Name - syncTargetUID types.UID - syncTargetKey string - advancedSchedulingEnabled bool -} - -func NewStatusSyncer(syncerLogger logr.Logger, syncTargetClusterName logicalcluster.Name, syncTargetName, syncTargetKey string, advancedSchedulingEnabled bool, - getShardAccess synctarget.GetShardAccessFunc, - downstreamClient dynamic.Interface, - ddsifForDownstream *ddsif.GenericDiscoveringDynamicSharedInformerFactory[cache.SharedIndexInformer, cache.GenericLister, informers.GenericInformer], - syncTargetUID types.UID) (*Controller, error) { - c := &Controller{ - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), controllerName), - - getUpstreamClient: func(clusterName logicalcluster.Name) (dynamic.Interface, error) { - shardAccess, ok, err := getShardAccess(clusterName) - if err != nil { - return nil, err - } - if !ok { - return nil, fmt.Errorf("shard-related clients not found for cluster %q", clusterName) - } - return shardAccess.SyncerClient.Cluster(clusterName.Path()), nil - }, - downstreamClient: downstreamClient, - - getDownstreamLister: func(gvr schema.GroupVersionResource) (cache.GenericLister, error) { - informers, notSynced := ddsifForDownstream.Informers() - informer, ok := informers[gvr] - if !ok { - if shared.ContainsGVR(notSynced, gvr) { - return nil, fmt.Errorf("informer for gvr %v not synced in the downstream informer factory - should retry", gvr) - } - return nil, fmt.Errorf("gvr 
%v should be known in the downstream informer factory", gvr) - } - return informer.Lister(), nil - }, - getUpstreamLister: func(clusterName logicalcluster.Name, gvr schema.GroupVersionResource) (cache.GenericLister, error) { - shardAccess, ok, err := getShardAccess(clusterName) - if err != nil { - return nil, err - } - if !ok { - return nil, fmt.Errorf("shard-related clients not found for cluster %q", clusterName) - } - - informers, notSynced := shardAccess.SyncerDDSIF.Informers() - informer, ok := informers[gvr] - if !ok { - if shared.ContainsGVR(notSynced, gvr) { - return nil, fmt.Errorf("informer for gvr %v not synced in the upstream syncer informer factory - should retry", gvr) - } - return nil, fmt.Errorf("gvr %v should be known in the upstream syncer informer factory", gvr) - } - return informer.Lister().ByCluster(clusterName), nil - }, - - syncTargetName: syncTargetName, - syncTargetWorkspace: syncTargetClusterName, - syncTargetUID: syncTargetUID, - syncTargetKey: syncTargetKey, - advancedSchedulingEnabled: advancedSchedulingEnabled, - } - - logger := logging.WithReconciler(syncerLogger, controllerName) - - namespaceGVR := corev1.SchemeGroupVersion.WithResource("namespaces") - - ddsifForDownstream.AddEventHandler( - ddsif.GVREventHandlerFuncs{ - AddFunc: func(gvr schema.GroupVersionResource, obj interface{}) { - if gvr == namespaceGVR { - return - } - unstrObj, ok := obj.(*unstructured.Unstructured) - if !ok { - runtime.HandleError(fmt.Errorf("resource should be a *unstructured.Unstructured, but was %T", unstrObj)) - return - } - if unstrObj.GetLabels()[workloadv1alpha1.ClusterResourceStateLabelPrefix+syncTargetKey] == string(workloadv1alpha1.ResourceStateUpsync) { - return - } - - c.AddToQueue(gvr, obj, logger) - }, - UpdateFunc: func(gvr schema.GroupVersionResource, oldObj, newObj interface{}) { - if gvr == namespaceGVR { - return - } - oldUnstrob := oldObj.(*unstructured.Unstructured) - newUnstrob := newObj.(*unstructured.Unstructured) - if newUnstrob.GetLabels()[workloadv1alpha1.ClusterResourceStateLabelPrefix+syncTargetKey] == string(workloadv1alpha1.ResourceStateUpsync) { - return - } - if !deepEqualFinalizersAndStatus(oldUnstrob, newUnstrob) { - c.AddToQueue(gvr, newUnstrob, logger) - } - }, - DeleteFunc: func(gvr schema.GroupVersionResource, obj interface{}) { - if gvr == namespaceGVR { - return - } - if d, ok := obj.(cache.DeletedFinalStateUnknown); ok { - obj = d.Obj - } - unstrObj, ok := obj.(*unstructured.Unstructured) - if !ok { - runtime.HandleError(fmt.Errorf("resource should be a *unstructured.Unstructured, but was %T", unstrObj)) - return - } - if unstrObj.GetLabels()[workloadv1alpha1.ClusterResourceStateLabelPrefix+syncTargetKey] == string(workloadv1alpha1.ResourceStateUpsync) { - return - } - - c.AddToQueue(gvr, obj, logger) - }, - }) - - return c, nil -} - -type queueKey struct { - gvr schema.GroupVersionResource - key string // meta namespace key -} - -func (c *Controller) AddToQueue(gvr schema.GroupVersionResource, obj interface{}, logger logr.Logger) { - key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) - if err != nil { - runtime.HandleError(err) - return - } - - logging.WithQueueKey(logger, key).V(2).Info("queueing GVR", "gvr", gvr.String()) - c.queue.Add( - queueKey{ - gvr: gvr, - key: key, - }, - ) -} - -// Start starts N worker processes processing work items. 
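- // It blocks until ctx is cancelled; wait.UntilWithContext restarts any worker that returns, - // with a one-second pause, and the queue is shut down on exit.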
-func (c *Controller) Start(ctx context.Context, numThreads int) { - defer runtime.HandleCrash() - defer c.queue.ShutDown() - - logger := logging.WithReconciler(klog.FromContext(ctx), controllerName) - ctx = klog.NewContext(ctx, logger) - logger.Info("Starting syncer workers") - defer logger.Info("Stopping syncer workers") - - for i := 0; i < numThreads; i++ { - go wait.UntilWithContext(ctx, c.startWorker, time.Second) - } - - <-ctx.Done() -} - -// startWorker processes work items until stopCh is closed. -func (c *Controller) startWorker(ctx context.Context) { - for c.processNextWorkItem(ctx) { - } -} - -func (c *Controller) processNextWorkItem(ctx context.Context) bool { - // Wait until there is a new item in the working queue - key, quit := c.queue.Get() - if quit { - return false - } - qk := key.(queueKey) - - logger := logging.WithQueueKey(klog.FromContext(ctx), qk.key).WithValues("gvr", qk.gvr.String()) - ctx = klog.NewContext(ctx, logger) - logger.V(1).Info("processing key") - - // No matter what, tell the queue we're done with this key, to unblock - // other workers. - defer c.queue.Done(key) - - if err := c.process(ctx, qk.gvr, qk.key); err != nil { - runtime.HandleError(fmt.Errorf("%s failed to sync %q, err: %w", controllerName, key, err)) - c.queue.AddRateLimited(key) - return true - } - - c.queue.Forget(key) - - return true -} diff --git a/pkg/syncer/status/status_process.go b/pkg/syncer/status/status_process.go deleted file mode 100644 index a8b62c94968..00000000000 --- a/pkg/syncer/status/status_process.go +++ /dev/null @@ -1,256 +0,0 @@ -/* -Copyright 2021 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package status - -import ( - "context" - "encoding/json" - "fmt" - "reflect" - "strings" - - "k8s.io/apimachinery/pkg/api/equality" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/tools/cache" - "k8s.io/klog/v2" - - workloadcliplugin "github.com/kcp-dev/kcp/pkg/cliplugins/workload/plugin" - "github.com/kcp-dev/kcp/pkg/logging" - "github.com/kcp-dev/kcp/pkg/syncer/shared" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" - . 
"github.com/kcp-dev/kcp/tmc/pkg/logging" -) - -func deepEqualFinalizersAndStatus(oldUnstrob, newUnstrob *unstructured.Unstructured) bool { - newFinalizers := newUnstrob.GetFinalizers() - oldFinalizers := oldUnstrob.GetFinalizers() - - newStatus := newUnstrob.UnstructuredContent()["status"] - oldStatus := oldUnstrob.UnstructuredContent()["status"] - - return equality.Semantic.DeepEqual(oldFinalizers, newFinalizers) && equality.Semantic.DeepEqual(oldStatus, newStatus) -} - -func (c *Controller) process(ctx context.Context, gvr schema.GroupVersionResource, key string) error { - logger := klog.FromContext(ctx) - - // from downstream - downstreamNamespace, downstreamName, err := cache.SplitMetaNamespaceKey(key) - if err != nil { - logger.Error(err, "Invalid key") - return nil - } - // TODO(sttts): do not reference the cli plugin here - if strings.HasPrefix(downstreamNamespace, workloadcliplugin.SyncerIDPrefix) { - // skip syncer namespace - return nil - } - - logger = logger.WithValues(DownstreamNamespace, downstreamNamespace, DownstreamName, downstreamName) - - downstreamLister, err := c.getDownstreamLister(gvr) - if err != nil { - return err - } - - var namespaceLocator *shared.NamespaceLocator - var locatorExists bool - - if downstreamNamespace != "" { - downstreamNamespaceLister, err := c.getDownstreamLister(namespaceGVR) - if err != nil { - return err - } - - nsObj, err := downstreamNamespaceLister.Get(downstreamNamespace) - if err != nil { - logger.Error(err, "Error retrieving downstream namespace from downstream lister") - return nil - } - nsMeta, ok := nsObj.(metav1.Object) - if !ok { - logger.Info(fmt.Sprintf("Error: downstream namespace expected to be metav1.Object, got %T", nsObj)) - return nil - } - - namespaceLocator, locatorExists, err = shared.LocatorFromAnnotations(nsMeta.GetAnnotations()) - if err != nil { - logger.Error(err, "Error decoding annotation on downstream namespace") - return nil - } - if !locatorExists || namespaceLocator == nil { - // Only sync resources for the configured logical cluster to ensure - // that syncers for multiple logical clusters can coexist. - return nil - } - } - - var resourceExists bool - obj, err := downstreamLister.ByNamespace(downstreamNamespace).Get(downstreamName) - if err == nil { - resourceExists = true - } else if !apierrors.IsNotFound(err) { - return err - } - - if downstreamNamespace == "" { - if !resourceExists { - // TODO(davidfestal): The downstream object doesn't exist, but we cannot remove the finalizer - // on the upstream resource since we dont have the locator to locate the upstream resource. - // That should be fixed. - return nil - } - - objMeta, ok := obj.(metav1.Object) - if !ok { - logger.Info(fmt.Sprintf("Error: downstream cluster-wide resource expected to be metav1.Object, got %T", obj)) - return nil - } - namespaceLocator, locatorExists, err = shared.LocatorFromAnnotations(objMeta.GetAnnotations()) - if err != nil { - logger.Error(err, "Error decoding annotation on downstream cluster-wide resource") - return nil - } - if !locatorExists || namespaceLocator == nil { - // Only sync resources for the configured logical cluster to ensure - // that syncers for multiple logical clusters can coexist. - return nil - } - } - if namespaceLocator.SyncTarget.UID != c.syncTargetUID || namespaceLocator.SyncTarget.ClusterName != c.syncTargetWorkspace.String() { - // not our resource. 
- return nil - } - - upstreamNamespace := namespaceLocator.Namespace - upstreamClusterName := namespaceLocator.ClusterName - upstreamName := shared.GetUpstreamResourceName(gvr, downstreamName) - - logger = logger.WithValues(logging.WorkspaceKey, upstreamClusterName, logging.NamespaceKey, upstreamNamespace, logging.NameKey, upstreamName) - ctx = klog.NewContext(ctx, logger) - - upstreamLister, err := c.getUpstreamLister(upstreamClusterName, gvr) - if err != nil { - return err - } - - upstreamClient, err := c.getUpstreamClient(upstreamClusterName) - if err != nil { - return err - } - - if !resourceExists { - logger.Info("Downstream object does not exist. Removing finalizer on upstream object") - return shared.EnsureUpstreamFinalizerRemoved(ctx, gvr, upstreamLister, upstreamClient, upstreamNamespace, c.syncTargetKey, shared.GetUpstreamResourceName(gvr, downstreamName)) - } - - // update upstream status - u, ok := obj.(*unstructured.Unstructured) - if !ok { - return fmt.Errorf("object to synchronize is expected to be Unstructured, but is %T", obj) - } - if u.GetLabels()[workloadv1alpha1.ClusterResourceStateLabelPrefix+c.syncTargetKey] == string(workloadv1alpha1.ResourceStateUpsync) { - logger.V(4).Info("do not update the status in upstream, since the downstream resource is in Upsync mode") - return nil - } - - return c.updateStatusInUpstream(ctx, gvr, upstreamClient, upstreamLister, upstreamNamespace, upstreamName, u) -} - -func (c *Controller) updateStatusInUpstream(ctx context.Context, gvr schema.GroupVersionResource, upstreamClient dynamic.Interface, upstreamLister cache.GenericLister, upstreamNamespace, upstreamName string, downstreamObj *unstructured.Unstructured) error { - logger := klog.FromContext(ctx) - - downstreamStatus, statusExists, err := unstructured.NestedFieldCopy(downstreamObj.UnstructuredContent(), "status") - if err != nil { - return err - } else if !statusExists { - logger.V(5).Info("Downstream resource doesn't contain a status. Skipping updating the status of upstream resource") - return nil - } - - existingObj, err := upstreamLister.ByNamespace(upstreamNamespace).Get(upstreamName) - if err != nil { - logger.Error(err, "Error getting upstream resource") - return err - } - - existing, ok := existingObj.(*unstructured.Unstructured) - if !ok { - logger.Info(fmt.Sprintf("Error: Upstream resource expected to be *unstructured.Unstructured, got %T", existing)) - return nil - } - - newUpstream := existing.DeepCopy() - - if c.advancedSchedulingEnabled { - statusAnnotationValue, err := json.Marshal(downstreamStatus) - if err != nil { - return err - } - newUpstreamAnnotations := newUpstream.GetAnnotations() - if newUpstreamAnnotations == nil { - newUpstreamAnnotations = make(map[string]string) - } - newUpstreamAnnotations[workloadv1alpha1.InternalClusterStatusAnnotationPrefix+c.syncTargetKey] = string(statusAnnotationValue) - newUpstream.SetAnnotations(newUpstreamAnnotations) - - if reflect.DeepEqual(existing, newUpstream) { - logger.V(2).Info("No need to update the status annotation of upstream resource") - return nil - } - - if upstreamNamespace != "" { - // In this case we will update the whole resource, not the status, as the status is in the annotation. - // this is specific to the advancedScheduling flag. 
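- // Hence Update rather than UpdateStatus below: the status payload lives in an annotation, - // i.e. on the main resource, and a status-subresource update would not persist it.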
- _, err = upstreamClient.Resource(gvr).Namespace(upstreamNamespace).Update(ctx, newUpstream, metav1.UpdateOptions{}) - } else { - _, err = upstreamClient.Resource(gvr).Update(ctx, newUpstream, metav1.UpdateOptions{}) - } - - if err != nil { - logger.Error(err, "Failed updating the status annotation of upstream resource") - return err - } - logger.Info("Updated the status annotation of upstream resource") - return nil - } - - if err := unstructured.SetNestedField(newUpstream.UnstructuredContent(), downstreamStatus, "status"); err != nil { - logger.Error(err, "Failed setting status of upstream resource") - return err - } - - // TODO (davidfestal): Here in the future we might want to also set some fields of the Spec, per resource type, for example: - // clusterIP for service, or other field values set by SyncTarget cluster admission. - // But for now let's only update the status. - if upstreamNamespace != "" { - _, err = upstreamClient.Resource(gvr).Namespace(upstreamNamespace).UpdateStatus(ctx, newUpstream, metav1.UpdateOptions{}) - } else { - _, err = upstreamClient.Resource(gvr).UpdateStatus(ctx, newUpstream, metav1.UpdateOptions{}) - } - if err != nil { - logger.Error(err, "Failed updating status of upstream resource") - return err - } - - logger.Info("Updated status of upstream resource") - return nil -} diff --git a/pkg/syncer/status/status_process_test.go b/pkg/syncer/status/status_process_test.go deleted file mode 100644 index 2399e1ab73b..00000000000 --- a/pkg/syncer/status/status_process_test.go +++ /dev/null @@ -1,821 +0,0 @@ -/* -Copyright 2021 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package status - -import ( - "context" - "strings" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "github.com/kcp-dev/logicalcluster/v3" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - kcpfakedynamic "github.com/kcp-dev/client-go/third_party/k8s.io/client-go/dynamic/fake" - kcptesting "github.com/kcp-dev/client-go/third_party/k8s.io/client-go/testing" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/apimachinery/pkg/watch" - dynamicfake "k8s.io/client-go/dynamic/fake" - clienttesting "k8s.io/client-go/testing" - "k8s.io/client-go/tools/cache" - "k8s.io/klog/v2" - - ddsif "github.com/kcp-dev/kcp/pkg/informer" - "github.com/kcp-dev/kcp/pkg/syncer/indexers" - "github.com/kcp-dev/kcp/pkg/syncer/synctarget" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" -) - -var scheme *runtime.Scheme - -func init() { - scheme = runtime.NewScheme() - _ = corev1.AddToScheme(scheme) - _ = appsv1.AddToScheme(scheme) -} - -func TestDeepEqualFinalizersAndStatus(t *testing.T) { - for _, c := range []struct { - desc string - old, new *unstructured.Unstructured - want bool - }{{ - desc: "both objects have same status", - old: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "status": map[string]string{ - "cool": "yes", - }, - }, - }, - new: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "status": map[string]string{ - "cool": "yes", - }, - }, - }, - want: true, - }, { - desc: "both objects have status; different", - old: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "status": map[string]string{ - "cool": "yes", - }, - }, - }, - new: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "status": map[string]string{ - "cool": "no", - }, - }, - }, - want: false, - }, { - desc: "one object doesn't have status", - old: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "status": map[string]string{ - "cool": "yes", - }, - }, - }, - new: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "spec": map[string]string{}, - }, - }, - want: false, - }, { - desc: "both objects don't have status", - old: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "spec": map[string]string{}, - }, - }, - new: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "spec": map[string]string{}, - }, - }, - want: true, - }, { - desc: "both objects have the same finalizers", - old: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "metadata": map[string]interface{}{ - "finalizers": []interface{}{ - "finalizer.1", - "finalizer.2", - }, - }, - }, - }, - new: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "metadata": map[string]interface{}{ - "finalizers": []interface{}{ - "finalizer.1", - "finalizer.2", - }, - }, - }, - }, - want: true, - }, { - desc: "one object doesn't have finalizers", - old: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "metadata": map[string]interface{}{ - "finalizers": []interface{}{ - "finalizer.1", - "finalizer.2", - }, - }, - }, - }, - new: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "metadata": 
map[string]interface{}{}, - }, - }, - want: false, - }, { - desc: "both objects don't have finalizers", - old: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "metadata": map[string]interface{}{}, - }, - }, - new: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "metadata": map[string]interface{}{}, - }, - }, - want: true, - }, { - desc: "objects have different finalizers", - old: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "metadata": map[string]interface{}{ - "finalizers": []interface{}{ - "finalizer.1", - "finalizer.2", - }, - }, - }, - }, - new: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "metadata": map[string]interface{}{ - "finalizers": []interface{}{ - "finalizer.2", - "finalizer.3", - }, - }, - }, - }, - want: false, - }, { - desc: "one object doesn't have finalizers", - old: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "metadata": map[string]interface{}{ - "finalizers": []interface{}{ - "finalizer.1", - "finalizer.2", - }, - }, - }, - }, - new: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "metadata": map[string]interface{}{}, - }, - }, - want: false, - }, { - desc: "objects have the same status and finalizers but different labels", - old: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "metadata": map[string]interface{}{ - "labels": map[string]interface{}{ - "cool": "yes", - }, - "finalizers": []interface{}{ - "finalizer.1", - "finalizer.2", - }, - }, - "status": map[string]string{ - "cool": "yes", - }, - }, - }, - new: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "metadata": map[string]interface{}{ - "labels": map[string]interface{}{ - "cool": "no!", - }, - "finalizers": []interface{}{ - "finalizer.1", - "finalizer.2", - }, - }, - "status": map[string]string{ - "cool": "yes", - }, - }, - }, - want: true, - }, - { - desc: "objects have equal finalizers and statuses", - old: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "metadata": map[string]interface{}{ - "finalizers": []interface{}{ - "finalizer.1", - "finalizer.2", - }, - }, - "status": map[string]string{ - "cool": "yes", - }, - }, - }, - new: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "metadata": map[string]interface{}{ - "finalizers": []interface{}{ - "finalizer.1", - "finalizer.2", - }, - }, - "status": map[string]string{ - "cool": "yes", - }, - }, - }, - want: true, - }} { - t.Run(c.desc, func(t *testing.T) { - got := deepEqualFinalizersAndStatus(c.old, c.new) - if got != c.want { - t.Fatalf("got %t, want %t", got, c.want) - } - }) - } -} - -var _ ddsif.GVRSource = (*mockedGVRSource)(nil) - -type mockedGVRSource struct { -} - -func (s *mockedGVRSource) GVRs() map[schema.GroupVersionResource]ddsif.GVRPartialMetadata { - return map[schema.GroupVersionResource]ddsif.GVRPartialMetadata{ - appsv1.SchemeGroupVersion.WithResource("deployments"): { - Names: apiextensionsv1.CustomResourceDefinitionNames{ - Singular: "deployment", - Kind: "Deployment", - }, - }, - { - Version: "v1", - Resource: "namespaces", - }: { - Scope: apiextensionsv1.ClusterScoped, - Names: apiextensionsv1.CustomResourceDefinitionNames{ - Singular: "namespace", - Kind: "Namespace", - }, - }, - { - Version: "v1", - Resource: "configmaps", - }: { - Scope: apiextensionsv1.NamespaceScoped, - Names: apiextensionsv1.CustomResourceDefinitionNames{ - Singular: "configmap", - Kind: "ConfigMap", - }, - }, - { - Version: "v1", - Resource: "secrets", - }: { - Scope: 
apiextensionsv1.NamespaceScoped, - Names: apiextensionsv1.CustomResourceDefinitionNames{ - Singular: "secret", - Kind: "Secret", - }, - }, - } -} - -func (s *mockedGVRSource) Ready() bool { - return true -} - -func (s *mockedGVRSource) Subscribe() <-chan struct{} { - return make(<-chan struct{}) -} - -func TestStatusSyncerProcess(t *testing.T) { - tests := map[string]struct { - fromNamespace *corev1.Namespace - gvr schema.GroupVersionResource - fromResource runtime.Object - toResources []runtime.Object - - resourceToProcessName string - - upstreamURL string - upstreamLogicalCluster logicalcluster.Name - syncTargetName string - syncTargetClusterName logicalcluster.Name - syncTargetUID types.UID - advancedSchedulingEnabled bool - - expectError bool - expectActionsOnFrom []clienttesting.Action - expectActionsOnTo []kcptesting.Action - }{ - "StatusSyncer upsert to existing resource": { - upstreamLogicalCluster: "root:org:ws", - fromNamespace: namespace("kcp0124d7647eb6a00b1fcb6f2252201601634989dd79deb7375c373973", "", - map[string]string{ - "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", - }, - map[string]string{ - "kcp.io/namespace-locator": `{"syncTarget":{"cluster":"root:org:ws","name":"us-west1","uid":"syncTargetUID"},"cluster":"root:org:ws","namespace":"test"}`, - }), - gvr: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}, - fromResource: changeDeployment( - deployment("theDeployment", "kcp0124d7647eb6a00b1fcb6f2252201601634989dd79deb7375c373973", "", map[string]string{ - "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", - }, nil, nil), - addDeploymentStatus(appsv1.DeploymentStatus{ - Replicas: 15, - })), - toResources: []runtime.Object{ - deployment("theDeployment", "test", "root:org:ws", map[string]string{ - "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Sync", - }, nil, nil), - }, - resourceToProcessName: "theDeployment", - syncTargetName: "us-west1", - - expectActionsOnFrom: []clienttesting.Action{}, - expectActionsOnTo: []kcptesting.Action{ - updateDeploymentAction("test", - toUnstructured(t, changeDeployment( - deployment("theDeployment", "test", "root:org:ws", map[string]string{ - "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Sync", - }, nil, nil), - addDeploymentStatus(appsv1.DeploymentStatus{ - Replicas: 15, - }))), - "status"), - }, - }, - "StatusSyncer upsert to existing resource but owned by another synctarget, expect no update": { - upstreamLogicalCluster: "root:org:ws", - fromNamespace: namespace("kcp0124d7647eb6a00b1fcb6f2252201601634989dd79deb7375c373973", "", - map[string]string{ - "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", - }, - map[string]string{ - "kcp.io/namespace-locator": `{"syncTarget":{"cluster":"root:org:ws","name":"us-west1","uid":"ANOTHERSYNCTARGETUID"},"cluster":"root:org:ws","namespace":"test"}`, - }), - gvr: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}, - fromResource: changeDeployment( - deployment("theDeployment", "kcp0124d7647eb6a00b1fcb6f2252201601634989dd79deb7375c373973", "", map[string]string{ - "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", - }, nil, nil), - addDeploymentStatus(appsv1.DeploymentStatus{ - Replicas: 15, - })), - toResources: []runtime.Object{ - deployment("theDeployment", "test", "root:org:ws", map[string]string{ - "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Sync", - }, nil, nil), - 
}, - resourceToProcessName: "theDeployment", - syncTargetName: "us-west1", - - expectActionsOnFrom: []clienttesting.Action{}, - expectActionsOnTo: []kcptesting.Action{}, - }, - "StatusSyncer upstream deletion": { - upstreamLogicalCluster: "root:org:ws", - fromNamespace: namespace("kcp0124d7647eb6a00b1fcb6f2252201601634989dd79deb7375c373973", "", - map[string]string{ - "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", - }, - map[string]string{ - "kcp.io/namespace-locator": `{"syncTarget":{"cluster":"root:org:ws","name":"us-west1","uid":"syncTargetUID"},"cluster":"root:org:ws","namespace":"test"}`, - }), - gvr: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}, - fromResource: changeDeployment( - deployment("theDeployment", "kcp0124d7647eb6a00b1fcb6f2252201601634989dd79deb7375c373973", "", nil, nil, nil), - addDeploymentStatus(appsv1.DeploymentStatus{ - Replicas: 15, - })), - toResources: []runtime.Object{ - deployment("theDeployment", "test", "root:org:ws", map[string]string{ - "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Sync", - }, nil, nil), - }, - resourceToProcessName: "theDeployment", - syncTargetName: "us-west1", - - expectActionsOnFrom: []clienttesting.Action{}, - expectActionsOnTo: []kcptesting.Action{}, - }, - "StatusSyncer with AdvancedScheduling, update status upstream": { - upstreamLogicalCluster: "root:org:ws", - fromNamespace: namespace("kcp0124d7647eb6a00b1fcb6f2252201601634989dd79deb7375c373973", "", - map[string]string{ - "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", - }, - map[string]string{ - "kcp.io/namespace-locator": `{"syncTarget":{"cluster":"root:org:ws","name":"us-west1","uid":"syncTargetUID"},"cluster":"root:org:ws","namespace":"test"}`, - }), - gvr: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}, - fromResource: changeDeployment( - deployment("theDeployment", "kcp0124d7647eb6a00b1fcb6f2252201601634989dd79deb7375c373973", "", map[string]string{ - "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", - }, nil, nil), - addDeploymentStatus(appsv1.DeploymentStatus{ - Replicas: 15, - })), - toResources: []runtime.Object{ - deployment("theDeployment", "test", "root:org:ws", map[string]string{ - "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Sync", - }, nil, nil), - }, - resourceToProcessName: "theDeployment", - syncTargetName: "us-west1", - advancedSchedulingEnabled: true, - - expectActionsOnFrom: []clienttesting.Action{}, - expectActionsOnTo: []kcptesting.Action{ - updateDeploymentAction("test", - toUnstructured(t, changeDeployment( - deployment("theDeployment", "test", "root:org:ws", map[string]string{ - "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Sync", - }, map[string]string{ - "experimental.status.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "{\"replicas\":15}", - }, nil)))), - }, - }, - "StatusSyncer with AdvancedScheduling, deletion: object exists upstream": { - upstreamLogicalCluster: "root:org:ws", - fromNamespace: namespace("kcp0124d7647eb6a00b1fcb6f2252201601634989dd79deb7375c373973", "", - map[string]string{ - "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", - }, - map[string]string{ - "kcp.io/namespace-locator": `{"syncTarget":{"cluster":"root:org:ws","name":"us-west1","uid":"syncTargetUID"},"cluster":"root:org:ws","namespace":"test"}`, - }), - gvr: schema.GroupVersionResource{Group: "apps", Version: "v1", 
Resource: "deployments"}, - fromResource: changeDeployment( - deployment("theDeployment", "kcp0124d7647eb6a00b1fcb6f2252201601634989dd79deb7375c373973", "", map[string]string{ - "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", - }, nil, nil), - addDeploymentStatus(appsv1.DeploymentStatus{ - Replicas: 15, - })), - toResources: []runtime.Object{ - deployment("theDeployment", "test", "root:org:ws", map[string]string{ - "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Sync", - }, map[string]string{ - "deletion.internal.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": time.Now().Format(time.RFC3339), - "experimental.status.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "{\"replicas\":15}", - }, []string{"workload.kcp.io/syncer-6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g"}), - }, - resourceToProcessName: "theDeployment", - syncTargetName: "us-west1", - advancedSchedulingEnabled: true, - - expectActionsOnFrom: []clienttesting.Action{}, - expectActionsOnTo: []kcptesting.Action{}, - }, - "StatusSyncer with AdvancedScheduling, deletion: object does not exists upstream": { - upstreamLogicalCluster: "root:org:ws", - fromNamespace: namespace("kcp0124d7647eb6a00b1fcb6f2252201601634989dd79deb7375c373973", "", - map[string]string{ - "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", - }, - map[string]string{ - "kcp.io/namespace-locator": `{"syncTarget":{"cluster":"root:org:ws","name":"us-west1","uid":"syncTargetUID"},"cluster":"root:org:ws","namespace":"test"}`, - }), - gvr: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}, - fromResource: nil, - toResources: []runtime.Object{ - namespace("test", "root:org:ws", map[string]string{"state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Sync"}, nil), - deployment("theDeployment", "test", "root:org:ws", map[string]string{ - "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Sync", - }, map[string]string{ - "deletion.internal.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": time.Now().Format(time.RFC3339), - "experimental.status.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": `{"replicas":15}`, - }, []string{"workload.kcp.io/syncer-6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g"}), - }, - resourceToProcessName: "theDeployment", - syncTargetName: "us-west1", - advancedSchedulingEnabled: true, - - expectActionsOnFrom: []clienttesting.Action{}, - expectActionsOnTo: []kcptesting.Action{ - updateDeploymentAction("test", - changeUnstructured( - toUnstructured(t, changeDeployment( - deployment("theDeployment", "test", "root:org:ws", map[string]string{}, map[string]string{}, nil))), - // The following "changes" are required for the test to pass, as it expects some empty/nil fields to be there - setNestedField(map[string]interface{}{}, "metadata", "labels"), - setNestedField([]interface{}{}, "metadata", "finalizers"), - setNestedField(nil, "spec", "selector"), - ), - ), - }, - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - logger := klog.FromContext(ctx) - - kcpLogicalCluster := tc.upstreamLogicalCluster - if tc.syncTargetUID == "" { - tc.syncTargetUID = types.UID("syncTargetUID") - } - if tc.syncTargetClusterName.Empty() { - tc.syncTargetClusterName = "root:org:ws" - } - - var allFromResources []runtime.Object - allFromResources = append(allFromResources, tc.fromNamespace) - if tc.fromResource != nil { - 
allFromResources = append(allFromResources, tc.fromResource) - } - fromClient := dynamicfake.NewSimpleDynamicClient(scheme, allFromResources...) - toClusterClient := kcpfakedynamic.NewSimpleDynamicClient(scheme, tc.toResources...) - - syncTargetKey := workloadv1alpha1.ToSyncTargetKey(tc.syncTargetClusterName, tc.syncTargetName) - - ddsifForUpstreamSyncer, err := ddsif.NewDiscoveringDynamicSharedInformerFactory(toClusterClient, nil, nil, &mockedGVRSource{}, cache.Indexers{}) - require.NoError(t, err) - - ddsifForDownstream, err := ddsif.NewScopedDiscoveringDynamicSharedInformerFactory(fromClient, nil, - func(o *metav1.ListOptions) { - o.LabelSelector = workloadv1alpha1.InternalDownstreamClusterLabel + "=" + syncTargetKey - }, - &mockedGVRSource{}, - cache.Indexers{ - indexers.ByNamespaceLocatorIndexName: indexers.IndexByNamespaceLocator, - }, - ) - require.NoError(t, err) - - setupServersideApplyPatchReactor(toClusterClient) - fromClientResourceWatcherStarted := setupWatchReactor(t, tc.gvr.Resource, fromClient) - toClientResourceWatcherStarted := setupClusterWatchReactor(t, tc.gvr.Resource, toClusterClient) - - getShardAccess := func(clusterName logicalcluster.Name) (synctarget.ShardAccess, bool, error) { - return synctarget.ShardAccess{ - SyncerClient: toClusterClient, - SyncerDDSIF: ddsifForUpstreamSyncer, - }, true, nil - } - controller, err := NewStatusSyncer(logger, kcpLogicalCluster, tc.syncTargetName, syncTargetKey, tc.advancedSchedulingEnabled, getShardAccess, fromClient, ddsifForDownstream, tc.syncTargetUID) - require.NoError(t, err) - - ddsifForUpstreamSyncer.Start(ctx.Done()) - ddsifForDownstream.Start(ctx.Done()) - - go ddsifForUpstreamSyncer.StartWorker(ctx) - go ddsifForDownstream.StartWorker(ctx) - - <-fromClientResourceWatcherStarted - <-toClientResourceWatcherStarted - - // The only GVRs we care about are the 4 listed below - t.Logf("waiting for upstream and downstream dynamic informer factories to be synced") - gvrs := sets.New[string]( - schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"}.String(), - schema.GroupVersionResource{Group: "", Version: "v1", Resource: "configmaps"}.String(), - schema.GroupVersionResource{Group: "", Version: "v1", Resource: "secrets"}.String(), - schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}.String(), - ) - require.Eventually(t, func() bool { - syncedUpstream, _ := ddsifForUpstreamSyncer.Informers() - foundUpstream := sets.New[string]() - for gvr := range syncedUpstream { - foundUpstream.Insert(gvr.String()) - } - - syncedDownstream, _ := ddsifForDownstream.Informers() - foundDownstream := sets.New[string]() - for gvr := range syncedDownstream { - foundDownstream.Insert(gvr.String()) - } - return foundUpstream.IsSuperset(gvrs) && foundDownstream.IsSuperset(gvrs) - }, wait.ForeverTestTimeout, 100*time.Millisecond) - t.Logf("upstream and downstream dynamic informer factories are synced") - - // Now that we know the informer factories have the GVRs we care about synced, we need to clear the - // actions so our expectations will be accurate. 
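// Aside: a minimal sketch of why the actions are cleared here. Fake clients
// record every action, including the List/Watch traffic the informers above
// issue while syncing, so assertions on Actions() would otherwise see setup
// noise. (Names mirror the test; the GVR and namespace are illustrative.)
//
//	c := dynamicfake.NewSimpleDynamicClient(scheme)
//	_, _ = c.Resource(schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}).
//		Namespace("test").List(ctx, metav1.ListOptions{})
//	_ = len(c.Actions()) // > 0: informer/setup traffic, not test traffic
//	c.ClearActions()     // reset, so the expectations below start from a clean slate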
- fromClient.ClearActions() - toClusterClient.ClearActions() - - key := tc.fromNamespace.Name + "/" + tc.resourceToProcessName - err = controller.process(context.Background(), - schema.GroupVersionResource{ - Group: "apps", - Version: "v1", - Resource: "deployments", - }, - key, - ) - if tc.expectError { - require.Error(t, err) - } else { - require.NoError(t, err) - } - assert.Empty(t, cmp.Diff(tc.expectActionsOnFrom, fromClient.Actions())) - assert.Empty(t, cmp.Diff(tc.expectActionsOnTo, toClusterClient.Actions(), cmp.AllowUnexported(logicalcluster.Path{}))) - }) - } -} - -func setupServersideApplyPatchReactor(toClient *kcpfakedynamic.FakeDynamicClusterClientset) { - toClient.PrependReactor("patch", "*", func(action kcptesting.Action) (handled bool, ret runtime.Object, err error) { - patchAction := action.(kcptesting.PatchAction) - if patchAction.GetPatchType() != types.ApplyPatchType { - return false, nil, nil - } - return true, nil, err - }) -} - -func setupWatchReactor(t *testing.T, resource string, client *dynamicfake.FakeDynamicClient) chan struct{} { - t.Helper() - watcherStarted := make(chan struct{}) - client.PrependWatchReactor(resource, func(action clienttesting.Action) (handled bool, ret watch.Interface, err error) { - gvr := action.GetResource() - ns := action.GetNamespace() - watch, err := client.Tracker().Watch(gvr, ns) - if err != nil { - return false, nil, err - } - t.Logf("%s: watcher started", t.Name()) - close(watcherStarted) - return true, watch, nil - }) - return watcherStarted -} - -func setupClusterWatchReactor(t *testing.T, resource string, client *kcpfakedynamic.FakeDynamicClusterClientset) chan struct{} { - t.Helper() - watcherStarted := make(chan struct{}) - client.PrependWatchReactor(resource, func(action kcptesting.Action) (bool, watch.Interface, error) { - cluster := action.GetCluster() - gvr := action.GetResource() - ns := action.GetNamespace() - var watcher watch.Interface - var err error - switch cluster { - case logicalcluster.Wildcard: - watcher, err = client.Tracker().Watch(gvr, ns) - default: - watcher, err = client.Tracker().Cluster(cluster).Watch(gvr, ns) - } - t.Logf("%s: cluster watcher started", t.Name()) - close(watcherStarted) - return true, watcher, err - }) - return watcherStarted -} - -func namespace(name, clusterName string, labels, annotations map[string]string) *corev1.Namespace { - if clusterName != "" { - if annotations == nil { - annotations = make(map[string]string) - } - annotations[logicalcluster.AnnotationKey] = clusterName - } - - return &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Labels: labels, - Annotations: annotations, - }, - } -} - -func deployment(name, namespace, clusterName string, labels, annotations map[string]string, finalizers []string) *appsv1.Deployment { - if clusterName != "" { - if annotations == nil { - annotations = make(map[string]string) - } - annotations[logicalcluster.AnnotationKey] = clusterName - } - - return &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Labels: labels, - Annotations: annotations, - Finalizers: finalizers, - }, - } -} - -type deploymentChange func(*appsv1.Deployment) - -func changeDeployment(in *appsv1.Deployment, changes ...deploymentChange) *appsv1.Deployment { - for _, change := range changes { - change(in) - } - return in -} - -func addDeploymentStatus(status appsv1.DeploymentStatus) deploymentChange { - return func(d *appsv1.Deployment) { - d.Status = status - } -} - -func toUnstructured(t require.TestingT, obj 
metav1.Object) *unstructured.Unstructured { - var result unstructured.Unstructured - err := scheme.Convert(obj, &result, nil) - require.NoError(t, err) - - return &result -} - -type unstructuredChange func(d *unstructured.Unstructured) - -func changeUnstructured(in *unstructured.Unstructured, changes ...unstructuredChange) *unstructured.Unstructured { - for _, change := range changes { - change(in) - } - return in -} - -func setNestedField(value interface{}, fields ...string) unstructuredChange { - return func(d *unstructured.Unstructured) { - _ = unstructured.SetNestedField(d.UnstructuredContent(), value, fields...) - } -} - -func deploymentAction(verb, namespace string, subresources ...string) kcptesting.ActionImpl { - return kcptesting.ActionImpl{ - Namespace: namespace, - ClusterPath: logicalcluster.NewPath("root:org:ws"), - Verb: verb, - Resource: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}, - Subresource: strings.Join(subresources, "/"), - } -} - -func updateDeploymentAction(namespace string, object runtime.Object, subresources ...string) kcptesting.UpdateActionImpl { - return kcptesting.UpdateActionImpl{ - ActionImpl: deploymentAction("update", namespace, subresources...), - Object: object, - } -} diff --git a/pkg/syncer/syncer.go b/pkg/syncer/syncer.go deleted file mode 100644 index 06da1fe7570..00000000000 --- a/pkg/syncer/syncer.go +++ /dev/null @@ -1,555 +0,0 @@ -/* -Copyright 2021 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package syncer - -import ( - "context" - "fmt" - "time" - - kcpdynamic "github.com/kcp-dev/client-go/dynamic" - "github.com/kcp-dev/logicalcluster/v3" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/dynamic" - kubernetesinformers "k8s.io/client-go/informers" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/pkg/version" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/cache" - "k8s.io/klog/v2" - - kcpfeatures "github.com/kcp-dev/kcp/pkg/features" - ddsif "github.com/kcp-dev/kcp/pkg/informer" - "github.com/kcp-dev/kcp/pkg/syncer/controllermanager" - "github.com/kcp-dev/kcp/pkg/syncer/endpoints" - "github.com/kcp-dev/kcp/pkg/syncer/indexers" - "github.com/kcp-dev/kcp/pkg/syncer/namespace" - "github.com/kcp-dev/kcp/pkg/syncer/shared" - "github.com/kcp-dev/kcp/pkg/syncer/spec" - "github.com/kcp-dev/kcp/pkg/syncer/spec/dns" - "github.com/kcp-dev/kcp/pkg/syncer/spec/mutators" - "github.com/kcp-dev/kcp/pkg/syncer/status" - "github.com/kcp-dev/kcp/pkg/syncer/synctarget" - "github.com/kcp-dev/kcp/pkg/syncer/upsync" - kcpcorev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" - kcpclientset "github.com/kcp-dev/kcp/sdk/client/clientset/versioned" - kcpclusterclientset "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster" - kcpinformers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions" - . "github.com/kcp-dev/kcp/tmc/pkg/logging" -) - -const ( - AdvancedSchedulingFeatureAnnotation = "featuregates.experimental.workload.kcp.io/advancedscheduling" - - resyncPeriod = 10 * time.Hour - - // TODO(marun) Coordinate this value with the interval configured for the heartbeat controller. - heartbeatInterval = 20 * time.Second -) - -// SyncerConfig defines the syncer configuration that is guaranteed to -// vary across syncer deployments. Capturing these details in a struct -// simplifies defining these details in test fixture. 
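// For orientation, a hypothetical test fixture populating the struct defined
// just below (all values are illustrative, not taken from any real fixture):
//
//	cfg := &SyncerConfig{
//		UpstreamConfig:                upstreamRestConfig,   // assumed *rest.Config for the kcp workspace
//		DownstreamConfig:              downstreamRestConfig, // assumed *rest.Config for the physical cluster
//		ResourcesToSync:               sets.New[string]("deployments.apps"),
//		SyncTargetPath:                logicalcluster.NewPath("root:org:ws"),
//		SyncTargetName:                "us-west1",
//		SyncTargetUID:                 "syncTargetUID",
//		DownstreamNamespaceCleanDelay: 30 * time.Second,
//		DNSImage:                      "example.com/kcp/syncer-dns:latest", // hypothetical image ref
//	}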
-type SyncerConfig struct {
-    UpstreamConfig                *rest.Config
-    DownstreamConfig              *rest.Config
-    ResourcesToSync               sets.Set[string]
-    SyncTargetPath                logicalcluster.Path
-    SyncTargetName                string
-    SyncTargetUID                 string
-    DownstreamNamespaceCleanDelay time.Duration
-    DNSImage                      string
-}
-
-func StartSyncer(ctx context.Context, cfg *SyncerConfig, numSyncerThreads int, importPollInterval time.Duration, syncerNamespace string) error {
-    logger := klog.FromContext(ctx)
-    logger = logger.WithValues(SyncTargetWorkspace, cfg.SyncTargetPath, SyncTargetName, cfg.SyncTargetName)
-
-    logger.V(2).Info("starting syncer")
-
-    kcpVersion := version.Get().GitVersion
-
-    bootstrapConfig := rest.CopyConfig(cfg.UpstreamConfig)
-    rest.AddUserAgent(bootstrapConfig, "kcp#syncer/"+kcpVersion)
-    kcpBootstrapClusterClient, err := kcpclusterclientset.NewForConfig(bootstrapConfig)
-    if err != nil {
-        return err
-    }
-    kcpSyncTargetClient := kcpBootstrapClusterClient.Cluster(cfg.SyncTargetPath)
-
-    // kcpSyncTargetInformerFactory watches the single, named SyncTarget this syncer serves.
-    kcpSyncTargetInformerFactory := kcpinformers.NewSharedScopedInformerFactoryWithOptions(kcpSyncTargetClient, resyncPeriod, kcpinformers.WithTweakListOptions(
-        func(listOptions *metav1.ListOptions) {
-            listOptions.FieldSelector = fields.OneTermEqualSelector("metadata.name", cfg.SyncTargetName).String()
-        },
-    ))
-
-    // TODO(david): we need to provide user-facing details if this polling goes on forever. Blocking here is a bad UX.
-    // TODO(david): Also, any regressions in our code will make any e2e test that starts a syncer (at least in-process)
-    // TODO(david): block until it hits the 10 minute overall test timeout.
-    logger.Info("attempting to retrieve the Syncer SyncTarget resource")
-    var syncTarget *workloadv1alpha1.SyncTarget
-    err = wait.PollImmediateInfinite(5*time.Second, func() (bool, error) {
-        var err error
-        syncTarget, err = kcpSyncTargetClient.WorkloadV1alpha1().SyncTargets().Get(ctx, cfg.SyncTargetName, metav1.GetOptions{})
-        if err != nil {
-            return false, err
-        }
-
-        // If the SyncTargetUID flag is set, we compare the provided value with the kcp SyncTarget UID;
-        // if the values don't match, the syncer refuses to work.
-        if cfg.SyncTargetUID != "" && cfg.SyncTargetUID != string(syncTarget.UID) {
-            return false, fmt.Errorf("unexpected SyncTarget UID %s, expected %s, refusing to sync", syncTarget.UID, cfg.SyncTargetUID)
-        }
-        return true, nil
-    })
-    if err != nil {
-        return err
-    }
-
-    // Resources are accepted as a set to guarantee uniqueness, but all
-    // subsequent consumption is via a slice whose entries are assumed to be unique.
-    resources := sets.List[string](cfg.ResourcesToSync)
-
-    // Start api import first because spec and status syncers are blocked by
-    // gvr discovery finding all the configured resource types in the kcp
-    // workspace.
-
-    // kcpImporterInformerFactory is only used by the API importer to watch APIResourceImports.
-    // TODO(qiujian16) make starting apiimporter optional after we check compatibility of supported APIExports
-    // of synctarget in syncer rather than in server.
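// Aside, referring back to kcpSyncTargetInformerFactory above: the
// tweak-list-options pins the informer to a single object by name. For
// reference, the selector it builds renders as a plain string
// ("us-west1" stands in for cfg.SyncTargetName):
//
//	fields.OneTermEqualSelector("metadata.name", "us-west1").String()
//	// => "metadata.name=us-west1"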
- kcpImporterInformerFactory := kcpinformers.NewSharedScopedInformerFactoryWithOptions(kcpSyncTargetClient, resyncPeriod) - apiImporter, err := NewAPIImporter( - cfg.UpstreamConfig, cfg.DownstreamConfig, - kcpSyncTargetInformerFactory.Workload().V1alpha1().SyncTargets(), - kcpImporterInformerFactory.Apiresource().V1alpha1().APIResourceImports(), - resources, - cfg.SyncTargetPath, cfg.SyncTargetName, syncTarget.GetUID()) - if err != nil { - return err - } - kcpImporterInformerFactory.Start(ctx.Done()) - - downstreamConfig := rest.CopyConfig(cfg.DownstreamConfig) - rest.AddUserAgent(downstreamConfig, "kcp#status-syncer/"+kcpVersion) - downstreamDynamicClient, err := dynamic.NewForConfig(downstreamConfig) - if err != nil { - return err - } - downstreamKubeClient, err := kubernetes.NewForConfig(downstreamConfig) - if err != nil { - return err - } - - syncTargetKey := workloadv1alpha1.ToSyncTargetKey(logicalcluster.From(syncTarget), cfg.SyncTargetName) - logger = logger.WithValues(SyncTargetKey, syncTargetKey) - ctx = klog.NewContext(ctx, logger) - - syncTargetGVRSource := synctarget.NewSyncTargetGVRSource( - kcpSyncTargetInformerFactory.Workload().V1alpha1().SyncTargets(), - downstreamKubeClient, - ) - - // Check whether we're in the Advanced Scheduling feature-gated mode. - advancedSchedulingEnabled := false - if syncTarget.GetAnnotations()[AdvancedSchedulingFeatureAnnotation] == "true" { - logger.Info("Advanced Scheduling feature is enabled") - advancedSchedulingEnabled = true - } - - ddsifForDownstream, err := ddsif.NewScopedDiscoveringDynamicSharedInformerFactory(downstreamDynamicClient, nil, - func(o *metav1.ListOptions) { - o.LabelSelector = workloadv1alpha1.InternalDownstreamClusterLabel + "=" + syncTargetKey - }, - &filteringGVRSource{ - syncTargetGVRSource, - func(gvr schema.GroupVersionResource) bool { - return gvr.Group != kcpcorev1alpha1.SchemeGroupVersion.Group - }, - }, - cache.Indexers{ - indexers.ByNamespaceLocatorIndexName: indexers.IndexByNamespaceLocator, - }, - ) - if err != nil { - return err - } - - // syncerNamespaceInformerFactory to watch some DNS-related resources in the dns namespace - syncerNamespaceInformerFactory := kubernetesinformers.NewSharedInformerFactoryWithOptions(downstreamKubeClient, resyncPeriod, kubernetesinformers.WithNamespace(syncerNamespace)) - dnsProcessor := dns.NewDNSProcessor(downstreamKubeClient, - syncerNamespaceInformerFactory, - cfg.SyncTargetName, syncTarget.GetUID(), syncerNamespace, cfg.DNSImage) - - alwaysRequiredGVRs := []schema.GroupVersionResource{ - corev1.SchemeGroupVersion.WithResource("secrets"), - corev1.SchemeGroupVersion.WithResource("namespaces"), - } - - namespaceCleaner := &delegatingCleaner{} - shardManager := synctarget.NewShardManager( - func(ctx context.Context, shardURLs workloadv1alpha1.VirtualWorkspace) (*synctarget.ShardAccess, func() error, error) { - upstreamConfig := rest.CopyConfig(cfg.UpstreamConfig) - upstreamConfig.Host = shardURLs.SyncerURL - rest.AddUserAgent(upstreamConfig, "kcp#syncing/"+kcpVersion) - upstreamSyncerClusterClient, err := kcpdynamic.NewForConfig(upstreamConfig) - if err != nil { - return nil, nil, err - } - - upstreamUpsyncConfig := rest.CopyConfig(cfg.UpstreamConfig) - upstreamUpsyncConfig.Host = shardURLs.UpsyncerURL - rest.AddUserAgent(upstreamUpsyncConfig, "kcp#upsyncing/"+kcpVersion) - upstreamUpsyncerClusterClient, err := kcpdynamic.NewForConfig(upstreamUpsyncConfig) - if err != nil { - return nil, nil, err - } - - ddsifForUpstreamSyncer, err := 
ddsif.NewDiscoveringDynamicSharedInformerFactory(upstreamSyncerClusterClient, nil, nil, - &filteringGVRSource{ - syncTargetGVRSource, - func(gvr schema.GroupVersionResource) bool { - // Don't expose pods or endpoints via the syncer vw - if gvr.Group == corev1.GroupName && (gvr.Resource == "pods") { - return false - } - return true - }, - }, cache.Indexers{}) - if err != nil { - return nil, nil, err - } - - ddsifForUpstreamUpsyncer, err := ddsif.NewDiscoveringDynamicSharedInformerFactory(upstreamUpsyncerClusterClient, nil, nil, - &filteringGVRSource{ - syncTargetGVRSource, - func(gvr schema.GroupVersionResource) bool { - return gvr.Group == corev1.GroupName && (gvr.Resource == "persistentvolumes" || - gvr.Resource == "pods" || - gvr.Resource == "endpoints") - }, - }, - cache.Indexers{}) - if err != nil { - return nil, nil, err - } - - logicalClusterIndex := synctarget.NewLogicalClusterIndex(ddsifForUpstreamSyncer, ddsifForUpstreamUpsyncer) - - secretMutator := mutators.NewSecretMutator() - podspecableMutator := mutators.NewPodspecableMutator( - func(clusterName logicalcluster.Name) (*ddsif.DiscoveringDynamicSharedInformerFactory, error) { - return ddsifForUpstreamSyncer, nil - }, syncerNamespaceInformerFactory.Core().V1().Services().Lister(), logicalcluster.From(syncTarget), cfg.SyncTargetName, types.UID(cfg.SyncTargetUID), syncerNamespace, kcpfeatures.DefaultFeatureGate.Enabled(kcpfeatures.SyncerTunnel)) - - logger.Info("Creating spec syncer") - specSyncer, err := spec.NewSpecSyncer(logger, logicalcluster.From(syncTarget), cfg.SyncTargetName, syncTargetKey, advancedSchedulingEnabled, - upstreamSyncerClusterClient, downstreamDynamicClient, downstreamKubeClient, ddsifForUpstreamSyncer, ddsifForDownstream, - namespaceCleaner, syncTarget.GetUID(), - syncerNamespace, dnsProcessor, cfg.DNSImage, secretMutator, podspecableMutator) - if err != nil { - return nil, nil, err - } - - upsyncerCleaner, err := upsync.NewUpSyncerCleanupController(logger, logicalcluster.From(syncTarget), cfg.SyncTargetName, types.UID(cfg.SyncTargetUID), syncTargetKey, - upstreamUpsyncerClusterClient, ddsifForUpstreamUpsyncer, - ddsifForDownstream) - if err != nil { - return nil, nil, err - } - - var cacheSyncsForAlwaysRequiredGVRs []cache.InformerSynced - for _, alwaysRequiredGVR := range alwaysRequiredGVRs { - if informer, err := ddsifForUpstreamSyncer.ForResource(alwaysRequiredGVR); err != nil { - return nil, nil, err - } else { - cacheSyncsForAlwaysRequiredGVRs = append(cacheSyncsForAlwaysRequiredGVRs, informer.Informer().HasSynced) - } - } - - start := func() error { - // Start and sync informer factories - - ddsifForUpstreamSyncer.Start(ctx.Done()) - ddsifForUpstreamUpsyncer.Start(ctx.Done()) - - if ok := cache.WaitForCacheSync(ctx.Done(), cacheSyncsForAlwaysRequiredGVRs...); !ok { - return fmt.Errorf("unable to sync watch caches for virtual workspace %q", shardURLs.SyncerURL) - } - - go ddsifForUpstreamSyncer.StartWorker(ctx) - go ddsifForUpstreamUpsyncer.StartWorker(ctx) - - go specSyncer.Start(ctx, numSyncerThreads) - go upsyncerCleaner.Start(ctx, numSyncerThreads) - - // Create and start GVR-specific controllers through controller managers - upstreamSyncerControllerManager := controllermanager.NewControllerManager(ctx, - "upstream-syncer", - controllermanager.InformerSource{ - Subscribe: ddsifForUpstreamSyncer.Subscribe, - Informers: func() (informers map[schema.GroupVersionResource]cache.SharedIndexInformer, notSynced []schema.GroupVersionResource) { - genericInformers, notSynced := 
ddsifForUpstreamSyncer.Informers() - informers = make(map[schema.GroupVersionResource]cache.SharedIndexInformer, len(genericInformers)) - for gvr, inf := range genericInformers { - informers[gvr] = inf.Informer() - } - return informers, notSynced - }, - }, - map[string]controllermanager.ManagedController{}, - ) - go upstreamSyncerControllerManager.Start(ctx) - - upstreamUpsyncerControllerManager := controllermanager.NewControllerManager(ctx, - "upstream-upsyncer", - controllermanager.InformerSource{ - Subscribe: ddsifForUpstreamUpsyncer.Subscribe, - Informers: func() (informers map[schema.GroupVersionResource]cache.SharedIndexInformer, notSynced []schema.GroupVersionResource) { - genericInformers, notSynced := ddsifForUpstreamUpsyncer.Informers() - informers = make(map[schema.GroupVersionResource]cache.SharedIndexInformer, len(genericInformers)) - for gvr, inf := range genericInformers { - informers[gvr] = inf.Informer() - } - return informers, notSynced - }, - }, - map[string]controllermanager.ManagedController{}, - ) - go upstreamUpsyncerControllerManager.Start(ctx) - - return nil - } - - return &synctarget.ShardAccess{ - SyncerClient: upstreamSyncerClusterClient, - SyncerDDSIF: ddsifForUpstreamSyncer, - UpsyncerClient: upstreamUpsyncerClusterClient, - UpsyncerDDSIF: ddsifForUpstreamUpsyncer, - - LogicalClusterIndex: logicalClusterIndex, - }, start, nil - }, - ) - - syncTargetController, err := synctarget.NewSyncTargetController( - logger, - kcpSyncTargetClient.WorkloadV1alpha1().SyncTargets(), - kcpSyncTargetInformerFactory.Workload().V1alpha1().SyncTargets(), - cfg.SyncTargetName, - logicalcluster.From(syncTarget), - syncTarget.GetUID(), - syncTargetGVRSource, - shardManager, - func(ctx context.Context, shardURL workloadv1alpha1.TunnelWorkspace) { - // Start tunneler for POD access - if kcpfeatures.DefaultFeatureGate.Enabled(kcpfeatures.SyncerTunnel) { - upstreamTunnelConfig := rest.CopyConfig(cfg.UpstreamConfig) - rest.AddUserAgent(upstreamTunnelConfig, "kcp#tunneler/"+kcpVersion) - upstreamTunnelConfig.Host = shardURL.URL - - StartSyncerTunnel(ctx, upstreamTunnelConfig, downstreamConfig, logicalcluster.From(syncTarget), cfg.SyncTargetName, cfg.SyncTargetUID, func(gvr schema.GroupVersionResource) (cache.GenericLister, error) { - informers, _ := ddsifForDownstream.Informers() - informer, ok := informers[gvr] - if !ok { - return nil, fmt.Errorf("failed to get informer for gvr: %s", gvr) - } - return informer.Lister(), nil - }) - } - }, - ) - if err != nil { - return err - } - - downstreamNamespaceController, err := namespace.NewDownstreamController(logger, logicalcluster.From(syncTarget), cfg.SyncTargetName, syncTargetKey, syncTarget.GetUID(), downstreamConfig, downstreamDynamicClient, ddsifForDownstream, shardManager.ShardAccessForCluster, syncerNamespace, cfg.DownstreamNamespaceCleanDelay) - if err != nil { - return err - } - namespaceCleaner.delegate = downstreamNamespaceController - - secretMutator := mutators.NewSecretMutator() - podspecableMutator := mutators.NewPodspecableMutator( - func(clusterName logicalcluster.Name) (*ddsif.DiscoveringDynamicSharedInformerFactory, error) { - shardAccess, ok, err := shardManager.ShardAccessForCluster(clusterName) - if err != nil { - return nil, err - } - if !ok { - return nil, fmt.Errorf("shard-related clients not found for cluster %q", clusterName) - } - return shardAccess.SyncerDDSIF, nil - }, syncerNamespaceInformerFactory.Core().V1().Services().Lister(), logicalcluster.From(syncTarget), cfg.SyncTargetName, types.UID(cfg.SyncTargetUID), 
syncerNamespace, kcpfeatures.DefaultFeatureGate.Enabled(kcpfeatures.SyncerTunnel)) - - logger.Info("Creating spec syncer") - specSyncerForDownstream, err := spec.NewSpecSyncerForDownstream(logger, logicalcluster.From(syncTarget), cfg.SyncTargetName, syncTargetKey, advancedSchedulingEnabled, - shardManager.ShardAccessForCluster, downstreamDynamicClient, downstreamKubeClient, ddsifForDownstream, - namespaceCleaner, syncTarget.GetUID(), - syncerNamespace, dnsProcessor, cfg.DNSImage, secretMutator, podspecableMutator) - if err != nil { - return err - } - - logger.Info("Creating status syncer") - statusSyncer, err := status.NewStatusSyncer(logger, logicalcluster.From(syncTarget), cfg.SyncTargetName, syncTargetKey, advancedSchedulingEnabled, - shardManager.ShardAccessForCluster, downstreamDynamicClient, ddsifForDownstream, syncTarget.GetUID()) - if err != nil { - return err - } - - logger.Info("Creating resource upsyncer") - upSyncer, err := upsync.NewUpSyncer(logger, logicalcluster.From(syncTarget), cfg.SyncTargetName, syncTargetKey, shardManager.ShardAccessForCluster, downstreamDynamicClient, ddsifForDownstream, syncTarget.GetUID()) - if err != nil { - return err - } - - // Start and sync informer factories - var cacheSyncsForAlwaysRequiredGVRs []cache.InformerSynced - for _, alwaysRequiredGVR := range alwaysRequiredGVRs { - if informer, err := ddsifForDownstream.ForResource(alwaysRequiredGVR); err != nil { - return err - } else { - cacheSyncsForAlwaysRequiredGVRs = append(cacheSyncsForAlwaysRequiredGVRs, informer.Informer().HasSynced) - } - } - - ddsifForDownstream.Start(ctx.Done()) - kcpSyncTargetInformerFactory.Start(ctx.Done()) - syncerNamespaceInformerFactory.Start(ctx.Done()) - - kcpSyncTargetInformerFactory.WaitForCacheSync(ctx.Done()) - syncerNamespaceInformerFactory.WaitForCacheSync(ctx.Done()) - cache.WaitForCacheSync(ctx.Done(), cacheSyncsForAlwaysRequiredGVRs...) 
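// Aside: the startup sequence above follows the standard informer lifecycle,
// shown here schematically (factory, hasSynced and worker are placeholders,
// not kcp APIs):
//
//	factory.Start(ctx.Done())                              // begin list/watch
//	if !cache.WaitForCacheSync(ctx.Done(), hasSynced...) { // block until caches are warm
//		return fmt.Errorf("caches never synced")
//	}
//	go worker.Start(ctx) // only now is it safe to read from the caches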
- - go ddsifForDownstream.StartWorker(ctx) - - // Start static controllers - go apiImporter.Start(klog.NewContext(ctx, logger.WithValues("resources", resources)), importPollInterval) - go specSyncerForDownstream.Start(ctx, numSyncerThreads) - go statusSyncer.Start(ctx, numSyncerThreads) - go upSyncer.Start(ctx, numSyncerThreads) - go downstreamNamespaceController.Start(ctx, numSyncerThreads) - - downstreamSyncerControllerManager := controllermanager.NewControllerManager(ctx, - "downstream-syncer", - controllermanager.InformerSource{ - Subscribe: ddsifForDownstream.Subscribe, - Informers: func() (informers map[schema.GroupVersionResource]cache.SharedIndexInformer, notSynced []schema.GroupVersionResource) { - genericInformers, notSynced := ddsifForDownstream.Informers() - informers = make(map[schema.GroupVersionResource]cache.SharedIndexInformer, len(genericInformers)) - for gvr, inf := range genericInformers { - informers[gvr] = inf.Informer() - } - return informers, notSynced - }, - }, - map[string]controllermanager.ManagedController{ - endpoints.ControllerName: { - RequiredGVRs: []schema.GroupVersionResource{ - corev1.SchemeGroupVersion.WithResource("services"), - corev1.SchemeGroupVersion.WithResource("endpoints"), - }, - Create: func(ctx context.Context) (controllermanager.StartControllerFunc, error) { - endpointController, err := endpoints.NewEndpointController(downstreamDynamicClient, ddsifForDownstream, logicalcluster.From(syncTarget), cfg.SyncTargetName, types.UID(cfg.SyncTargetUID)) - if err != nil { - return nil, err - } - return func(ctx context.Context) { - endpointController.Start(ctx, 2) - }, nil - }, - }, - }, - ) - - go syncTargetController.Start(ctx) - go downstreamSyncerControllerManager.Start(ctx) - - StartHeartbeat(ctx, kcpSyncTargetClient, cfg.SyncTargetName, cfg.SyncTargetUID) - - return nil -} - -func StartHeartbeat(ctx context.Context, kcpSyncTargetClient kcpclientset.Interface, syncTargetName, syncTargetUID string) { - logger := klog.FromContext(ctx) - - // Attempt to heartbeat every interval - go wait.UntilWithContext(ctx, func(ctx context.Context) { - var heartbeatTime time.Time - - // TODO(marun) Figure out a strategy for backoff to avoid a thundering herd problem with lots of syncers - // Attempt to heartbeat every second until successful. Errors are logged instead of being returned so the - // poll error can be safely ignored. 
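// Aside: the heartbeat below is a two-op JSON patch. The "test" op makes the
// "replace" conditional on the UID still matching, so a SyncTarget that was
// deleted and recreated under a new UID is never heartbeated by a stale
// syncer. Schematically, the payload has this shape:
//
//	[
//	  {"op": "test",    "path": "/metadata/uid",                   "value": "<expected uid>"},
//	  {"op": "replace", "path": "/status/lastSyncerHeartbeatTime", "value": "<now, RFC3339>"}
//	]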
- _ = wait.PollImmediateInfiniteWithContext(ctx, 1*time.Second, func(ctx context.Context) (bool, error) { - patchBytes := []byte(fmt.Sprintf(`[{"op":"test","path":"/metadata/uid","value":%q},{"op":"replace","path":"/status/lastSyncerHeartbeatTime","value":%q}]`, syncTargetUID, time.Now().Format(time.RFC3339))) - syncTarget, err := kcpSyncTargetClient.WorkloadV1alpha1().SyncTargets().Patch(ctx, syncTargetName, types.JSONPatchType, patchBytes, metav1.PatchOptions{}, "status") - if err != nil { - logger.Error(err, "failed to set status.lastSyncerHeartbeatTime") - return false, nil - } - - heartbeatTime = syncTarget.Status.LastSyncerHeartbeatTime.Time - return true, nil - }) - logger.V(5).Info("Heartbeat set", "heartbeatTime", heartbeatTime) - }, heartbeatInterval) -} - -type filteringGVRSource struct { - ddsif.GVRSource - keepGVR func(gvr schema.GroupVersionResource) bool -} - -func (s *filteringGVRSource) GVRs() map[schema.GroupVersionResource]ddsif.GVRPartialMetadata { - gvrs := s.GVRSource.GVRs() - filteredGVRs := make(map[schema.GroupVersionResource]ddsif.GVRPartialMetadata, len(gvrs)) - for gvr, metadata := range gvrs { - if !s.keepGVR(gvr) { - continue - } - filteredGVRs[gvr] = metadata - } - return filteredGVRs -} - -type delegatingCleaner struct { - delegate shared.Cleaner -} - -func (s *delegatingCleaner) PlanCleaning(key string) { - if s.delegate == nil { - return - } - s.delegate.PlanCleaning(key) -} - -func (s *delegatingCleaner) CancelCleaning(key string) { - if s.delegate == nil { - return - } - s.delegate.CancelCleaning(key) -} diff --git a/pkg/syncer/synctarget/gvr_source.go b/pkg/syncer/synctarget/gvr_source.go deleted file mode 100644 index 519f8e72898..00000000000 --- a/pkg/syncer/synctarget/gvr_source.go +++ /dev/null @@ -1,343 +0,0 @@ -/* -Copyright 2023 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package synctarget - -import ( - "context" - "fmt" - "strings" - "sync" - - authorizationv1 "k8s.io/api/authorization/v1" - corev1 "k8s.io/api/core/v1" - apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - "k8s.io/apimachinery/pkg/api/equality" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - utilserrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/client-go/discovery" - "k8s.io/client-go/discovery/cached/memory" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/cache" - "k8s.io/klog/v2" - - "github.com/kcp-dev/kcp/pkg/informer" - conditionsv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/apis/conditions/v1alpha1" - "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/util/conditions" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" - workloadv1alpha1informers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/workload/v1alpha1" -) - -var _ informer.GVRSource = (*syncTargetGVRSource)(nil) - -// NewSyncTargetGVRSource returns an [informer.GVRSource] that can update its list of GVRs based on -// a SyncTarget resource passed to the updateGVRs() method. -// -// It will be used to feed the various [informer.DiscoveringDynamicSharedInformerFactory] instances -// for downstream and upstream. -func NewSyncTargetGVRSource( - syncTargetInformer workloadv1alpha1informers.SyncTargetInformer, - downstreamKubeClient *kubernetes.Clientset, -) *syncTargetGVRSource { - return &syncTargetGVRSource{ - synctargetInformerHasSynced: syncTargetInformer.Informer().HasSynced, - gvrsToWatch: map[schema.GroupVersionResource]informer.GVRPartialMetadata{}, - isGVRAllowed: func(ctx context.Context, gvr schema.GroupVersionResource) (bool, error) { - if sar, err := downstreamKubeClient.AuthorizationV1().SelfSubjectAccessReviews().Create(ctx, &authorizationv1.SelfSubjectAccessReview{ - Spec: authorizationv1.SelfSubjectAccessReviewSpec{ - ResourceAttributes: &authorizationv1.ResourceAttributes{ - Group: gvr.Group, - Resource: gvr.Resource, - Version: gvr.Version, - Verb: "*", - }, - }, - }, metav1.CreateOptions{}); err != nil { - return false, err - } else { - return sar.Status.Allowed, nil - } - }, - downstreamDiscoveryClient: memory.NewMemCacheClient(discovery.NewDiscoveryClient(downstreamKubeClient.RESTClient())), - } -} - -type syncTargetGVRSource struct { - gvrsToWatchLock sync.RWMutex - gvrsToWatch map[schema.GroupVersionResource]informer.GVRPartialMetadata - - // Support subscribers that want to know when Synced GVRs have changed. - subscribersLock sync.Mutex - subscribers []chan<- struct{} - - synctargetInformerHasSynced cache.InformerSynced - isGVRAllowed func(ctx context.Context, gvr schema.GroupVersionResource) (bool, error) - - downstreamDiscoveryClient discovery.CachedDiscoveryInterface -} - -// GVRs returns the required metadata (scope, kind, singular name) about all GVRs that should be synced. -// It implements [informer.GVRSource.GVRs]. 
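// For orientation: the method set this type implements, as exercised in this
// file. This is a sketch of the [informer.GVRSource] contract inferred from
// usage here, not a verbatim copy of its declaration:
//
//	type GVRSource interface {
//		GVRs() map[schema.GroupVersionResource]GVRPartialMetadata
//		Ready() bool
//		Subscribe() <-chan struct{}
//	}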
-func (c *syncTargetGVRSource) GVRs() map[schema.GroupVersionResource]informer.GVRPartialMetadata { - c.gvrsToWatchLock.RLock() - defer c.gvrsToWatchLock.RUnlock() - - gvrs := make(map[schema.GroupVersionResource]informer.GVRPartialMetadata, len(c.gvrsToWatch)+len(builtinGVRs)+2) - gvrs[corev1.SchemeGroupVersion.WithResource("namespaces")] = informer.GVRPartialMetadata{ - Scope: apiextensionsv1.ClusterScoped, - Names: apiextensionsv1.CustomResourceDefinitionNames{ - Singular: "namespace", - Kind: "Namespace", - }, - } - for key, value := range builtinGVRs { - gvrs[key] = value - } - for key, value := range c.gvrsToWatch { - gvrs[key] = value - } - return gvrs -} - -// Ready returns true if the controller is ready to return the GVRs to sync. -// It implements [informer.GVRSource.Ready]. -func (c *syncTargetGVRSource) Ready() bool { - return c.synctargetInformerHasSynced() -} - -// Subscribe returns a new channel to which the controller writes whenever -// its list of GVRs has changed. -// It implements [informer.GVRSource.Subscribe]. -func (c *syncTargetGVRSource) Subscribe() <-chan struct{} { - c.subscribersLock.Lock() - defer c.subscribersLock.Unlock() - - // Use a buffered channel so we can always send at least 1, regardless of consumer status. - changes := make(chan struct{}, 1) - c.subscribers = append(c.subscribers, changes) - - return changes -} - -// removeUnusedGVRs removes the GVRs which are not required anymore, and return `true` if GVRs were updated. -func (c *syncTargetGVRSource) removeUnusedGVRs(ctx context.Context, requiredGVRs map[schema.GroupVersionResource]bool) bool { - logger := klog.FromContext(ctx) - - c.gvrsToWatchLock.Lock() - defer c.gvrsToWatchLock.Unlock() - - updated := false - for gvr := range c.gvrsToWatch { - if _, ok := requiredGVRs[gvr]; !ok { - logger.WithValues("gvr", gvr.String()).V(2).Info("Stop syncer for gvr") - delete(c.gvrsToWatch, gvr) - updated = true - } - } - return updated -} - -// addGVR adds the given GVR if it isn't already in the list, and returns `true` if the GVR was added, -// `false` if it was already there. 
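// Aside: Subscribe (above) and notifySubscribers (below) share a common Go
// idiom: a 1-slot buffered channel lets the notifier record "something
// changed" without ever blocking, and a slow consumer coalesces bursts into
// a single wake-up. A minimal standalone sketch:
//
//	changes := make(chan struct{}, 1)
//	select {
//	case changes <- struct{}{}: // notification queued
//	default: // one is already pending; dropping this one loses nothing
//	}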
-func (c *syncTargetGVRSource) addGVR(ctx context.Context, gvr schema.GroupVersionResource) (bool, error) {
-    logger := klog.FromContext(ctx)
-
-    c.gvrsToWatchLock.Lock()
-    defer c.gvrsToWatchLock.Unlock()
-
-    if _, ok := c.gvrsToWatch[gvr]; ok {
-        logger.V(2).Info("Informer is started already")
-        return false, nil
-    }
-
-    partialMetadata, err := c.getGVRPartialMetadata(gvr)
-    if err != nil {
-        return false, err
-    }
-
-    c.gvrsToWatch[gvr] = *partialMetadata
-
-    return true, nil
-}
-
-func (c *syncTargetGVRSource) getGVRPartialMetadata(gvr schema.GroupVersionResource) (*informer.GVRPartialMetadata, error) {
-    apiResourceList, err := c.downstreamDiscoveryClient.ServerResourcesForGroupVersion(gvr.GroupVersion().String())
-    if err != nil {
-        return nil, err
-    }
-    for _, apiResource := range apiResourceList.APIResources {
-        if apiResource.Name == gvr.Resource {
-            var resourceScope apiextensionsv1.ResourceScope
-            if apiResource.Namespaced {
-                resourceScope = apiextensionsv1.NamespaceScoped
-            } else {
-                resourceScope = apiextensionsv1.ClusterScoped
-            }
-
-            return &informer.GVRPartialMetadata{
-                    Names: apiextensionsv1.CustomResourceDefinitionNames{
-                        Kind:     apiResource.Kind,
-                        Singular: apiResource.SingularName,
-                    },
-                    Scope: resourceScope,
-                },
-                nil
-        }
-    }
-    return nil, fmt.Errorf("unable to retrieve discovery for GVR: %s", gvr)
-}
-
-func (c *syncTargetGVRSource) notifySubscribers(ctx context.Context) {
-    logger := klog.FromContext(ctx)
-
-    c.subscribersLock.Lock()
-    defer c.subscribersLock.Unlock()
-
-    for index, ch := range c.subscribers {
-        logger.V(4).Info("Attempting to notify subscribers", "index", index)
-        select {
-        case ch <- struct{}{}:
-            logger.V(4).Info("Successfully notified subscriber", "index", index)
-        default:
-            logger.V(4).Info("Unable to notify subscriber - channel full", "index", index)
-        }
-    }
-}
-
-var builtinGVRs = map[schema.GroupVersionResource]informer.GVRPartialMetadata{
-    {
-        Version:  "v1",
-        Resource: "configmaps",
-    }: {
-        Scope: apiextensionsv1.NamespaceScoped,
-        Names: apiextensionsv1.CustomResourceDefinitionNames{
-            Singular: "configmap",
-            Kind:     "ConfigMap",
-        },
-    },
-    {
-        Version:  "v1",
-        Resource: "secrets",
-    }: {
-        Scope: apiextensionsv1.NamespaceScoped,
-        Names: apiextensionsv1.CustomResourceDefinitionNames{
-            Singular: "secret",
-            Kind:     "Secret",
-        },
-    },
-}
-
-func getAllGVRs(synctarget *workloadv1alpha1.SyncTarget) map[schema.GroupVersionResource]bool {
-    // TODO(jmprusi): Added Configmaps and Secrets to the default syncing, but we should figure out
-    // a way to avoid doing that: https://github.com/kcp-dev/kcp/issues/727
-    gvrs := map[schema.GroupVersionResource]bool{}
-
-    for gvr := range builtinGVRs {
-        gvrs[gvr] = true
-    }
-
-    if synctarget == nil {
-        return gvrs
-    }
-
-    // TODO(qiujian16) We currently check the API compatibility on the server side. When we change to check the
-    // compatibility on the syncer side, this part needs to be changed.
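// Aside, with illustrative values: a status entry such as
//
//	r := synctarget.Status.SyncedResources[0] // say: group "apps", resource "deployments", versions ["v1"], schema accepted
//
// contributes schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}
// to the returned set via the loop below, in addition to the always-present
// builtinGVRs above.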
- for _, r := range synctarget.Status.SyncedResources { - if r.State != workloadv1alpha1.ResourceSchemaAcceptedState { - continue - } - for _, version := range r.Versions { - gvrs[schema.GroupVersionResource{ - Group: r.Group, - Version: version, - Resource: r.Resource, - }] = true - } - } - - return gvrs -} - -func (c *syncTargetGVRSource) reconcile(ctx context.Context, syncTarget *workloadv1alpha1.SyncTarget) (reconcileStatus, error) { - logger := klog.FromContext(ctx) - - requiredGVRs := getAllGVRs(syncTarget) - - c.downstreamDiscoveryClient.Invalidate() - - notify := false - var unauthorizedGVRs []string - var errs []error - for gvr := range requiredGVRs { - logger := logger.WithValues("gvr", gvr.String()) - ctx := klog.NewContext(ctx, logger) - allowed, err := c.isGVRAllowed(ctx, gvr) - if err != nil { - logger.Error(err, "Failed to check ssar") - errs = append(errs, err) - unauthorizedGVRs = append(unauthorizedGVRs, gvr.String()) - continue - } - - if !allowed { - logger.V(2).Info("Stop informer since the syncer is not authorized to sync") - // remove this from requiredGVRs so its informer will be stopped later. - delete(requiredGVRs, gvr) - unauthorizedGVRs = append(unauthorizedGVRs, gvr.String()) - continue - } - - if updated, err := c.addGVR(ctx, gvr); err != nil { - errs = append(errs, err) - continue - } else if updated { - notify = true - } - } - - if updated := c.removeUnusedGVRs(ctx, requiredGVRs); updated { - notify = true - } - - if notify { - c.notifySubscribers(ctx) - } - - if syncTarget == nil { - return reconcileStatusContinue, utilserrors.NewAggregate(errs) - } - - oldCondition := conditions.Get(syncTarget, workloadv1alpha1.SyncerAuthorized).DeepCopy() - if len(unauthorizedGVRs) > 0 { - conditions.MarkFalse( - syncTarget, - workloadv1alpha1.SyncerAuthorized, - "SyncerUnauthorized", - conditionsv1alpha1.ConditionSeverityError, - "SSAR check failed for gvrs: %s", strings.Join(unauthorizedGVRs, ";"), - ) - } else { - conditions.MarkTrue(syncTarget, workloadv1alpha1.SyncerAuthorized) - } - newCondition := conditions.Get(syncTarget, workloadv1alpha1.SyncerAuthorized) - - if equality.Semantic.DeepEqual(oldCondition, newCondition) { - return reconcileStatusContinue, utilserrors.NewAggregate(errs) - } - - return reconcileStatusStopAndRequeue, utilserrors.NewAggregate(errs) -} diff --git a/pkg/syncer/synctarget/shard_manager.go b/pkg/syncer/synctarget/shard_manager.go deleted file mode 100644 index 6a4a65d0b83..00000000000 --- a/pkg/syncer/synctarget/shard_manager.go +++ /dev/null @@ -1,256 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/
-
-package synctarget
-
-import (
-    "context"
-    "fmt"
-    "sync"
-
-    kcpdynamic "github.com/kcp-dev/client-go/dynamic"
-    "github.com/kcp-dev/logicalcluster/v3"
-
-    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
-    "k8s.io/apimachinery/pkg/runtime/schema"
-    utilserrors "k8s.io/apimachinery/pkg/util/errors"
-    "k8s.io/client-go/tools/cache"
-    "k8s.io/klog/v2"
-
-    "github.com/kcp-dev/kcp/pkg/informer"
-    workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1"
-)
-
-// NewLogicalClusterIndex creates an index that contains all the keys of the synced and upsynced resources,
-// indexed by logical cluster name.
-// This index is filled by the syncer and upsyncer ddsifs, and is used to check if the related shard contains
-// a given logicalCluster.
-func NewLogicalClusterIndex(syncerDDSIF, upsyncerDDSIF *informer.DiscoveringDynamicSharedInformerFactory) *logicalClusterIndex {
-    index := &logicalClusterIndex{
-        indexedKeys:     map[string]map[string]interface{}{},
-        indexedKeysLock: sync.RWMutex{},
-    }
-
-    clusterNameAndKey := func(syncerSource string, gvr schema.GroupVersionResource, obj *unstructured.Unstructured) (clusterName, clusterKey string, err error) {
-        clusterName = logicalcluster.From(obj).String()
-        key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
-        if err != nil {
-            return "", "", err
-        }
-
-        return clusterName, fmt.Sprintf("%s$$%s.%s.%s##%s", syncerSource, gvr.Resource, gvr.Group, gvr.Version, key), nil
-    }
-
-    add := func(syncerSource string, gvr schema.GroupVersionResource, obj *unstructured.Unstructured) error {
-        clusterName, clusterKey, err := clusterNameAndKey(syncerSource, gvr, obj)
-        if err != nil {
-            return err
-        }
-
-        index.indexedKeysLock.Lock()
-        defer index.indexedKeysLock.Unlock()
-
-        clusterKeys, ok := index.indexedKeys[clusterName]
-        if !ok {
-            clusterKeys = map[string]interface{}{}
-            index.indexedKeys[clusterName] = clusterKeys
-        }
-        clusterKeys[clusterKey] = nil
-        return nil
-    }
-
-    remove := func(syncerSource string, gvr schema.GroupVersionResource, obj *unstructured.Unstructured) error {
-        clusterName, clusterKey, err := clusterNameAndKey(syncerSource, gvr, obj)
-        if err != nil {
-            return err
-        }
-
-        index.indexedKeysLock.Lock()
-        defer index.indexedKeysLock.Unlock()
-
-        clusterKeys, ok := index.indexedKeys[clusterName]
-        if !ok {
-            return nil
-        }
-        delete(clusterKeys, clusterKey)
-        if len(clusterKeys) == 0 {
-            delete(index.indexedKeys, clusterName)
-        }
-        return nil
-    }
-
-    syncerDDSIF.AddEventHandler(informer.GVREventHandlerFuncs{
-        AddFunc: func(gvr schema.GroupVersionResource, obj interface{}) {
-            _ = add("S", gvr, obj.(*unstructured.Unstructured))
-        },
-        DeleteFunc: func(gvr schema.GroupVersionResource, obj interface{}) {
-            _ = remove("S", gvr, obj.(*unstructured.Unstructured))
-        },
-    })
-
-    upsyncerDDSIF.AddEventHandler(informer.GVREventHandlerFuncs{
-        AddFunc: func(gvr schema.GroupVersionResource, obj interface{}) {
-            _ = add("U", gvr, obj.(*unstructured.Unstructured))
-        },
-        DeleteFunc: func(gvr schema.GroupVersionResource, obj interface{}) {
-            _ = remove("U", gvr, obj.(*unstructured.Unstructured))
-        },
-    })
-
-    return index
-}
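// Aside: a concrete example of the composite key built by clusterNameAndKey
// above, for a Deployment "test/theDeployment" observed on the syncer ("S")
// side (all values illustrative):
//
//	fmt.Sprintf("%s$$%s.%s.%s##%s", "S", "deployments", "apps", "v1", "test/theDeployment")
//	// => "S$$deployments.apps.v1##test/theDeployment"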
-func (index *logicalClusterIndex) Exists(clusterName logicalcluster.Name) bool {
- index.indexedKeysLock.RLock()
- defer index.indexedKeysLock.RUnlock()
-
- _, ok := index.indexedKeys[string(clusterName)]
- return ok
-}
-
-type logicalClusterIndex struct {
- indexedKeys map[string]map[string]interface{}
- indexedKeysLock sync.RWMutex
-}
-
-// ShardAccess contains clustered dynamic clients, as well as
-// cluster-aware informer factories for both the Syncer and Upsyncer virtual workspaces
-// associated with a Shard.
-type ShardAccess struct {
- SyncerClient kcpdynamic.ClusterInterface
- SyncerDDSIF *informer.DiscoveringDynamicSharedInformerFactory
- UpsyncerClient kcpdynamic.ClusterInterface
- UpsyncerDDSIF *informer.DiscoveringDynamicSharedInformerFactory
-
- // LogicalClusterIndex contains all the keys of the synced and upsynced resources
- // indexed by logical cluster name.
- LogicalClusterIndex *logicalClusterIndex
-}
-
-// GetShardAccessFunc is the type of a function that provides a [ShardAccess] for a
-// logical cluster name.
-type GetShardAccessFunc func(clusterName logicalcluster.Name) (ShardAccess, bool, error)
-
-// NewShardManager returns a [shardManager] that can manage the addition or removal of shard-specific
-// upstream virtual workspace URLs, based on a SyncTarget resource passed to the reconcile() method.
-
-// When a new shard is found (identified by the pair of virtual workspace URLs - for both syncer and upsyncer),
-// the newShardControllers() method is called, the resulting [ShardAccess] is stored,
-// and the resulting start() function is called in a goroutine.
-
-// When a shard is removed, the context initially passed to the
-// newShardControllers() method is cancelled.
-
-// The ShardAccessForCluster() method is used by downstream controllers to
-// get / list upstream resources in the right shard.
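For orientation, a minimal sketch of a newShardControllers callback satisfying the contract described above; this is an editor's illustration, not code from the removed files, and buildShardClients / buildInformerFactories are hypothetical helpers:

func exampleShardControllers(ctx context.Context, shardURLs workloadv1alpha1.VirtualWorkspace) (*ShardAccess, func() error, error) {
	// Build per-shard clients and informer factories (hypothetical helpers).
	syncerClient, upsyncerClient, err := buildShardClients(shardURLs)
	if err != nil {
		return nil, nil, err
	}
	syncerDDSIF, upsyncerDDSIF := buildInformerFactories(syncerClient, upsyncerClient)

	access := &ShardAccess{
		SyncerClient:        syncerClient,
		SyncerDDSIF:         syncerDDSIF,
		UpsyncerClient:      upsyncerClient,
		UpsyncerDDSIF:       upsyncerDDSIF,
		LogicalClusterIndex: NewLogicalClusterIndex(syncerDDSIF, upsyncerDDSIF),
	}
	start := func() error {
		// Launch the shard-scoped informers and controllers here, typically
		// waiting for cache sync (details elided in this sketch); return a
		// non-nil error only if startup fails, since the shard manager marks
		// the shard ready once start() returns nil.
		return nil
	}
	return access, start, nil
}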
-func NewShardManager(
- newShardControllers func(ctx context.Context, shardURLs workloadv1alpha1.VirtualWorkspace) (access *ShardAccess, start func() error, err error)) *shardManager {
- return &shardManager{
- controllers: map[workloadv1alpha1.VirtualWorkspace]shardControllers{},
- newShardControllers: newShardControllers,
- }
-}
-
-type shardManager struct {
- controllersLock sync.RWMutex
- controllers map[workloadv1alpha1.VirtualWorkspace]shardControllers
- newShardControllers func(ctx context.Context, shardURLs workloadv1alpha1.VirtualWorkspace) (access *ShardAccess, start func() error, err error)
-}
-
-func (c *shardManager) ShardAccessForCluster(clusterName logicalcluster.Name) (ShardAccess, bool, error) {
- c.controllersLock.RLock()
- defer c.controllersLock.RUnlock()
-
- for _, shardControllers := range c.controllers {
- if shardControllers.ShardAccess.LogicalClusterIndex.Exists(clusterName) {
- return shardControllers.ShardAccess, true, nil
- }
- }
- return ShardAccess{}, false, nil
-}
-
-func (c *shardManager) reconcile(ctx context.Context, syncTarget *workloadv1alpha1.SyncTarget) (reconcileStatus, error) {
- logger := klog.FromContext(ctx)
-
- requiredShards := map[workloadv1alpha1.VirtualWorkspace]bool{}
- if syncTarget != nil {
- for _, shardURLs := range syncTarget.Status.VirtualWorkspaces {
- requiredShards[shardURLs] = true
- }
- }
-
- c.controllersLock.Lock()
- defer c.controllersLock.Unlock()
-
- // Remove obsolete controllers that don't have a shard anymore
- for shardURLs, shardControllers := range c.controllers {
- if _, ok := requiredShards[shardURLs]; ok {
- // The controllers are still expected => don't remove them
- continue
- }
- // These controllers should no longer be running:
- // stop them and remove them from the list of started shard controllers
- shardControllers.stop()
- delete(c.controllers, shardURLs)
- }
-
- var errs []error
- // Create and start missing controllers that have Virtual Workspace URLs for a shard
- for shardURLs := range requiredShards {
- shardURLs := shardURLs
- if _, ok := c.controllers[shardURLs]; ok {
- // The controllers are already started
- continue
- }
-
- // Create the controllers with a cancellable context, so they can be
- // stopped later when the shard goes away
- shardControllersContext, cancelFunc := context.WithCancel(ctx)
- shardAccess, start, err := c.newShardControllers(shardControllersContext, shardURLs)
- if err != nil {
- logger.Error(err, "failed creating controllers for shard", "shard", shardURLs)
- errs = append(errs, err)
- cancelFunc()
- continue
- }
- c.controllers[shardURLs] = shardControllers{
- *shardAccess, cancelFunc, false,
- }
- go func() {
- err := start()
- c.controllersLock.Lock()
- defer c.controllersLock.Unlock()
-
- if err != nil {
- delete(c.controllers, shardURLs)
- cancelFunc()
- } else {
- controllers := c.controllers[shardURLs]
- controllers.ready = true
- c.controllers[shardURLs] = controllers
- }
- }()
- }
- return reconcileStatusContinue, utilserrors.NewAggregate(errs)
-}
-
-type shardControllers struct {
- ShardAccess
- stop func()
- ready bool
-}
diff --git a/pkg/syncer/synctarget/synctarget_controller.go b/pkg/syncer/synctarget/synctarget_controller.go
deleted file mode 100644
index ed9fae062a8..00000000000
--- a/pkg/syncer/synctarget/synctarget_controller.go
+++ /dev/null
@@ -1,217 +0,0 @@
-/*
-Copyright 2022 The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package synctarget
-
-import (
- "context"
- "fmt"
- "time"
-
- "github.com/go-logr/logr"
- "github.com/kcp-dev/logicalcluster/v3"
-
- apierrors "k8s.io/apimachinery/pkg/api/errors"
- "k8s.io/apimachinery/pkg/types"
- "k8s.io/apimachinery/pkg/util/errors"
- "k8s.io/apimachinery/pkg/util/runtime"
- "k8s.io/apimachinery/pkg/util/wait"
- "k8s.io/client-go/tools/cache"
- "k8s.io/client-go/util/workqueue"
- "k8s.io/klog/v2"
-
- "github.com/kcp-dev/kcp/pkg/logging"
- "github.com/kcp-dev/kcp/pkg/reconciler/committer"
- workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1"
- workloadv1alpha1client "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/typed/workload/v1alpha1"
- workloadv1alpha1informers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/workload/v1alpha1"
- workloadv1alpha1listers "github.com/kcp-dev/kcp/sdk/client/listers/workload/v1alpha1"
-)
-
-const (
- resyncPeriod = 10 * time.Hour
- controllerName = "kcp-syncer-synctarget-gvrsource-controller"
-)
-
-type controller struct {
- queue workqueue.RateLimitingInterface
-
- syncTargetUID types.UID
- syncTargetLister workloadv1alpha1listers.SyncTargetLister
- commit CommitFunc
-
- reconcilers []reconciler
-}
-
-// NewSyncTargetController returns a controller that watches the [workloadv1alpha1.SyncTarget]
-// associated with this syncer.
-// It then calls the reconcile methods of the gvrSource and shardManager
-// reconcilers passed as arguments, to update available GVRs and shards
-// according to the content of the SyncTarget status.
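As a usage illustration (editor's sketch grounded in the signature below; the placeholder variable names and argument values are not from the removed code):

	controller, err := NewSyncTargetController(
		logger,             // logr.Logger for the syncer
		syncTargetClient,   // workloadv1alpha1client.SyncTargetInterface (placeholder)
		syncTargetInformer, // workloadv1alpha1informers.SyncTargetInformer (placeholder)
		"my-sync-target",
		logicalcluster.Name("root:org:ws"),
		types.UID("example-uid"),
		gvrSource,
		shardManager,
		startShardTunneler,
	)
	if err != nil {
		return err
	}
	go controller.Start(ctx)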
-func NewSyncTargetController( - syncerLogger logr.Logger, - syncTargetClient workloadv1alpha1client.SyncTargetInterface, - syncTargetInformer workloadv1alpha1informers.SyncTargetInformer, - syncTargetName string, - syncTargetClusterName logicalcluster.Name, - syncTargetUID types.UID, - gvrSource *syncTargetGVRSource, - shardManager *shardManager, - startShardTunneler func(ctx context.Context, shardURL workloadv1alpha1.TunnelWorkspace), -) (*controller, error) { - c := &controller{ - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), controllerName), - syncTargetUID: syncTargetUID, - syncTargetLister: syncTargetInformer.Lister(), - commit: committer.NewCommitterScoped[*SyncTarget, Patcher, *SyncTargetSpec, *SyncTargetStatus](syncTargetClient), - - reconcilers: []reconciler{ - gvrSource, - shardManager, - &tunnelerReconciler{ - startedTunnelers: make(map[workloadv1alpha1.TunnelWorkspace]tunnelerStopper), - startShardTunneler: startShardTunneler, - }, - }, - } - - logger := logging.WithReconciler(syncerLogger, controllerName) - - _, _ = syncTargetInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ - FilterFunc: func(obj interface{}) bool { - key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) - if err != nil { - return false - } - _, name, err := cache.SplitMetaNamespaceKey(key) - if err != nil { - return false - } - return name == syncTargetName - }, - Handler: cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { c.enqueue(obj, logger) }, - UpdateFunc: func(old, obj interface{}) { c.enqueue(obj, logger) }, - DeleteFunc: func(obj interface{}) { c.enqueue(obj, logger) }, - }, - }) - - return c, nil -} - -type SyncTarget = workloadv1alpha1.SyncTarget -type SyncTargetSpec = workloadv1alpha1.SyncTargetSpec -type SyncTargetStatus = workloadv1alpha1.SyncTargetStatus -type Patcher = workloadv1alpha1client.SyncTargetInterface -type Resource = committer.Resource[*SyncTargetSpec, *SyncTargetStatus] -type CommitFunc = func(context.Context, *Resource, *Resource) error - -func (c *controller) enqueue(obj interface{}, logger logr.Logger) { - key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) - if err != nil { - runtime.HandleError(err) - return - } - - logging.WithQueueKey(logger, key).V(2).Info("queueing SyncTarget") - - c.queue.Add(key) -} - -// Start starts the controller worker. -func (c *controller) Start(ctx context.Context) { - defer runtime.HandleCrash() - defer c.queue.ShutDown() - - logger := logging.WithReconciler(klog.FromContext(ctx), controllerName) - ctx = klog.NewContext(ctx, logger) - logger.Info("Starting controller") - defer logger.Info("Shutting down controller") - - go wait.UntilWithContext(ctx, c.startWorker, time.Second) - - <-ctx.Done() -} - -func (c *controller) startWorker(ctx context.Context) { - for c.processNextWorkItem(ctx) { - } -} - -func (c *controller) processNextWorkItem(ctx context.Context) bool { - // Wait until there is a new item in the working queue - k, quit := c.queue.Get() - if quit { - return false - } - key := k.(string) - - logger := logging.WithQueueKey(klog.FromContext(ctx), key) - ctx = klog.NewContext(ctx, logger) - logger.V(1).Info("processing key") - - // No matter what, tell the queue we're done with this key, to unblock - // other workers. 
- defer c.queue.Done(key) - - if requeue, err := c.process(ctx, key); err != nil { - runtime.HandleError(fmt.Errorf("failed to sync %q: %w", key, err)) - c.queue.AddRateLimited(key) - return true - } else if requeue { - // only requeue if we didn't error, but we still want to requeue - c.queue.Add(key) - return true - } - - c.queue.Forget(key) - return true -} - -func (c *controller) process(ctx context.Context, key string) (bool, error) { - logger := klog.FromContext(ctx) - - _, name, err := cache.SplitMetaNamespaceKey(key) - if err != nil { - logger.Error(err, "failed to split key, dropping") - return false, nil - } - - syncTarget, err := c.syncTargetLister.Get(name) - if err != nil && !apierrors.IsNotFound(err) { - return false, err - } - if apierrors.IsNotFound(err) || syncTarget.GetUID() != c.syncTargetUID { - return c.reconcile(ctx, nil) - } - - previous := syncTarget - syncTarget = syncTarget.DeepCopy() - - var errs []error - requeue, err := c.reconcile(ctx, syncTarget) - if err != nil { - errs = append(errs, err) - } - - oldResource := &Resource{ObjectMeta: previous.ObjectMeta, Spec: &previous.Spec, Status: &previous.Status} - newResource := &Resource{ObjectMeta: syncTarget.ObjectMeta, Spec: &syncTarget.Spec, Status: &syncTarget.Status} - if err := c.commit(ctx, oldResource, newResource); err != nil { - errs = append(errs, err) - } - - return requeue, errors.NewAggregate(errs) -} diff --git a/pkg/syncer/synctarget/synctarget_reconciler.go b/pkg/syncer/synctarget/synctarget_reconciler.go deleted file mode 100644 index af281a88c22..00000000000 --- a/pkg/syncer/synctarget/synctarget_reconciler.go +++ /dev/null @@ -1,56 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package synctarget - -import ( - "context" - - utilserrors "k8s.io/apimachinery/pkg/util/errors" - - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" -) - -type reconcileStatus int - -const ( - reconcileStatusStopAndRequeue reconcileStatus = iota - reconcileStatusContinue -) - -type reconciler interface { - reconcile(ctx context.Context, syncTarget *workloadv1alpha1.SyncTarget) (reconcileStatus, error) -} - -func (c *controller) reconcile(ctx context.Context, syncTarget *workloadv1alpha1.SyncTarget) (bool, error) { - var errs []error - - requeue := false - for _, r := range c.reconcilers { - var err error - var status reconcileStatus - status, err = r.reconcile(ctx, syncTarget) - if err != nil { - errs = append(errs, err) - } - if status == reconcileStatusStopAndRequeue { - requeue = true - break - } - } - - return requeue, utilserrors.NewAggregate(errs) -} diff --git a/pkg/syncer/synctarget/tunneler_reconciler.go b/pkg/syncer/synctarget/tunneler_reconciler.go deleted file mode 100644 index d3c1c4ce917..00000000000 --- a/pkg/syncer/synctarget/tunneler_reconciler.go +++ /dev/null @@ -1,76 +0,0 @@ -/* -Copyright 2023 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package synctarget - -import ( - "context" - "sync" - - utilserrors "k8s.io/apimachinery/pkg/util/errors" - - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" -) - -type tunnelerReconciler struct { - startedTunnelersLock sync.RWMutex - startedTunnelers map[workloadv1alpha1.TunnelWorkspace]tunnelerStopper - - startShardTunneler func(ctx context.Context, shardURL workloadv1alpha1.TunnelWorkspace) -} - -func (c *tunnelerReconciler) reconcile(ctx context.Context, syncTarget *workloadv1alpha1.SyncTarget) (reconcileStatus, error) { - requiredShards := map[workloadv1alpha1.TunnelWorkspace]bool{} - if syncTarget != nil { - for _, shardURL := range syncTarget.Status.TunnelWorkspaces { - requiredShards[shardURL] = true - } - } - - c.startedTunnelersLock.Lock() - defer c.startedTunnelersLock.Unlock() - - // Remove obsolete tunnelers that don't have a shard anymore - for shardURL, stopTunneler := range c.startedTunnelers { - if _, ok := requiredShards[shardURL]; ok { - // The tunnelers are still expected => don't remove them - continue - } - // The tunnelers should not be running - // Stop them and remove it from the list of started shard tunneler - stopTunneler() - delete(c.startedTunnelers, shardURL) - } - - var errs []error - // Create and start missing tunnelers - for shardURL := range requiredShards { - if _, ok := c.startedTunnelers[shardURL]; ok { - // The tunnelers are already started - continue - } - - // Start the tunnelers - shardTunnelerContext, cancelFunc := context.WithCancel(ctx) - - // Create the tunneler - c.startShardTunneler(shardTunnelerContext, shardURL) - c.startedTunnelers[shardURL] = tunnelerStopper(cancelFunc) - } - return reconcileStatusContinue, utilserrors.NewAggregate(errs) -} - -type tunnelerStopper func() diff --git a/pkg/syncer/tunneler.go b/pkg/syncer/tunneler.go deleted file mode 100644 index dad134c3de7..00000000000 --- a/pkg/syncer/tunneler.go +++ /dev/null @@ -1,264 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package syncer - -import ( - "context" - "fmt" - "net/http" - "net/http/httputil" - "net/url" - "time" - - "github.com/kcp-dev/logicalcluster/v3" - - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/cache" - "k8s.io/klog/v2" - "k8s.io/utils/clock" - - "github.com/kcp-dev/kcp/pkg/server/requestinfo" - "github.com/kcp-dev/kcp/pkg/syncer/shared" - "github.com/kcp-dev/kcp/pkg/tunneler" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" -) - -var ( - errorScheme = runtime.NewScheme() - errorCodecs = serializer.NewCodecFactory(errorScheme) -) - -func init() { - errorScheme.AddUnversionedTypes(metav1.Unversioned, - &metav1.Status{}, - ) -} - -type ResourceListerFunc func(gvr schema.GroupVersionResource) (cache.GenericLister, error) - -// StartSyncerTunnel blocks until the context is cancelled trying to establish a tunnel against the specified target. -func StartSyncerTunnel(ctx context.Context, upstream, downstream *rest.Config, syncTargetWorkspace logicalcluster.Name, syncTargetName, syncTargetUID string, getDownstreamLister ResourceListerFunc) { - // connect to create the reverse tunnels - var ( - initBackoff = 5 * time.Second - maxBackoff = 5 * time.Minute - resetDuration = 1 * time.Minute - backoffFactor = 2.0 - jitter = 1.0 - clock = &clock.RealClock{} - sliding = true - ) - - backoffMgr := wait.NewExponentialBackoffManager(initBackoff, maxBackoff, resetDuration, backoffFactor, jitter, clock) - logger := klog.FromContext(ctx) - - go wait.BackoffUntil(func() { - logger.V(5).Info("starting tunnel") - err := startTunneler(ctx, upstream, downstream, syncTargetWorkspace, syncTargetName, syncTargetUID, getDownstreamLister) - if err != nil { - logger.Error(err, "failed to create tunnel") - } - }, backoffMgr, sliding, ctx.Done()) -} - -func startTunneler(ctx context.Context, upstream, downstream *rest.Config, syncTargetClusterName logicalcluster.Name, syncTargetName, syncTargetUID string, getDownstreamLister ResourceListerFunc) error { - logger := klog.FromContext(ctx) - - // syncer --> kcp - clientUpstream, err := rest.HTTPClientFor(upstream) - if err != nil { - return err - } - - cfg := *downstream - // use http/1.1 to allow SPDY tunneling: pod exec, port-forward, ... 
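// Editor's note (not part of the original file): forcing "http/1.1" in
// NextProtos keeps HTTP/2 out of the ALPN negotiation. SPDY-based streaming
// subresources (exec, attach, port-forward) rely on the HTTP/1.1 Upgrade
// mechanism, which cannot be performed over an HTTP/2 connection.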
- cfg.NextProtos = []string{"http/1.1"}
- // syncer --> local apiserver
- url, err := url.Parse(cfg.Host)
- if err != nil {
- return err
- }
-
- proxy := httputil.NewSingleHostReverseProxy(url)
-
- clientDownstream, err := rest.HTTPClientFor(&cfg)
- if err != nil {
- return err
- }
-
- proxy.Transport = clientDownstream.Transport
-
- // create the reverse connection
- // virtual workspaces
- u, err := url.Parse(upstream.Host)
- if err != nil {
- return err
- }
- // strip the path
- u.Path = ""
- dst, err := tunneler.SyncerTunnelURL(u.String(), syncTargetClusterName.String(), syncTargetName)
- if err != nil {
- return err
- }
-
- logger = logger.WithValues("syncer-tunnel-url", dst)
- logger.Info("connecting to destination URL")
- l, err := tunneler.NewListener(clientUpstream, dst)
- if err != nil {
- return err
- }
- defer l.Close()
-
- // reverse proxy the request coming from the reverse connection to the p-cluster apiserver
- server := &http.Server{ReadHeaderTimeout: 30 * time.Second, Handler: withPodAccessCheck(proxy, getDownstreamLister, syncTargetClusterName, syncTargetName, syncTargetUID)}
- defer server.Close()
-
- logger.V(2).Info("serving on reverse connection")
- errCh := make(chan error)
- go func() {
- errCh <- server.Serve(l)
- }()
-
- select {
- case err = <-errCh:
- case <-ctx.Done():
- err = server.Close()
- }
- logger.V(2).Info("stop serving on reverse connection")
- return err
-}
-
-func withPodAccessCheck(handler http.Handler, getDownstreamLister ResourceListerFunc, synctargetClusterName logicalcluster.Name, synctargetName, syncTargetUID string) http.HandlerFunc {
- namespaceGVR := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"}
- podGVR := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}
-
- return func(w http.ResponseWriter, req *http.Request) {
- resolver := requestinfo.NewKCPRequestInfoResolver()
- requestInfo, err := resolver.NewRequestInfo(req)
- if err != nil {
- responsewriters.ErrorNegotiated(
- errors.NewInternalError(fmt.Errorf("could not resolve RequestInfo: %w", err)),
- errorCodecs, schema.GroupVersion{}, w, req,
- )
- return
- }
-
- // Ensure the request is for a pod subresource and carries the required information (name, namespace, subresource); otherwise reject it.
- if requestInfo.Resource != "pods" || requestInfo.Subresource == "" || requestInfo.Name == "" || requestInfo.Namespace == "" {
- responsewriters.ErrorNegotiated(
- errors.NewForbidden(podGVR.GroupResource(), requestInfo.Name, fmt.Errorf("invalid resource and/or subresource")),
- errorCodecs, schema.GroupVersion{}, w, req,
- )
- return
- }
-
- // Ensure the request targets only pods in a namespace owned by this syncer; otherwise reject it.
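// Editor's note (not part of the original file): ownership is established
// through the namespace-locator annotation that the syncer stamps on every
// downstream namespace it creates. Schematically (structure taken from the
// test fixtures later in this patch; exact JSON tags may differ):
//
//   shared.NamespaceLocatorAnnotation:
//     {"syncTarget":{"name":...,"uid":...,"clusterName":...},
//      "clusterName":...,"namespace":...}
//
// The checks below reject any request whose namespace lacks a locator
// matching this syncer's SyncTarget name, UID and cluster.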
- downstreamNamespaceName := requestInfo.Namespace - - nsInformer, err := getDownstreamLister(namespaceGVR) - if err != nil { - responsewriters.ErrorNegotiated( - errors.NewInternalError(fmt.Errorf("error while getting downstream namespace lister: %w", err)), - errorCodecs, schema.GroupVersion{}, w, req, - ) - return - } - - obj, err := nsInformer.Get(downstreamNamespaceName) - if err != nil { - responsewriters.ErrorNegotiated( - errors.NewForbidden(namespaceGVR.GroupResource(), requestInfo.Namespace, fmt.Errorf("forbidden")), - errorCodecs, schema.GroupVersion{}, w, req, - ) - return - } - - downstreamNs, ok := obj.(*unstructured.Unstructured) - if !ok { - responsewriters.ErrorNegotiated( - errors.NewInternalError(fmt.Errorf("namespace resource should be *unstructured.Unstructured but was: %T", obj)), - errorCodecs, schema.GroupVersion{}, w, req, - ) - return - } - - // Ensure the referenced downstream namespace locator is correct and owned by this syncer. - annotations := downstreamNs.GetAnnotations() - if locator, ok, err := shared.LocatorFromAnnotations(annotations); ok { - if err != nil || locator.SyncTarget.Name != synctargetName || string(locator.SyncTarget.UID) != syncTargetUID || locator.SyncTarget.ClusterName != string(synctargetClusterName) { - responsewriters.ErrorNegotiated( - errors.NewForbidden(podGVR.GroupResource(), requestInfo.Name, fmt.Errorf("forbidden")), - errorCodecs, schema.GroupVersion{}, w, req, - ) - return - } - } else { - responsewriters.ErrorNegotiated( - errors.NewForbidden(podGVR.GroupResource(), requestInfo.Name, fmt.Errorf("forbidden")), - errorCodecs, schema.GroupVersion{}, w, req, - ) - return - } - - // Ensure Pod is in Upsynced state. - podName := requestInfo.Name - podInformer, err := getDownstreamLister(podGVR) - if err != nil { - responsewriters.ErrorNegotiated( - errors.NewInternalError(fmt.Errorf("error while getting pod lister: %w", err)), - errorCodecs, schema.GroupVersion{}, w, req, - ) - return - } - obj, err = podInformer.ByNamespace(downstreamNamespaceName).Get(podName) - if err != nil { - responsewriters.ErrorNegotiated( - errors.NewForbidden(podGVR.GroupResource(), requestInfo.Name, fmt.Errorf("forbidden")), - errorCodecs, schema.GroupVersion{}, w, req, - ) - return - } - if downstreamPod, ok := obj.(*unstructured.Unstructured); ok { - if downstreamPod.GetLabels()[workloadv1alpha1.ClusterResourceStateLabelPrefix+workloadv1alpha1.ToSyncTargetKey(synctargetClusterName, synctargetName)] != string(workloadv1alpha1.ResourceStateUpsync) { - responsewriters.ErrorNegotiated( - errors.NewForbidden(podGVR.GroupResource(), requestInfo.Name, fmt.Errorf("forbidden")), - errorCodecs, schema.GroupVersion{}, w, req, - ) - return - } - } else { - responsewriters.ErrorNegotiated( - errors.NewInternalError(fmt.Errorf("pod resource should be *unstructured.Unstructured but was: %T", obj)), - errorCodecs, schema.GroupVersion{}, w, req, - ) - return - } - - handler.ServeHTTP(w, req) - } -} diff --git a/pkg/syncer/tunneler_test.go b/pkg/syncer/tunneler_test.go deleted file mode 100644 index d444a78f7dd..00000000000 --- a/pkg/syncer/tunneler_test.go +++ /dev/null @@ -1,207 +0,0 @@ -/* -Copyright 2023 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package syncer - -import ( - "encoding/json" - "net/http" - "net/http/httptest" - "testing" - - "github.com/kcp-dev/logicalcluster/v3" - "github.com/stretchr/testify/require" - - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/tools/cache" - - "github.com/kcp-dev/kcp/pkg/syncer/shared" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" -) - -func Test_withPodAccessCheck(t *testing.T) { - tests := []struct { - name string - podState string - subresource string - synctargetClusterName string - synctargetName string - syncTargetUID string - expectedStatusCode int - requestPath string - }{ - { - name: "valid pod and valid namespace, expect success", - expectedStatusCode: http.StatusOK, - podState: "Upsync", - synctargetName: "test-synctarget", - syncTargetUID: "test-uid", - synctargetClusterName: "test-workspace", - requestPath: "/api/v1/namespaces/test-namespace/pods/test-pod/log", - }, - { - name: "pod not in upsync state, expect 403", - expectedStatusCode: http.StatusForbidden, - podState: "", - synctargetName: "test-synctarget", - syncTargetUID: "test-uid", - synctargetClusterName: "test-workspace", - requestPath: "/api/v1/namespaces/test-namespace/pods/test-pod/log", - }, - { - name: "namespace not owned by the syncer, expect 403", - expectedStatusCode: http.StatusForbidden, - podState: "Upsync", - synctargetName: "test-synctarget", - syncTargetUID: "test-another-uid", - synctargetClusterName: "test-workspace", - requestPath: "/api/v1/namespaces/test-namespace/pods/test-pod/log", - }, - { - name: "non existent pod, expect 403", - expectedStatusCode: http.StatusForbidden, - podState: "Upsync", - synctargetName: "test-synctarget", - syncTargetUID: "test-uid", - synctargetClusterName: "test-workspace", - requestPath: "/api/v1/namespaces/test-namespace/pods/not-existing-pod/log", - }, - { - name: "non existent namespace, expect 403", - expectedStatusCode: http.StatusForbidden, - podState: "Upsync", - synctargetName: "test-synctarget", - syncTargetUID: "test-uid", - synctargetClusterName: "test-workspace", - requestPath: "/api/v1/namespaces/not-existing-namespace/pods/test-pod/log", - }, - { - name: "request is not for a pod, expect 403", - expectedStatusCode: http.StatusForbidden, - podState: "Upsync", - synctargetName: "test-synctarget", - syncTargetUID: "test-uid", - synctargetClusterName: "test-workspace", - requestPath: "/api/v1/namespaces/test-namespace/configmaps/test-pod/status", - }, - { - name: "request doesn't contain a subresource, expect 403", - expectedStatusCode: http.StatusForbidden, - podState: "Upsync", - synctargetName: "test-synctarget", - syncTargetUID: "test-uid", - synctargetClusterName: "test-workspace", - requestPath: "/api/v1/namespaces/test-namespace/pods/test-pod", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - rw := httptest.NewRecorder() - - 
fakeDownstreamInformers := map[schema.GroupVersionResource]cache.GenericLister{} - fakeDownstreamInformers[schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}] = &fakeLister{ - objs: []runtime.Object{ - &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pod", - Namespace: "test-namespace", - Labels: map[string]string{ - workloadv1alpha1.ClusterResourceStateLabelPrefix + workloadv1alpha1.ToSyncTargetKey(logicalcluster.Name(tt.synctargetClusterName), tt.synctargetName): tt.podState, - }, - }, - }, - }, - } - locator, err := json.Marshal(shared.NamespaceLocator{ - SyncTarget: shared.SyncTargetLocator{ - Name: "test-synctarget", - UID: "test-uid", - ClusterName: "test-workspace", - }, - ClusterName: "test-workspace", - Namespace: "test-namespace", - }) - require.NoError(t, err) - - fakeDownstreamInformers[schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"}] = &fakeLister{ - objs: []runtime.Object{ - &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-namespace", - Annotations: map[string]string{ - shared.NamespaceLocatorAnnotation: string(locator), - }, - }, - }, - }, - } - - synctargetWorkspaceName, _ := logicalcluster.NewPath(tt.synctargetClusterName).Name() - handler := withPodAccessCheck( - http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) - }), - func(gvr schema.GroupVersionResource) (cache.GenericLister, error) { - return fakeDownstreamInformers[gvr], nil - }, - synctargetWorkspaceName, tt.synctargetName, tt.syncTargetUID) - - request := httptest.NewRequest(http.MethodGet, tt.requestPath, nil) - handler.ServeHTTP(rw, request) - - require.Equal(t, tt.expectedStatusCode, rw.Code) - }) - } -} - -type fakeLister struct { - objs []runtime.Object -} - -func (f *fakeLister) List(selector labels.Selector) (ret []runtime.Object, err error) { - return f.objs, nil -} - -func (f *fakeLister) Get(name string) (ret runtime.Object, err error) { - for _, obj := range f.objs { - // return the unstructured object if the name matches - if obj.(metav1.Object).GetName() == name { - unstructuredObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) - if err != nil { - return nil, err - } - return &unstructured.Unstructured{Object: unstructuredObj}, nil - } - } - return nil, apierrors.NewNotFound(schema.GroupResource{}, name) -} - -func (f *fakeLister) ByIndex(indexName, indexKey string) (ret []interface{}, err error) { - panic("implement me") -} - -func (f *fakeLister) ByNamespace(namespace string) cache.GenericNamespaceLister { - for f.objs[0].(metav1.Object).GetNamespace() == namespace { - return f - } - return nil -} diff --git a/pkg/syncer/upsync/upsync_cleanup_controller.go b/pkg/syncer/upsync/upsync_cleanup_controller.go deleted file mode 100644 index aee51a7c259..00000000000 --- a/pkg/syncer/upsync/upsync_cleanup_controller.go +++ /dev/null @@ -1,191 +0,0 @@ -/* -Copyright 2023 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/
-
-package upsync
-
-import (
- "context"
- "fmt"
- "time"
-
- "github.com/go-logr/logr"
- kcpcache "github.com/kcp-dev/apimachinery/v2/pkg/cache"
- kcpdynamic "github.com/kcp-dev/client-go/dynamic"
- "github.com/kcp-dev/logicalcluster/v3"
-
- "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
- "k8s.io/apimachinery/pkg/runtime/schema"
- "k8s.io/apimachinery/pkg/types"
- utilerrors "k8s.io/apimachinery/pkg/util/errors"
- utilruntime "k8s.io/apimachinery/pkg/util/runtime"
- "k8s.io/apimachinery/pkg/util/wait"
- "k8s.io/client-go/dynamic"
- "k8s.io/client-go/informers"
- "k8s.io/client-go/tools/cache"
- "k8s.io/client-go/util/workqueue"
- "k8s.io/klog/v2"
-
- "github.com/kcp-dev/kcp/pkg/indexers"
- ddsif "github.com/kcp-dev/kcp/pkg/informer"
- "github.com/kcp-dev/kcp/pkg/logging"
- syncerindexers "github.com/kcp-dev/kcp/pkg/syncer/indexers"
- "github.com/kcp-dev/kcp/pkg/syncer/shared"
-)
-
-const cleanupControllerName = "kcp-resource-upsyncer-cleanup"
-
-// NewUpSyncerCleanupController returns a new controller that cleans up any upsynced upstream resource
-// whose corresponding downstream resource doesn't exist.
-func NewUpSyncerCleanupController(syncerLogger logr.Logger, syncTargetClusterName logicalcluster.Name,
- syncTargetName string, syncTargetUID types.UID, syncTargetKey string,
- upstreamClusterClient kcpdynamic.ClusterInterface,
- ddsifForUpstreamUpsyncer *ddsif.DiscoveringDynamicSharedInformerFactory,
- ddsifForDownstream *ddsif.GenericDiscoveringDynamicSharedInformerFactory[cache.SharedIndexInformer, cache.GenericLister, informers.GenericInformer],
-) (*cleanupController, error) {
- c := &cleanupController{
- queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), cleanupControllerName),
- cleanupReconciler: cleanupReconciler{
- getUpstreamClient: func(clusterName logicalcluster.Name) (dynamic.Interface, error) {
- return upstreamClusterClient.Cluster(clusterName.Path()), nil
- },
- getDownstreamLister: func(gvr schema.GroupVersionResource) (cache.GenericLister, error) {
- informers, notSynced := ddsifForDownstream.Informers()
- informer, ok := informers[gvr]
- if !ok {
- if shared.ContainsGVR(notSynced, gvr) {
- return nil, fmt.Errorf("informer for gvr %v not synced in the downstream informer factory", gvr)
- }
- return nil, fmt.Errorf("gvr %v should be known in the downstream informer factory", gvr)
- }
- return informer.Lister(), nil
- },
- listDownstreamNamespacesByLocator: func(jsonLocator string) ([]*unstructured.Unstructured, error) {
- nsInformer, err := ddsifForDownstream.ForResource(namespaceGVR)
- if err != nil {
- return nil, err
- }
- return indexers.ByIndex[*unstructured.Unstructured](nsInformer.Informer().GetIndexer(), syncerindexers.ByNamespaceLocatorIndexName, jsonLocator)
- },
-
- syncTargetName: syncTargetName,
- syncTargetClusterName: syncTargetClusterName,
- syncTargetUID: syncTargetUID,
- },
- }
- logger := logging.WithReconciler(syncerLogger, cleanupControllerName)
-
- ddsifForUpstreamUpsyncer.AddEventHandler(ddsif.GVREventHandlerFuncs{
- AddFunc: func(gvr schema.GroupVersionResource, obj interface{}) {
- if gvr == namespaceGVR {
- return
- }
- c.enqueue(gvr, obj, logger)
- },
- })
-
- return c, nil
-}
-
-type cleanupController struct {
- queue workqueue.RateLimitingInterface
-
- cleanupReconciler
-}
-
-func (c *cleanupController) enqueue(gvr schema.GroupVersionResource, obj interface{}, logger logr.Logger) {
- key, err := kcpcache.DeletionHandlingMetaClusterNamespaceKeyFunc(obj)
- if err != nil {
- utilruntime.HandleError(err)
- return
- }
- logging.WithQueueKey(logger, key).V(2).Info("queueing", "gvr", gvr.String())
- queueKey := queueKey{
- gvr: gvr,
- key: key,
- }
-
- c.queue.Add(queueKey)
-}
-
-func (c *cleanupController) Start(ctx context.Context, numThreads int) {
- defer utilruntime.HandleCrash()
- defer c.queue.ShutDown()
-
- logger := logging.WithReconciler(klog.FromContext(ctx), cleanupControllerName)
- ctx = klog.NewContext(ctx, logger)
- logger.Info("Starting upsync cleanup workers")
- defer logger.Info("Stopping upsync cleanup workers")
-
- for i := 0; i < numThreads; i++ {
- go wait.UntilWithContext(ctx, c.startWorker, time.Second)
- }
- <-ctx.Done()
-}
-
-func (c *cleanupController) startWorker(ctx context.Context) {
- for c.processNextWorkItem(ctx) {
- }
-}
-
-func (c *cleanupController) processNextWorkItem(ctx context.Context) bool {
- // Wait until there is a new item in the working queue
- k, quit := c.queue.Get()
- if quit {
- return false
- }
- key := k.(queueKey)
-
- logger := logging.WithQueueKey(klog.FromContext(ctx), key.key).WithValues("gvr", key.gvr)
- ctx = klog.NewContext(ctx, logger)
- logger.V(1).Info("processing key")
-
- // No matter what, tell the queue we're done with this key, to unblock
- // other workers.
- defer c.queue.Done(key)
-
- if requeue, err := c.process(ctx, key.key, key.gvr); err != nil {
- utilruntime.HandleError(fmt.Errorf("%q controller failed to sync %q (%s), err: %w", cleanupControllerName, key.key, key.gvr.String(), err))
- c.queue.AddRateLimited(key)
- return true
- } else if requeue {
- // only requeue if we didn't error, but we still want to requeue
- c.queue.Add(key)
- return true
- }
- c.queue.Forget(key)
- return true
-}
-
-func (c *cleanupController) process(ctx context.Context, key string, gvr schema.GroupVersionResource) (bool, error) {
- logger := klog.FromContext(ctx)
-
- clusterName, namespace, name, err := kcpcache.SplitMetaClusterNamespaceKey(key)
- if err != nil {
- utilruntime.HandleError(err)
- return false, nil
- }
-
- logger = logger.WithValues(logging.WorkspaceKey, clusterName, logging.NamespaceKey, namespace, logging.NameKey, name)
- ctx = klog.NewContext(ctx, logger)
-
- var errs []error
- requeue, err := c.cleanupReconciler.reconcile(ctx, gvr, clusterName, namespace, name)
- if err != nil {
- errs = append(errs, err)
- }
-
- return requeue, utilerrors.NewAggregate(errs)
-}
diff --git a/pkg/syncer/upsync/upsync_cleanup_reconcile.go b/pkg/syncer/upsync/upsync_cleanup_reconcile.go
deleted file mode 100644
index 0fed2baf1c1..00000000000
--- a/pkg/syncer/upsync/upsync_cleanup_reconcile.go
+++ /dev/null
@@ -1,152 +0,0 @@
-/*
-Copyright 2023 The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/ - -package upsync - -import ( - "context" - "encoding/json" - "fmt" - "strings" - - "github.com/kcp-dev/logicalcluster/v3" - - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/tools/cache" - "k8s.io/klog/v2" - - "github.com/kcp-dev/kcp/pkg/syncer/shared" - . "github.com/kcp-dev/kcp/tmc/pkg/logging" -) - -type cleanupReconciler struct { - getUpstreamClient func(clusterName logicalcluster.Name) (dynamic.Interface, error) - - getDownstreamLister func(gvr schema.GroupVersionResource) (cache.GenericLister, error) - listDownstreamNamespacesByLocator func(jsonLocator string) ([]*unstructured.Unstructured, error) - - syncTargetName string - syncTargetClusterName logicalcluster.Name - syncTargetUID types.UID -} - -func (c *cleanupReconciler) reconcile(ctx context.Context, gvr schema.GroupVersionResource, upstreamClusterName logicalcluster.Name, upstreamNamespace, upstreamName string) (bool, error) { - downstreamResource, err := c.getDownstreamResource(ctx, gvr, upstreamClusterName, upstreamNamespace, upstreamName) - if err != nil && !apierrors.IsNotFound(err) { - return false, err - } - if downstreamResource != nil { - return false, nil - } - - // Downstream resource not present => force delete resource upstream (also remove finalizers) - err = c.deleteOrphanUpstreamResource(ctx, gvr, upstreamClusterName, upstreamNamespace, upstreamName) - if apierrors.IsNotFound(err) { - return false, nil - } - return false, err -} - -func (c *cleanupReconciler) getDownstreamResource(ctx context.Context, gvr schema.GroupVersionResource, upstreamClusterName logicalcluster.Name, upstreamNamespace, upstreamName string) (*unstructured.Unstructured, error) { - logger := klog.FromContext(ctx) - - downstreamNamespace := "" - if upstreamNamespace != "" { - // find downstream namespace through locator index - locator := shared.NewNamespaceLocator(upstreamClusterName, c.syncTargetClusterName, c.syncTargetUID, c.syncTargetName, upstreamNamespace) - locatorValue, err := json.Marshal(locator) - if err != nil { - return nil, err - } - downstreamNamespaces, err := c.listDownstreamNamespacesByLocator(string(locatorValue)) - if err != nil { - return nil, err - } - if len(downstreamNamespaces) == 1 { - namespace := downstreamNamespaces[0] - logger.WithValues(DownstreamName, namespace.GetName()).V(4).Info("Found downstream namespace for upstream namespace") - downstreamNamespace = namespace.GetName() - } else if len(downstreamNamespaces) > 1 { - // This should never happen unless there's some namespace collision. 
- var namespacesCollisions []string - for _, namespace := range downstreamNamespaces { - namespacesCollisions = append(namespacesCollisions, namespace.GetName()) - } - return nil, fmt.Errorf("(namespace collision) found multiple downstream namespaces: %s for upstream namespace %s|%s", strings.Join(namespacesCollisions, ","), upstreamClusterName, upstreamNamespace) - } else { - logger.V(4).Info("No downstream namespaces found") - return nil, nil - } - } - - // retrieve downstream object - downstreamLister, err := c.getDownstreamLister(gvr) - if err != nil { - return nil, err - } - - var downstreamObject runtime.Object - if downstreamNamespace != "" { - downstreamObject, err = downstreamLister.ByNamespace(downstreamNamespace).Get(upstreamName) - } else { - downstreamObject, err = downstreamLister.Get(upstreamName) - } - if err != nil { - return nil, err - } - - downstreamResource, ok := downstreamObject.(*unstructured.Unstructured) - if !ok { - return nil, fmt.Errorf("type mismatch of resource object: received %T", downstreamResource) - } - - return downstreamResource, nil -} - -func removeUpstreamResourceFinalizers(ctx context.Context, upstreamClient dynamic.Interface, gvr schema.GroupVersionResource, namespace, name string) error { - existingResource, err := upstreamClient.Resource(gvr).Namespace(namespace).Get(ctx, name, metav1.GetOptions{}) - if err != nil { - return err - } - if len(existingResource.GetFinalizers()) > 0 { - existingResource.SetFinalizers(nil) - if _, err := upstreamClient.Resource(gvr).Namespace(namespace).Update(ctx, existingResource, metav1.UpdateOptions{}); err != nil { - return err - } - } - return nil -} - -func (c *cleanupReconciler) deleteOrphanUpstreamResource(ctx context.Context, gvr schema.GroupVersionResource, upstreamClusterName logicalcluster.Name, upstreamNamespace, upstreamName string) error { - // Downstream resource not present => force delete resource upstream (also remove finalizers) - upstreamClient, err := c.getUpstreamClient(upstreamClusterName) - if err != nil { - return err - } - - if err := removeUpstreamResourceFinalizers(ctx, upstreamClient, gvr, upstreamNamespace, upstreamName); err != nil { - return err - } - - err = upstreamClient.Resource(gvr).Namespace(upstreamNamespace).Delete(ctx, upstreamName, metav1.DeleteOptions{}) - return err -} diff --git a/pkg/syncer/upsync/upsync_controller.go b/pkg/syncer/upsync/upsync_controller.go deleted file mode 100644 index f876f5be587..00000000000 --- a/pkg/syncer/upsync/upsync_controller.go +++ /dev/null @@ -1,446 +0,0 @@ -/* -Copyright 2023 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/
-
-package upsync
-
-import (
- "context"
- "fmt"
- "sync"
- "time"
-
- "github.com/go-logr/logr"
- kcpcache "github.com/kcp-dev/apimachinery/v2/pkg/cache"
- "github.com/kcp-dev/logicalcluster/v3"
-
- corev1 "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/api/equality"
- "k8s.io/apimachinery/pkg/api/errors"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
- "k8s.io/apimachinery/pkg/labels"
- "k8s.io/apimachinery/pkg/runtime/schema"
- "k8s.io/apimachinery/pkg/types"
- utilerrors "k8s.io/apimachinery/pkg/util/errors"
- utilruntime "k8s.io/apimachinery/pkg/util/runtime"
- "k8s.io/apimachinery/pkg/util/wait"
- "k8s.io/client-go/dynamic"
- "k8s.io/client-go/informers"
- "k8s.io/client-go/tools/cache"
- "k8s.io/client-go/util/workqueue"
- "k8s.io/klog/v2"
-
- "github.com/kcp-dev/kcp/pkg/indexers"
- ddsif "github.com/kcp-dev/kcp/pkg/informer"
- "github.com/kcp-dev/kcp/pkg/logging"
- syncerindexers "github.com/kcp-dev/kcp/pkg/syncer/indexers"
- "github.com/kcp-dev/kcp/pkg/syncer/shared"
- "github.com/kcp-dev/kcp/pkg/syncer/synctarget"
- workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1"
-)
-
-const controllerName = "kcp-resource-upsyncer"
-
-var namespaceGVR schema.GroupVersionResource = corev1.SchemeGroupVersion.WithResource("namespaces")
-
-// NewUpSyncer returns a new controller that upsyncs, through the Upsyncer virtual workspace, downstream resources
-// that are part of the upsyncable resource types (a fixed, limited list for now) and that carry
-// the following labels:
-// - internal.workload.kcp.io/cluster: <syncTargetKey>
-// - state.workload.kcp.io/<syncTargetKey>: Upsync
-
-// and optionally, for cluster-wide resources, the `kcp.io/namespace-locator` annotation
-// filled with the information necessary to identify the upstream workspace to upsync to.
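To make the label contract above concrete, a downstream resource eligible for upsyncing would carry metadata shaped roughly like this (editor's sketch; the sync-target key is a hash-like value in practice, shown here as a placeholder):

	pod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "my-pod",
			Namespace: "downstream-ns",
			Labels: map[string]string{
				// assumption: the value of the cluster label is the sync-target key
				"internal.workload.kcp.io/cluster": "<syncTargetKey>",
				// workloadv1alpha1.ClusterResourceStateLabelPrefix + <syncTargetKey>
				"state.workload.kcp.io/<syncTargetKey>": "Upsync",
			},
		},
	}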
-func NewUpSyncer(syncerLogger logr.Logger, syncTargetClusterName logicalcluster.Name, - syncTargetName, syncTargetKey string, - getShardAccess synctarget.GetShardAccessFunc, downstreamClient dynamic.Interface, - ddsifForDownstream *ddsif.GenericDiscoveringDynamicSharedInformerFactory[cache.SharedIndexInformer, cache.GenericLister, informers.GenericInformer], - syncTargetUID types.UID) (*controller, error) { - c := &controller{ - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), controllerName), - reconciler: reconciler{ - cleanupReconciler: cleanupReconciler{ - getUpstreamClient: func(clusterName logicalcluster.Name) (dynamic.Interface, error) { - shardAccess, ok, err := getShardAccess(clusterName) - if err != nil { - return nil, err - } - if !ok { - return nil, fmt.Errorf("shard-related clients not found for cluster %q", clusterName) - } - return shardAccess.UpsyncerClient.Cluster(clusterName.Path()), nil - }, - getDownstreamLister: func(gvr schema.GroupVersionResource) (cache.GenericLister, error) { - informers, notSynced := ddsifForDownstream.Informers() - informer, ok := informers[gvr] - if !ok { - if shared.ContainsGVR(notSynced, gvr) { - return nil, fmt.Errorf("informer for gvr %v not synced in the downstream informer factory", gvr) - } - return nil, fmt.Errorf("gvr %v should be known in the downstream informer factory", gvr) - } - return informer.Lister(), nil - }, - listDownstreamNamespacesByLocator: func(jsonLocator string) ([]*unstructured.Unstructured, error) { - nsInformer, err := ddsifForDownstream.ForResource(namespaceGVR) - if err != nil { - return nil, err - } - return indexers.ByIndex[*unstructured.Unstructured](nsInformer.Informer().GetIndexer(), syncerindexers.ByNamespaceLocatorIndexName, jsonLocator) - }, - - syncTargetName: syncTargetName, - syncTargetClusterName: syncTargetClusterName, - syncTargetUID: syncTargetUID, - }, - getUpstreamUpsyncerLister: func(clusterName logicalcluster.Name, gvr schema.GroupVersionResource) (cache.GenericLister, error) { - shardAccess, ok, err := getShardAccess(clusterName) - if err != nil { - return nil, err - } - if !ok { - return nil, fmt.Errorf("shard-related clients not found for cluster %q", clusterName) - } - - informers, notSynced := shardAccess.UpsyncerDDSIF.Informers() - informer, ok := informers[gvr] - if !ok { - if shared.ContainsGVR(notSynced, gvr) { - return nil, fmt.Errorf("informer for gvr %v not synced in the upstream upsyncer informer factory - should retry", gvr) - } - return nil, fmt.Errorf("gvr %v should be known in the upstream upsyncer informer factory", gvr) - } - return informer.Lister().ByCluster(clusterName), nil - }, - getUpsyncedGVRs: func(clusterName logicalcluster.Name) ([]schema.GroupVersionResource, error) { - shardAccess, ok, err := getShardAccess(clusterName) - if err != nil { - return nil, err - } - if !ok { - return nil, fmt.Errorf("shard-related clients not found for cluster %q", clusterName) - } - - informers, notSynced := shardAccess.UpsyncerDDSIF.Informers() - var result []schema.GroupVersionResource - for k := range informers { - result = append(result, k) - } - if len(notSynced) > 0 { - return result, fmt.Errorf("informers not synced in the upstream upsyncer informer factory for gvrs %v", notSynced) - } - return result, nil - }, - syncTargetKey: syncTargetKey, - }, - } - logger := logging.WithReconciler(syncerLogger, controllerName) - - ddsifForDownstream.AddEventHandler(ddsif.GVREventHandlerFuncs{ - AddFunc: func(gvr schema.GroupVersionResource, obj 
interface{}) { - if gvr == namespaceGVR { - return - } - new, ok := obj.(*unstructured.Unstructured) - if !ok { - utilruntime.HandleError(fmt.Errorf("resource should be a *unstructured.Unstructured, but was %T", obj)) - return - } - if new.GetLabels()[workloadv1alpha1.ClusterResourceStateLabelPrefix+syncTargetKey] != string(workloadv1alpha1.ResourceStateUpsync) { - return - } - c.enqueueDownstream(gvr, new, logger, new.UnstructuredContent()["status"] != nil) - }, - UpdateFunc: func(gvr schema.GroupVersionResource, oldObj, newObj interface{}) { - if gvr == namespaceGVR { - return - } - old, ok := oldObj.(*unstructured.Unstructured) - if !ok { - utilruntime.HandleError(fmt.Errorf("resource should be a *unstructured.Unstructured, but was %T", oldObj)) - return - } - new, ok := newObj.(*unstructured.Unstructured) - if !ok { - utilruntime.HandleError(fmt.Errorf("resource should be a *unstructured.Unstructured, but was %T", newObj)) - return - } - if new.GetLabels()[workloadv1alpha1.ClusterResourceStateLabelPrefix+syncTargetKey] != string(workloadv1alpha1.ResourceStateUpsync) { - return - } - - oldStatus := old.UnstructuredContent()["status"] - newStatus := new.UnstructuredContent()["status"] - c.enqueueDownstream(gvr, new, logger, newStatus != nil && !equality.Semantic.DeepEqual(oldStatus, newStatus)) - }, - DeleteFunc: func(gvr schema.GroupVersionResource, obj interface{}) { - if d, ok := obj.(cache.DeletedFinalStateUnknown); ok { - obj = d.Obj - } - unstr, ok := obj.(*unstructured.Unstructured) - if !ok { - utilruntime.HandleError(fmt.Errorf("resource should be a *unstructured.Unstructured, but was %T", unstr)) - return - } - - if gvr == namespaceGVR { - c.enqueueDeletedDownstreamNamespace(unstr, logger) - return - } - - if unstr.GetLabels()[workloadv1alpha1.ClusterResourceStateLabelPrefix+syncTargetKey] != string(workloadv1alpha1.ResourceStateUpsync) { - return - } - c.enqueueDownstream(gvr, unstr, logger, false) - }, - }) - return c, nil -} - -type controller struct { - queue workqueue.RateLimitingInterface - - dirtyStatusKeys sync.Map - - reconciler -} - -// queueKey is a composite queue key that combines the gvr and the key of the upstream -// resource that should be reconciled. 
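For example (editor's illustration; the "<cluster>|<namespace>/<name>" key encoding is an assumption based on the kcp-dev/apimachinery cache helpers used here), a queue entry for a Pod would look like:

	k := queueKey{
		gvr: schema.GroupVersionResource{Version: "v1", Resource: "pods"},
		key: "root:org:ws|default/my-pod",
	}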
-type queueKey struct { - gvr schema.GroupVersionResource - // key is the cluster-aware cache key of the upstream resource - key string -} - -func (c *controller) enqueueUpstream(gvr schema.GroupVersionResource, obj interface{}, logger logr.Logger, dirtyStatus bool) { - key, err := kcpcache.DeletionHandlingMetaClusterNamespaceKeyFunc(obj) - if err != nil { - utilruntime.HandleError(err) - return - } - logging.WithQueueKey(logger, key).V(2).Info("queueing", "gvr", gvr.String()) - queueKey := queueKey{ - gvr: gvr, - key: key, - } - - if dirtyStatus { - c.dirtyStatusKeys.Store(queueKey, true) - } - c.queue.Add(queueKey) -} - -func (c *controller) enqueueDeletedDownstreamNamespace(deletedNamespace *unstructured.Unstructured, logger logr.Logger) { - upstreamLocator, locatorExists, err := shared.LocatorFromAnnotations(deletedNamespace.GetAnnotations()) - if err != nil { - utilruntime.HandleError(err) - return - } - if !locatorExists || upstreamLocator == nil { - logger.V(4).Info("the namespace locator doesn't exist on deleted downstream namespace.") - return - } - - if upstreamLocator.SyncTarget.UID != c.syncTargetUID || upstreamLocator.SyncTarget.ClusterName != c.syncTargetClusterName.String() { - return - } - - upsyncedGVRs, err := c.getUpsyncedGVRs(upstreamLocator.ClusterName) - if err != nil { - utilruntime.HandleError(err) - return - } - - for _, gvr := range upsyncedGVRs { - upstreamLister, err := c.getUpstreamUpsyncerLister(upstreamLocator.ClusterName, gvr) - if err != nil { - utilruntime.HandleError(err) - return - } - - upstreamUpsyncedResources, err := upstreamLister.ByNamespace(upstreamLocator.Namespace).List(labels.Everything()) - if err != nil { - utilruntime.HandleError(err) - return - } - - for _, upstreamUpsyncedResource := range upstreamUpsyncedResources { - c.enqueueUpstream(gvr, upstreamUpsyncedResource, logger, false) - } - } -} - -func (c *controller) enqueueDownstream(gvr schema.GroupVersionResource, downstreamObj *unstructured.Unstructured, logger logr.Logger, dirtyStatus bool) { - downstreamNamespace := downstreamObj.GetNamespace() - locatorHolder := downstreamObj - if downstreamNamespace != "" { - // get locator from namespace for namespaced objects - downstreamNamespaceLister, err := c.getDownstreamLister(namespaceGVR) - if err != nil { - utilruntime.HandleError(err) - return - } - nsObj, err := downstreamNamespaceLister.Get(downstreamNamespace) - if errors.IsNotFound(err) { - logger.V(4).Info("the downstream namespace doesn't exist anymore.") - return - } - if err != nil { - utilruntime.HandleError(err) - return - } - if unstr, ok := nsObj.(*unstructured.Unstructured); !ok { - utilruntime.HandleError(fmt.Errorf("downstream ns expected to be *unstructured.Unstructured got %T", nsObj)) - return - } else { - locatorHolder = unstr - } - } - - upstreamLocator, locatorExists, err := shared.LocatorFromAnnotations(locatorHolder.GetAnnotations()) - if err != nil { - utilruntime.HandleError(err) - return - } - if !locatorExists || upstreamLocator == nil { - logger.V(4).Info("the namespace locator doesn't exist on downstream namespace.") - return - } - - if upstreamLocator.SyncTarget.UID != c.syncTargetUID || upstreamLocator.SyncTarget.ClusterName != c.syncTargetClusterName.String() { - return - } - - c.enqueueUpstream(gvr, &metav1.ObjectMeta{ - Name: downstreamObj.GetName(), - Namespace: upstreamLocator.Namespace, - Annotations: map[string]string{ - logicalcluster.AnnotationKey: upstreamLocator.ClusterName.String(), - }, - }, logging.WithObject(logger, downstreamObj), 
dirtyStatus) -} - -func (c *controller) Start(ctx context.Context, numThreads int) { - defer utilruntime.HandleCrash() - defer c.queue.ShutDown() - - logger := logging.WithReconciler(klog.FromContext(ctx), controllerName) - ctx = klog.NewContext(ctx, logger) - logger.Info("Starting upsync workers") - defer logger.Info("Stopping upsync workers") - - for i := 0; i < numThreads; i++ { - go wait.UntilWithContext(ctx, c.startWorker, time.Second) - } - <-ctx.Done() -} - -func (c *controller) startWorker(ctx context.Context) { - for c.processNextWorkItem(ctx) { - } -} - -func (c *controller) processNextWorkItem(ctx context.Context) bool { - // Wait until there is a new item in the working queue - k, quit := c.queue.Get() - if quit { - return false - } - key := k.(queueKey) - - logger := logging.WithQueueKey(klog.FromContext(ctx), key.key).WithValues("gvr", key.gvr) - ctx = klog.NewContext(ctx, logger) - logger.V(1).Info("processing key") - - // No matter what, tell the queue we're done with this key, to unblock - // other workers. - defer c.queue.Done(key) - - if requeue, err := c.process(ctx, key.key, key.gvr); err != nil { - utilruntime.HandleError(fmt.Errorf("%q controller failed to sync %q (%s), err: %w", controllerName, key.key, key.gvr.String(), err)) - c.queue.AddRateLimited(key) - return true - } else if requeue { - // only requeue if we didn't error, but we still want to requeue - c.queue.Add(key) - return true - } - c.queue.Forget(key) - return true -} - -func (c *controller) process(ctx context.Context, key string, gvr schema.GroupVersionResource) (bool, error) { - logger := klog.FromContext(ctx) - - clusterName, namespace, name, err := kcpcache.SplitMetaClusterNamespaceKey(key) - if err != nil { - utilruntime.HandleError(err) - return false, nil - } - - dirtyStatus := false - if dirtyStatusObj, found := c.dirtyStatusKeys.LoadAndDelete(queueKey{ - gvr: gvr, - key: key, - }); found { - dirtyStatus = dirtyStatusObj.(bool) - } - - resetDirty := func(requeue bool, err error) (bool, error) { - if dirtyStatus && err != nil { - c.dirtyStatusKeys.Store(queueKey{ - gvr: gvr, - key: key, - }, true) - } - return requeue, err - } - - upstreamLister, err := c.getUpstreamUpsyncerLister(clusterName, gvr) - if err != nil { - return resetDirty(false, err) - } - getter := upstreamLister.Get - if namespace != "" { - getter = upstreamLister.ByNamespace(namespace).Get - } - - upstreamObj, err := getter(name) - if err != nil && !errors.IsNotFound(err) { - return resetDirty(false, err) - } - - var upstreamResource *unstructured.Unstructured - if upstreamObj != nil { - var ok bool - upstreamResource, ok = upstreamObj.(*unstructured.Unstructured) - if !ok { - logger.Error(nil, "got unexpected type", "type", fmt.Sprintf("%T", upstreamObj)) - return false, nil // retrying won't help - } - } - logger = logger.WithValues(logging.WorkspaceKey, clusterName, logging.NamespaceKey, namespace, logging.NameKey, name) - ctx = klog.NewContext(ctx, logger) - - var errs []error - requeue, err := c.reconcile(ctx, upstreamResource, gvr, clusterName, namespace, name, dirtyStatus) - if err != nil { - errs = append(errs, err) - } - - return resetDirty(requeue, utilerrors.NewAggregate(errs)) -} diff --git a/pkg/syncer/upsync/upsync_process_test.go b/pkg/syncer/upsync/upsync_process_test.go deleted file mode 100644 index dd9a5b977ed..00000000000 --- a/pkg/syncer/upsync/upsync_process_test.go +++ /dev/null @@ -1,984 +0,0 @@ -/* -Copyright 2023 The KCP Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package upsync - -import ( - "context" - "strings" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - kcpcache "github.com/kcp-dev/apimachinery/v2/pkg/cache" - "github.com/kcp-dev/logicalcluster/v3" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - kcpfakedynamic "github.com/kcp-dev/client-go/third_party/k8s.io/client-go/dynamic/fake" - kcptesting "github.com/kcp-dev/client-go/third_party/k8s.io/client-go/testing" - corev1 "k8s.io/api/core/v1" - apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/dynamic" - dynamicfake "k8s.io/client-go/dynamic/fake" - clienttesting "k8s.io/client-go/testing" - "k8s.io/client-go/tools/cache" - "k8s.io/klog/v2" - - ddsif "github.com/kcp-dev/kcp/pkg/informer" - "github.com/kcp-dev/kcp/pkg/syncer/indexers" - "github.com/kcp-dev/kcp/pkg/syncer/synctarget" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" -) - -var scheme *runtime.Scheme - -func init() { - scheme = runtime.NewScheme() - _ = corev1.AddToScheme(scheme) -} - -var _ ddsif.GVRSource = (*mockedGVRSource)(nil) - -type mockedGVRSource struct { - upsyncer bool -} - -func (s *mockedGVRSource) GVRs() map[schema.GroupVersionResource]ddsif.GVRPartialMetadata { - return map[schema.GroupVersionResource]ddsif.GVRPartialMetadata{ - { - Version: "v1", - Resource: "namespaces", - }: { - Scope: apiextensionsv1.ClusterScoped, - Names: apiextensionsv1.CustomResourceDefinitionNames{ - Singular: "namespace", - Kind: "Namespace", - }, - }, - { - Version: "v1", - Resource: "persistentvolumes", - }: { - Scope: apiextensionsv1.ClusterScoped, - Names: apiextensionsv1.CustomResourceDefinitionNames{ - Singular: "persistentvolume", - Kind: "PersistentVolume", - }, - }, - { - Version: "v1", - Resource: "pods", - }: { - Scope: apiextensionsv1.NamespaceScoped, - Names: apiextensionsv1.CustomResourceDefinitionNames{ - Singular: "pod", - Kind: "Pod", - }, - }, - } -} - -func (s *mockedGVRSource) Ready() bool { - return true -} - -func (s *mockedGVRSource) Subscribe() <-chan struct{} { - return make(<-chan struct{}) -} - -func TestUpsyncerprocess(t *testing.T) { - type testCase struct { - downstreamNamespace *corev1.Namespace - upstreamNamespaceName string - gvr schema.GroupVersionResource - downstreamResource runtime.Object - upstreamResource runtime.Object - doOnDownstream func(tc testCase, client dynamic.Interface) - - resourceToProcessName string - - upstreamLogicalCluster logicalcluster.Name - syncTargetName string - syncTargetClusterName logicalcluster.Name - syncTargetUID types.UID - expectError bool - expectRequeue bool - expectActionsOnDownstream []clienttesting.Action - 
expectActionsOnUpstream []kcptesting.Action - includeStatus bool - } - tests := map[string]testCase{ - "Upsyncer upsyncs namespaced resources": { - upstreamLogicalCluster: "root:org:ws", - upstreamNamespaceName: "test", - downstreamNamespace: namespace("kcp-33jbiactwhg0"). - WithLabels(map[string]string{ - "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", - }). - WithAnnotations(map[string]string{ - "kcp.io/namespace-locator": `{"syncTarget": {"cluster":"root:org:ws", "name":"us-west1", "uid":"syncTargetUID"}, "cluster":"root:org:ws","namespace":"test"}`, - }). - Object(), - gvr: corev1.SchemeGroupVersion.WithResource("pods"), - downstreamResource: pod("test-pod"). - WithNamespace("kcp-33jbiactwhg0"). - WithResourceVersion("1"). - WithLabels(map[string]string{ - "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", - "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Upsync", - }). - Unstructured(t).WithoutFields("status").Unstructured, - upstreamResource: nil, - resourceToProcessName: "test-pod", - syncTargetName: "us-west1", - expectActionsOnDownstream: []clienttesting.Action{}, - expectActionsOnUpstream: []kcptesting.Action{ - kcptesting.NewCreateAction(corev1.SchemeGroupVersion.WithResource("pods"), logicalcluster.NewPath("root:org:ws"), "test", pod("test-pod"). - WithNamespace("test"). - WithLabels(map[string]string{ - "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Upsync", - }). - WithAnnotations(map[string]string{ - "workload.kcp.io/rv": "1", - }). - WithFinalizers("workload.kcp.io/syncer-6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g"). - Unstructured(t).WithoutFields("status").Unstructured), - }, - includeStatus: false, - }, - "Upsyncer upsyncs namespaced resources with status": { - upstreamLogicalCluster: "root:org:ws", - upstreamNamespaceName: "test", - downstreamNamespace: namespace("kcp-33jbiactwhg0"). - WithLabels(map[string]string{ - "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", - }). - WithAnnotations(map[string]string{ - "kcp.io/namespace-locator": `{"syncTarget": {"cluster":"root:org:ws", "name":"us-west1", "uid":"syncTargetUID"}, "cluster":"root:org:ws","namespace":"test"}`, - }). - Object(), - gvr: corev1.SchemeGroupVersion.WithResource("pods"), - downstreamResource: pod("test-pod"). - WithNamespace("kcp-33jbiactwhg0"). - WithResourceVersion("1"). - WithLabels(map[string]string{ - "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", - "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Upsync", - }). - Unstructured(t).WithField("status", map[string]interface{}{"phase": "Running"}).Unstructured, - resourceToProcessName: "test-pod", - syncTargetName: "us-west1", - expectActionsOnDownstream: []clienttesting.Action{}, - expectActionsOnUpstream: []kcptesting.Action{ - kcptesting.NewCreateAction(corev1.SchemeGroupVersion.WithResource("pods"), logicalcluster.NewPath("root:org:ws"), "test", pod("test-pod"). - WithNamespace("test"). - WithLabels(map[string]string{ - "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Upsync", - }). - WithFinalizers("workload.kcp.io/syncer-6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g"). - Unstructured(t).WithField("status", map[string]interface{}{"phase": "Running"}).Unstructured), - kcptesting.NewUpdateSubresourceAction(corev1.SchemeGroupVersion.WithResource("pods"), logicalcluster.NewPath("root:org:ws"), "status", "test", pod("test-pod"). - WithNamespace("test"). 
- WithLabels(map[string]string{ - "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Upsync", - }). - WithFinalizers("workload.kcp.io/syncer-6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g"). - Unstructured(t).WithField("status", map[string]interface{}{"phase": "Running"}).Unstructured), - kcptesting.NewUpdateAction(corev1.SchemeGroupVersion.WithResource("pods"), logicalcluster.NewPath("root:org:ws"), "test", pod("test-pod"). - WithNamespace("test"). - WithLabels(map[string]string{ - "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Upsync", - }). - WithAnnotations(map[string]string{ - "workload.kcp.io/rv": "1", - }). - WithFinalizers("workload.kcp.io/syncer-6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g"). - Unstructured(t).WithField("status", map[string]interface{}{"phase": "Running"}).Unstructured), - }, - includeStatus: true, - }, - "Upsyncer upsyncs cluster-wide resources": { - upstreamLogicalCluster: "root:org:ws", - upstreamNamespaceName: "", - downstreamNamespace: nil, - gvr: corev1.SchemeGroupVersion.WithResource("persistentvolumes"), - downstreamResource: pv("test-pv"). - WithResourceVersion("1"). - WithLabels(map[string]string{ - "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", - "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Upsync", - }). - WithAnnotations(map[string]string{ - "kcp.io/namespace-locator": `{"syncTarget": {"cluster":"root:org:ws", "name":"us-west1", "uid":"syncTargetUID"}, "cluster":"root:org:ws","namespace":""}`, - }). - Unstructured(t).WithoutFields("status").Unstructured, - upstreamResource: nil, - resourceToProcessName: "test-pv", - syncTargetName: "us-west1", - expectActionsOnDownstream: []clienttesting.Action{}, - expectActionsOnUpstream: []kcptesting.Action{ - kcptesting.NewCreateAction(corev1.SchemeGroupVersion.WithResource("persistentvolumes"), logicalcluster.NewPath("root:org:ws"), "", pv("test-pv"). - WithLabels(map[string]string{ - "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Upsync", - }). - WithAnnotations(map[string]string{ - "workload.kcp.io/rv": "1", - }). - WithFinalizers("workload.kcp.io/syncer-6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g"). - Unstructured(t).WithoutFields("status").Unstructured), - }, - includeStatus: false, - }, - "Upsyncer updates namespaced resources": { - upstreamLogicalCluster: "root:org:ws", - upstreamNamespaceName: "test", - downstreamNamespace: namespace("kcp-33jbiactwhg0"). - WithLabels(map[string]string{ - "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", - }). - WithAnnotations(map[string]string{ - "kcp.io/namespace-locator": `{"syncTarget": {"cluster":"root:org:ws", "name":"us-west1", "uid":"syncTargetUID"}, "cluster":"root:org:ws","namespace":"test"}`, - }). - Object(), - gvr: corev1.SchemeGroupVersion.WithResource("pods"), - downstreamResource: pod("test-pod"). - WithNamespace("kcp-33jbiactwhg0"). - WithResourceVersion("2"). - WithLabels(map[string]string{ - "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", - "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Upsync", - }). - Unstructured(t).WithoutFields("status").Unstructured, - upstreamResource: pod("test-pod"). - WithClusterName("root:org:ws"). - WithNamespace("test"). - WithResourceVersion("1"). - WithLabels(map[string]string{ - "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Upsync", - }). - WithAnnotations(map[string]string{ - "workload.kcp.io/rv": "1", - }). 
- Object(),
- resourceToProcessName: "test-pod",
- syncTargetName: "us-west1",
- expectActionsOnDownstream: []clienttesting.Action{},
- expectActionsOnUpstream: []kcptesting.Action{
- kcptesting.NewGetAction(schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}, logicalcluster.NewPath("root:org:ws"), "test", "test-pod"),
- kcptesting.NewUpdateAction(schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}, logicalcluster.NewPath("root:org:ws"), "test", pod("test-pod").
- WithNamespace("test").
- WithLabels(map[string]string{
- "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Upsync",
- }).
- WithAnnotations(map[string]string{
- "workload.kcp.io/rv": "2",
- }).
- WithFinalizers("workload.kcp.io/syncer-6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g").
- WithResourceVersion("1").
- Unstructured(t).WithoutFields("status").Unstructured),
- },
- includeStatus: false,
- },
- "Upsyncer updates namespaced resources, even if they already have a deletion timestamp, but requeues": {
- upstreamLogicalCluster: "root:org:ws",
- upstreamNamespaceName: "test",
- downstreamNamespace: namespace("kcp-33jbiactwhg0").
- WithLabels(map[string]string{
- "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g",
- }).
- WithAnnotations(map[string]string{
- "kcp.io/namespace-locator": `{"syncTarget": {"cluster":"root:org:ws", "name":"us-west1", "uid":"syncTargetUID"}, "cluster":"root:org:ws","namespace":"test"}`,
- }).
- Object(),
- gvr: corev1.SchemeGroupVersion.WithResource("pods"),
- downstreamResource: pod("test-pod").
- WithNamespace("kcp-33jbiactwhg0").
- WithResourceVersion("2").
- WithDeletionTimestamp().
- WithLabels(map[string]string{
- "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g",
- "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Upsync",
- }).
- Unstructured(t).WithoutFields("status").Unstructured,
- upstreamResource: pod("test-pod").
- WithClusterName("root:org:ws").
- WithNamespace("test").
- WithResourceVersion("1").
- WithLabels(map[string]string{
- "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Upsync",
- }).
- WithAnnotations(map[string]string{
- "workload.kcp.io/rv": "1",
- }).
- Object(),
- resourceToProcessName: "test-pod",
- syncTargetName: "us-west1",
- expectRequeue: true,
- expectActionsOnDownstream: []clienttesting.Action{},
- expectActionsOnUpstream: []kcptesting.Action{
- kcptesting.NewGetAction(corev1.SchemeGroupVersion.WithResource("pods"), logicalcluster.NewPath("root:org:ws"), "test", "test-pod"),
- kcptesting.NewUpdateAction(corev1.SchemeGroupVersion.WithResource("pods"), logicalcluster.NewPath("root:org:ws"), "test", pod("test-pod").
- WithNamespace("test").
- WithResourceVersion("1").
- WithLabels(map[string]string{
- "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Upsync",
- }).
- WithAnnotations(map[string]string{
- "workload.kcp.io/rv": "2",
- }).
- WithFinalizers("workload.kcp.io/syncer-6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g").
- Unstructured(t).WithoutFields("status").Unstructured),
- },
- includeStatus: false,
- },
- "Upsyncer updates namespaced resources with status": {
- upstreamLogicalCluster: "root:org:ws",
- upstreamNamespaceName: "test",
- downstreamNamespace: namespace("kcp-33jbiactwhg0").
- WithLabels(map[string]string{
- "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g",
- }).
- WithAnnotations(map[string]string{ - "kcp.io/namespace-locator": `{"syncTarget": {"cluster":"root:org:ws", "name":"us-west1", "uid":"syncTargetUID"}, "cluster":"root:org:ws","namespace":"test"}`, - }). - Object(), - gvr: corev1.SchemeGroupVersion.WithResource("pods"), - downstreamResource: pod("test-pod"). - WithNamespace("kcp-33jbiactwhg0"). - WithResourceVersion("11"). - WithLabels(map[string]string{ - "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", - "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Upsync", - }). - Unstructured(t).WithField("status", map[string]interface{}{"phase": "Running"}).Unstructured, - upstreamResource: pod("test-pod"). - WithClusterName("root:org:ws"). - WithNamespace("test"). - WithResourceVersion("1"). - WithLabels(map[string]string{ - "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Upsync", - }). - WithAnnotations(map[string]string{ - "workload.kcp.io/rv": "10", - }). - Unstructured(t).Unstructured, - resourceToProcessName: "test-pod", - syncTargetName: "us-west1", - expectActionsOnDownstream: []clienttesting.Action{}, - expectActionsOnUpstream: []kcptesting.Action{ - kcptesting.NewGetAction(corev1.SchemeGroupVersion.WithResource("pods"), logicalcluster.NewPath("root:org:ws"), "test", "test-pod"), - kcptesting.NewUpdateAction(corev1.SchemeGroupVersion.WithResource("pods"), logicalcluster.NewPath("root:org:ws"), "test", pod("test-pod"). - WithNamespace("test"). - WithResourceVersion("1"). - WithLabels(map[string]string{ - "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Upsync", - }). - WithAnnotations(map[string]string{"workload.kcp.io/rv": "10"}). - WithFinalizers("workload.kcp.io/syncer-6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g"). - Unstructured(t).WithField("status", map[string]interface{}{"phase": "Running"}).Unstructured), - kcptesting.NewUpdateSubresourceAction(corev1.SchemeGroupVersion.WithResource("pods"), logicalcluster.NewPath("root:org:ws"), "status", "test", pod("test-pod"). - WithNamespace("test"). - WithResourceVersion("1"). - WithLabels(map[string]string{ - "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Upsync", - }). - WithAnnotations(map[string]string{ - "workload.kcp.io/rv": "10", - }). - WithFinalizers("workload.kcp.io/syncer-6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g"). - Unstructured(t).WithField("status", map[string]interface{}{"phase": "Running"}).Unstructured), - kcptesting.NewUpdateAction(corev1.SchemeGroupVersion.WithResource("pods"), logicalcluster.NewPath("root:org:ws"), "test", pod("test-pod"). - WithNamespace("test"). - WithResourceVersion("1"). - WithLabels(map[string]string{ - "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Upsync", - }). - WithAnnotations(map[string]string{ - "workload.kcp.io/rv": "11", - }). - WithFinalizers("workload.kcp.io/syncer-6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g"). - Unstructured(t).WithField("status", map[string]interface{}{"phase": "Running"}).Unstructured), - }, - includeStatus: true, - }, - "Upsyncer updates cluster-wide resources": { - upstreamLogicalCluster: "root:org:ws", - upstreamNamespaceName: "", - downstreamNamespace: nil, - gvr: corev1.SchemeGroupVersion.WithResource("persistentvolumes"), - downstreamResource: pv("test-pv"). - WithResourceVersion("2"). - WithLabels(map[string]string{ - "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", - "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Upsync", - }). 
- WithAnnotations(map[string]string{ - "kcp.io/namespace-locator": `{"syncTarget": {"cluster":"root:org:ws", "name":"us-west1", "uid":"syncTargetUID"}, "cluster":"root:org:ws","namespace":""}`, - }). - Unstructured(t).WithoutFields("status").Unstructured, - upstreamResource: pv("test-pv"). - WithClusterName("root:org:ws"). - WithResourceVersion("1"). - WithLabels(map[string]string{ - "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", - "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Upsync", - }). - WithAnnotations(map[string]string{ - "workload.kcp.io/rv": "1", - }). - Object(), - resourceToProcessName: "test-pv", - syncTargetName: "us-west1", - expectActionsOnDownstream: []clienttesting.Action{}, - expectActionsOnUpstream: []kcptesting.Action{ - kcptesting.NewGetAction(corev1.SchemeGroupVersion.WithResource("persistentvolumes"), logicalcluster.NewPath("root:org:ws"), "", "test-pv"), - kcptesting.NewUpdateAction(corev1.SchemeGroupVersion.WithResource("persistentvolumes"), logicalcluster.NewPath("root:org:ws"), "", pv("test-pv"). - WithResourceVersion("1"). - WithLabels(map[string]string{ - "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Upsync", - }). - WithAnnotations(map[string]string{ - "workload.kcp.io/rv": "2", - }). - WithFinalizers("workload.kcp.io/syncer-6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g"). - Unstructured(t).WithoutFields("status").Unstructured), - }, - includeStatus: false, - }, - "Upsyncer deletes orphan upstream namespaced resources": { - upstreamLogicalCluster: "root:org:ws", - upstreamNamespaceName: "test", - downstreamNamespace: namespace("kcp-33jbiactwhg0"). - WithLabels(map[string]string{ - "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", - "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Upsync", - }). - WithAnnotations(map[string]string{ - "kcp.io/namespace-locator": `{"syncTarget": {"cluster":"root:org:ws", "name":"us-west1", "uid":"syncTargetUID"}, "cluster":"root:org:ws","namespace":"test"}`, - }). - Object(), - gvr: corev1.SchemeGroupVersion.WithResource("pods"), - upstreamResource: pod("test-pod"). - WithClusterName("root:org:ws"). - WithNamespace("test"). - WithResourceVersion("1"). - WithLabels(map[string]string{ - "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", - "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Upsync", - }). - Object(), - resourceToProcessName: "test-pod", - syncTargetName: "us-west1", - expectActionsOnDownstream: []clienttesting.Action{}, - expectActionsOnUpstream: []kcptesting.Action{ - kcptesting.NewGetAction(corev1.SchemeGroupVersion.WithResource("pods"), logicalcluster.NewPath("root:org:ws"), "test", "test-pod"), - kcptesting.NewDeleteAction(corev1.SchemeGroupVersion.WithResource("pods"), logicalcluster.NewPath("root:org:ws"), "test", "test-pod"), - }, - includeStatus: false, - }, - "Upsyncer should delete orphan upstream namespaced resource, but it doesn't exist": { - upstreamLogicalCluster: "root:org:ws", - upstreamNamespaceName: "test", - downstreamNamespace: namespace("kcp-33jbiactwhg0"). - WithLabels(map[string]string{ - "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", - "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Upsync", - }). - WithAnnotations(map[string]string{ - "kcp.io/namespace-locator": `{"syncTarget": {"cluster":"root:org:ws", "name":"us-west1", "uid":"syncTargetUID"}, "cluster":"root:org:ws","namespace":"test"}`, - }). 
- Object(), - gvr: corev1.SchemeGroupVersion.WithResource("pods"), - resourceToProcessName: "test-pod", - syncTargetName: "us-west1", - expectActionsOnDownstream: []clienttesting.Action{}, - expectActionsOnUpstream: []kcptesting.Action{ - kcptesting.NewGetAction(corev1.SchemeGroupVersion.WithResource("pods"), logicalcluster.NewPath("root:org:ws"), "test", "test-pod"), - }, - includeStatus: false, - }, - "Upsyncer deletes orphan upstream namespaced resources, even if the downstream namespace has also been deleted": { - upstreamLogicalCluster: "root:org:ws", - upstreamNamespaceName: "test", - downstreamNamespace: nil, - gvr: corev1.SchemeGroupVersion.WithResource("pods"), - upstreamResource: pod("test-pod"). - WithClusterName("root:org:ws"). - WithNamespace("test"). - WithResourceVersion("1"). - WithLabels(map[string]string{ - "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", - "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Upsync", - }). - Object(), - resourceToProcessName: "test-pod", - syncTargetName: "us-west1", - expectActionsOnDownstream: []clienttesting.Action{}, - expectActionsOnUpstream: []kcptesting.Action{ - kcptesting.NewGetAction(corev1.SchemeGroupVersion.WithResource("pods"), logicalcluster.NewPath("root:org:ws"), "test", "test-pod"), - kcptesting.NewDeleteAction(corev1.SchemeGroupVersion.WithResource("pods"), logicalcluster.NewPath("root:org:ws"), "test", "test-pod"), - }, - includeStatus: false, - }, - "Upsyncer deletes orphan upstream namespaced resources when downstream namespace is deleted": { - upstreamLogicalCluster: "root:org:ws", - upstreamNamespaceName: "test", - downstreamNamespace: namespace("kcp-33jbiactwhg0"). - WithLabels(map[string]string{ - "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", - "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Upsync", - }). - WithAnnotations(map[string]string{ - "kcp.io/namespace-locator": `{"syncTarget": {"cluster":"root:org:ws", "name":"us-west1", "uid":"syncTargetUID"}, "cluster":"root:org:ws","namespace":"test"}`, - }). - Object(), - downstreamResource: pod("test-pod"). - WithClusterName("root:org:ws"). - WithNamespace("kcp-33jbiactwhg0"). - WithResourceVersion("1"). - WithLabels(map[string]string{ - "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Upsync", - }). - Object(), - doOnDownstream: func(tc testCase, client dynamic.Interface) { - err := client.Resource(namespaceGVR).Delete(context.Background(), "kcp-33jbiactwhg0", metav1.DeleteOptions{}) - require.NoError(t, err) - }, - gvr: corev1.SchemeGroupVersion.WithResource("pods"), - upstreamResource: pod("test-pod"). - WithClusterName("root:org:ws"). - WithNamespace("test"). - WithResourceVersion("1"). - WithLabels(map[string]string{ - "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", - "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Upsync", - }). 
- Object(),
- resourceToProcessName: "test-pod",
- syncTargetName: "us-west1",
- expectActionsOnDownstream: []clienttesting.Action{},
- expectActionsOnUpstream: []kcptesting.Action{
- kcptesting.NewGetAction(corev1.SchemeGroupVersion.WithResource("pods"), logicalcluster.NewPath("root:org:ws"), "test", "test-pod"),
- kcptesting.NewDeleteAction(corev1.SchemeGroupVersion.WithResource("pods"), logicalcluster.NewPath("root:org:ws"), "test", "test-pod"),
- },
- includeStatus: false,
- },
- "Upsyncer deletes orphan upstream cluster-wide resources": {
- upstreamLogicalCluster: "root:org:ws",
- upstreamNamespaceName: "",
- downstreamNamespace: nil,
- gvr: corev1.SchemeGroupVersion.WithResource("persistentvolumes"),
- downstreamResource: nil,
- upstreamResource: pv("test-pv").
- WithClusterName("root:org:ws").
- WithResourceVersion("1").
- WithLabels(map[string]string{
- "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g",
- "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Upsync",
- }).
- Object(),
- resourceToProcessName: "test-pv",
- syncTargetName: "us-west1",
- expectActionsOnDownstream: []clienttesting.Action{},
- expectActionsOnUpstream: []kcptesting.Action{
- kcptesting.NewGetAction(corev1.SchemeGroupVersion.WithResource("persistentvolumes"), logicalcluster.NewPath("root:org:ws"), "", "test-pv"),
- kcptesting.NewDeleteAction(corev1.SchemeGroupVersion.WithResource("persistentvolumes"), logicalcluster.NewPath("root:org:ws"), "", "test-pv"),
- },
- includeStatus: false,
- },
- "Upsyncer deletes upstream resources if downstream resources have a deletion timestamp set": {
- upstreamLogicalCluster: "root:org:ws",
- upstreamNamespaceName: "test",
- downstreamNamespace: namespace("kcp-33jbiactwhg0").
- WithLabels(map[string]string{
- "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g",
- }).
- WithAnnotations(map[string]string{
- "kcp.io/namespace-locator": `{"syncTarget": {"cluster":"root:org:ws", "name":"us-west1", "uid":"syncTargetUID"}, "cluster":"root:org:ws","namespace":"test"}`,
- }).Object(),
- gvr: corev1.SchemeGroupVersion.WithResource("pods"),
- downstreamResource: pod("test-pod").
- WithNamespace("kcp-33jbiactwhg0").
- WithDeletionTimestamp().
- WithLabels(map[string]string{
- "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g",
- "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Upsync",
- }).
- WithResourceVersion("1").
- Object(),
- upstreamResource: pod("test-pod").
- WithClusterName("root:org:ws").
- WithNamespace("test").
- WithResourceVersion("1").
- WithLabels(map[string]string{
- "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Upsync",
- }).
- WithAnnotations(map[string]string{
- "workload.kcp.io/rv": "1",
- }).
- Object(),
- resourceToProcessName: "test-pod",
- syncTargetName: "us-west1",
- expectActionsOnDownstream: []clienttesting.Action{},
- expectActionsOnUpstream: []kcptesting.Action{
- kcptesting.NewDeleteAction(corev1.SchemeGroupVersion.WithResource("pods"), logicalcluster.NewPath("root:org:ws"), "test", "test-pod"),
- },
- includeStatus: false,
- },
- "Upsyncer handles deletion of upstream resources when they have finalizers set": {
- upstreamLogicalCluster: "root:org:ws",
- upstreamNamespaceName: "test",
- downstreamNamespace: namespace("kcp-33jbiactwhg0").
- WithLabels(map[string]string{
- "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g",
- "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Upsync",
- }).
- WithAnnotations(map[string]string{ - "kcp.io/namespace-locator": `{"syncTarget": {"cluster":"root:org:ws", "name":"us-west1", "uid":"syncTargetUID"}, "cluster":"root:org:ws","namespace":"test"}`, - }). - Object(), - gvr: corev1.SchemeGroupVersion.WithResource("pods"), - upstreamResource: pod("test-pod"). - WithClusterName("root:org:ws"). - WithNamespace("test"). - WithResourceVersion("1"). - WithLabels(map[string]string{ - "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", - "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Upsync", - }). - WithFinalizers("workload.kcp.io/syncer-6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g"). - Object(), - resourceToProcessName: "test-pod", - syncTargetName: "us-west1", - expectActionsOnDownstream: []clienttesting.Action{}, - expectActionsOnUpstream: []kcptesting.Action{ - kcptesting.NewGetAction(corev1.SchemeGroupVersion.WithResource("pods"), logicalcluster.NewPath("root:org:ws"), "test", "test-pod"), - kcptesting.NewUpdateAction(corev1.SchemeGroupVersion.WithResource("pods"), logicalcluster.NewPath("root:org:ws"), "test", pod("test-pod"). - WithClusterName("root:org:ws"). - WithNamespace("test"). - WithResourceVersion("1"). - WithLabels(map[string]string{ - "internal.workload.kcp.io/cluster": "6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g", - "state.workload.kcp.io/6ohB8yeXhwqTQVuBzJRgqcRJTpRjX7yTZu5g5g": "Upsync", - }). - Unstructured(t).Unstructured), - kcptesting.NewDeleteAction(corev1.SchemeGroupVersion.WithResource("pods"), logicalcluster.NewPath("root:org:ws"), "test", "test-pod"), - }, - includeStatus: false, - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - logger := klog.FromContext(ctx) - - kcpLogicalCluster := tc.upstreamLogicalCluster - syncTargetUID := tc.syncTargetUID - if tc.syncTargetUID == "" { - syncTargetUID = types.UID("syncTargetUID") - } - - if tc.syncTargetClusterName.Empty() { - tc.syncTargetClusterName = "root:org:ws" - } - - var allFromResources []runtime.Object - if tc.downstreamNamespace != nil { - allFromResources = append(allFromResources, tc.downstreamNamespace) - } - if tc.downstreamResource != nil { - allFromResources = append(allFromResources, tc.downstreamResource) - } - - fromClient := dynamicfake.NewSimpleDynamicClient(scheme, allFromResources...) - - syncTargetKey := workloadv1alpha1.ToSyncTargetKey(tc.syncTargetClusterName, tc.syncTargetName) - - var toResources []runtime.Object - if tc.upstreamResource != nil { - toResources = append(toResources, tc.upstreamResource) - } - toClusterClient := kcpfakedynamic.NewSimpleDynamicClient(scheme, toResources...) 
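- // Note: the downstream side uses a plain dynamic fake client (one physical
- // cluster), while the upstream side uses kcp's cluster-aware fake client,
- // so actions can be asserted per logical cluster.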
- - ddsifForUpstreamUpsyncer, err := ddsif.NewDiscoveringDynamicSharedInformerFactory(toClusterClient, nil, nil, &mockedGVRSource{true}, cache.Indexers{}) - require.NoError(t, err) - - ddsifForDownstream, err := ddsif.NewScopedDiscoveringDynamicSharedInformerFactory(fromClient, nil, - func(o *metav1.ListOptions) { - o.LabelSelector = workloadv1alpha1.InternalDownstreamClusterLabel + "=" + syncTargetKey - }, - &mockedGVRSource{}, - cache.Indexers{ - indexers.ByNamespaceLocatorIndexName: indexers.IndexByNamespaceLocator, - }, - ) - require.NoError(t, err) - - setupServersideApplyPatchReactor(toClusterClient) - fromClientResourceWatcherStarted := setupWatchReactor(t, tc.gvr.Resource, fromClient) - toClientResourceWatcherStarted := setupClusterWatchReactor(t, tc.gvr.Resource, toClusterClient) - - // upstream => to (kcp) - // downstream => from (physical cluster) - // to === kcp - // from === physical - controller, err := NewUpSyncer(logger, kcpLogicalCluster, tc.syncTargetName, syncTargetKey, func(clusterName logicalcluster.Name) (synctarget.ShardAccess, bool, error) { - return synctarget.ShardAccess{ - UpsyncerClient: toClusterClient, - UpsyncerDDSIF: ddsifForUpstreamUpsyncer, - }, true, nil - }, fromClient, ddsifForDownstream, syncTargetUID) - require.NoError(t, err) - - ddsifForUpstreamUpsyncer.Start(ctx.Done()) - ddsifForDownstream.Start(ctx.Done()) - - go ddsifForUpstreamUpsyncer.StartWorker(ctx) - go ddsifForDownstream.StartWorker(ctx) - - <-fromClientResourceWatcherStarted - <-toClientResourceWatcherStarted - - // The only GVRs we care about are the 3 listed below - t.Logf("waiting for upstream and downstream dynamic informer factories to be synced") - gvrs := sets.New[string]( - corev1.SchemeGroupVersion.WithResource("namespaces").String(), - corev1.SchemeGroupVersion.WithResource("pods").String(), - corev1.SchemeGroupVersion.WithResource("persistentvolumes").String(), - ) - require.Eventually(t, func() bool { - syncedUpstream, _ := ddsifForUpstreamUpsyncer.Informers() - foundUpstream := sets.New[string]() - for gvr := range syncedUpstream { - foundUpstream.Insert(gvr.String()) - } - - syncedDownstream, _ := ddsifForDownstream.Informers() - foundDownstream := sets.New[string]() - for gvr := range syncedDownstream { - foundDownstream.Insert(gvr.String()) - } - return foundUpstream.IsSuperset(gvrs) && foundDownstream.IsSuperset(gvrs) - }, wait.ForeverTestTimeout, 100*time.Millisecond) - t.Logf("upstream and downstream dynamic informer factories are synced") - - if tc.doOnDownstream != nil { - tc.doOnDownstream(tc, fromClient) - } - - fromClient.ClearActions() - toClusterClient.ClearActions() - - obj := &metav1.ObjectMeta{ - Name: tc.resourceToProcessName, - Namespace: tc.upstreamNamespaceName, - Annotations: map[string]string{ - logicalcluster.AnnotationKey: kcpLogicalCluster.String(), - }, - } - - key, err := kcpcache.DeletionHandlingMetaClusterNamespaceKeyFunc(obj) - require.NoError(t, err) - - if tc.includeStatus { - controller.dirtyStatusKeys.Store(queueKey{ - key: key, - gvr: tc.gvr, - }, true) - } - - requeue, err := controller.process(context.Background(), key, tc.gvr) - assert.Equal(t, tc.expectRequeue, requeue) - if tc.expectError { - assert.Error(t, err) - } else { - assert.NoError(t, err) - } - assert.Empty(t, cmp.Diff(tc.expectActionsOnDownstream, fromClient.Actions())) - assert.Empty(t, cmp.Diff(tc.expectActionsOnUpstream, toClusterClient.Actions())) - }) - } -} - -func setupWatchReactor(t *testing.T, resource string, client *dynamicfake.FakeDynamicClient) chan struct{} { - 
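- // The returned channel is closed once the fake client's watch for the
- // given resource has actually started, so tests can block until the
- // informer machinery is wired up before acting.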
t.Helper() - watcherStarted := make(chan struct{}) - client.PrependWatchReactor(resource, func(action clienttesting.Action) (handled bool, ret watch.Interface, err error) { - gvr := action.GetResource() - ns := action.GetNamespace() - watch, err := client.Tracker().Watch(gvr, ns) - if err != nil { - return false, nil, err - } - t.Logf("%s: watcher started", t.Name()) - close(watcherStarted) - return true, watch, nil - }) - return watcherStarted -} - -func setupClusterWatchReactor(t *testing.T, resource string, client *kcpfakedynamic.FakeDynamicClusterClientset) chan struct{} { - t.Helper() - watcherStarted := make(chan struct{}) - client.PrependWatchReactor(resource, func(action kcptesting.Action) (bool, watch.Interface, error) { - cluster := action.GetCluster() - gvr := action.GetResource() - ns := action.GetNamespace() - var watcher watch.Interface - var err error - switch cluster { - case logicalcluster.Wildcard: - watcher, err = client.Tracker().Watch(gvr, ns) - default: - watcher, err = client.Tracker().Cluster(cluster).Watch(gvr, ns) - } - t.Logf("%s: cluster watcher started", t.Name()) - close(watcherStarted) - return true, watcher, err - }) - return watcherStarted -} - -func setupServersideApplyPatchReactor(toClient *kcpfakedynamic.FakeDynamicClusterClientset) { - toClient.PrependReactor("patch", "*", func(action kcptesting.Action) (handled bool, ret runtime.Object, err error) { - patchAction := action.(kcptesting.PatchAction) - if patchAction.GetPatchType() != types.ApplyPatchType { - return false, nil, nil - } - return true, nil, err - }) -} - -type resourceBuilder[Type metav1.Object] struct { - obj Type -} - -func (r *resourceBuilder[Type]) Object() Type { - return r.obj -} - -type unstructuredType struct { - *unstructured.Unstructured - t *testing.T -} - -func (u *unstructuredType) WithoutFields(fieldsToPrune ...string) *unstructuredType { - for _, field := range fieldsToPrune { - unstructured.RemoveNestedField(u.Object, strings.Split(field, ".")...) - } - - return u -} - -func (u *unstructuredType) WithField(key string, value interface{}) *unstructuredType { - err := unstructured.SetNestedField(u.Object, value, strings.Split(key, ".")...) 
- require.NoError(u.t, err) - return u -} - -func (r *resourceBuilder[Type]) Unstructured(t *testing.T) *unstructuredType { - var unstr unstructured.Unstructured - err := scheme.Convert(r.obj, &unstr, nil) - require.NoError(t, err) - result := unstructuredType{&unstr, t} - return &result -} - -func (r *resourceBuilder[Type]) WithNamespace(namespace string) *resourceBuilder[Type] { - r.obj.SetNamespace(namespace) - return r -} - -func (r *resourceBuilder[Type]) WithResourceVersion(resourceVersion string) *resourceBuilder[Type] { - r.obj.SetResourceVersion(resourceVersion) - return r -} - -func (r *resourceBuilder[Type]) WithDeletionTimestamp() *resourceBuilder[Type] { - now := metav1.Now() - r.obj.SetDeletionTimestamp(&now) - return r -} - -func (r *resourceBuilder[Type]) WithClusterName(clusterName string) *resourceBuilder[Type] { - annotations := r.obj.GetAnnotations() - if annotations == nil { - annotations = make(map[string]string) - } - annotations[logicalcluster.AnnotationKey] = clusterName - - r.obj.SetAnnotations(annotations) - return r -} - -func (r *resourceBuilder[Type]) WithAnnotations(additionalAnnotations map[string]string) *resourceBuilder[Type] { - annotations := r.obj.GetAnnotations() - if annotations == nil { - annotations = make(map[string]string) - } - for k, v := range additionalAnnotations { - annotations[k] = v - } - - r.obj.SetAnnotations(annotations) - return r -} - -func (r *resourceBuilder[Type]) WithLabels(additionalLabels map[string]string) *resourceBuilder[Type] { - labels := r.obj.GetLabels() - if labels == nil { - labels = make(map[string]string) - } - for k, v := range additionalLabels { - labels[k] = v - } - - r.obj.SetLabels(labels) - return r -} - -func (r *resourceBuilder[Type]) WithFinalizers(finalizers ...string) *resourceBuilder[Type] { - r.obj.SetFinalizers(finalizers) - return r -} - -func newResourceBuilder[Type metav1.Object](obj Type, name string) *resourceBuilder[Type] { - obj.SetName(name) - return &resourceBuilder[Type]{obj} -} - -func namespace(name string) *resourceBuilder[*corev1.Namespace] { - return newResourceBuilder(&corev1.Namespace{}, name) -} - -func pv(name string) *resourceBuilder[*corev1.PersistentVolume] { - return newResourceBuilder(&corev1.PersistentVolume{}, name) -} - -func pod(name string) *resourceBuilder[*corev1.Pod] { - return newResourceBuilder(&corev1.Pod{}, name) -} diff --git a/pkg/syncer/upsync/upsync_reconcile.go b/pkg/syncer/upsync/upsync_reconcile.go deleted file mode 100644 index b763780c883..00000000000 --- a/pkg/syncer/upsync/upsync_reconcile.go +++ /dev/null @@ -1,208 +0,0 @@ -/* -Copyright 2023 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package upsync - -import ( - "context" - - "github.com/kcp-dev/logicalcluster/v3" - - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/tools/cache" - "k8s.io/klog/v2" - - "github.com/kcp-dev/kcp/pkg/syncer/shared" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" - . "github.com/kcp-dev/kcp/tmc/pkg/logging" -) - -const ( - // ResourceVersionAnnotation is an annotation set on a resource upsynced upstream - // that contains the resourceVersion of the corresponding downstream resource - // when it was last upsynced. - // It is used to check easily, without having to compare the resource contents, - // whether an upsynced upstream resource is up-to-date with the downstream resource. - ResourceVersionAnnotation = "workload.kcp.io/rv" -) - -type reconciler struct { - cleanupReconciler - - getUpstreamUpsyncerLister func(clusterName logicalcluster.Name, gvr schema.GroupVersionResource) (cache.GenericLister, error) - getUpsyncedGVRs func(clusterName logicalcluster.Name) ([]schema.GroupVersionResource, error) - - syncTargetKey string -} - -func (c *reconciler) reconcile(ctx context.Context, upstreamObject *unstructured.Unstructured, gvr schema.GroupVersionResource, upstreamClusterName logicalcluster.Name, upstreamNamespace, upstreamName string, dirtyStatus bool) (bool, error) { - logger := klog.FromContext(ctx) - - downstreamResource, err := c.cleanupReconciler.getDownstreamResource(ctx, gvr, upstreamClusterName, upstreamNamespace, upstreamName) - if err != nil && !apierrors.IsNotFound(err) { - return false, err - } - if downstreamResource == nil { - // Downstream resource not present => force delete resource upstream (also remove finalizers) - err = c.deleteOrphanUpstreamResource(ctx, gvr, upstreamClusterName, upstreamNamespace, upstreamName) - if apierrors.IsNotFound(err) { - return false, nil - } - return false, err - } - - upstreamClient, err := c.getUpstreamClient(upstreamClusterName) - if err != nil { - return false, err - } - - logger = logger.WithValues(DownstreamNamespace, downstreamResource.GetNamespace()) - ctx = klog.NewContext(ctx, logger) - - downstreamRV := downstreamResource.GetResourceVersion() - markedForDeletionDownstream := downstreamResource.GetDeletionTimestamp() != nil - - // (potentially) create object upstream - if upstreamObject == nil { - if markedForDeletionDownstream { - return false, nil - } - logger.V(1).Info("Creating resource upstream") - preparedResource := c.prepareResourceForUpstream(ctx, gvr, upstreamNamespace, upstreamClusterName, downstreamResource) - - if !dirtyStatus { - // if no status needs to be upsynced upstream, then we can set the resource version annotation at the same time as we create the - // resource - preparedResource.SetAnnotations(addResourceVersionAnnotation(downstreamRV, preparedResource.GetAnnotations())) - // Create the resource - _, err := upstreamClient.Resource(gvr).Namespace(upstreamNamespace).Create(ctx, preparedResource, metav1.CreateOptions{}) - return false, err - } - - // Status also needs to be upsynced so let's do it in 3 steps: - // - create the resource - createdResource, err := upstreamClient.Resource(gvr).Namespace(upstreamNamespace).Create(ctx, preparedResource, metav1.CreateOptions{}) - if err != nil { - return false, err - } - // - update the status as a distinct action, - 
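- //   (the API server typically ignores the status field on create for
- //   resources that have a status subresource, hence the separate
- //   UpdateStatus call),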
- preparedResource.SetResourceVersion(createdResource.GetResourceVersion())
- updatedResource, err := upstreamClient.Resource(gvr).Namespace(upstreamNamespace).UpdateStatus(ctx, preparedResource, metav1.UpdateOptions{})
- if err != nil {
- return false, err
- }
- // - finally update the main content again to set the resource version annotation to the value of the downstream resource version
- preparedResource.SetAnnotations(addResourceVersionAnnotation(downstreamRV, preparedResource.GetAnnotations()))
- preparedResource.SetResourceVersion(updatedResource.GetResourceVersion())
- _, err = upstreamClient.Resource(gvr).Namespace(upstreamNamespace).Update(ctx, preparedResource, metav1.UpdateOptions{})
- return false, err
- }
-
- // update upstream when annotation RV differs
- resourceVersionUpstream := upstreamObject.GetAnnotations()[ResourceVersionAnnotation]
- if downstreamRV != resourceVersionUpstream {
- logger.V(1).Info("Updating upstream resource")
- preparedResource := c.prepareResourceForUpstream(ctx, gvr, upstreamNamespace, upstreamClusterName, downstreamResource)
-
- // quick path: status unchanged, only update main resource
- if !dirtyStatus {
- preparedResource.SetAnnotations(addResourceVersionAnnotation(downstreamRV, preparedResource.GetAnnotations()))
- existingResource, err := upstreamClient.Resource(gvr).Namespace(upstreamNamespace).Get(ctx, preparedResource.GetName(), metav1.GetOptions{})
- if err != nil {
- return false, err
- }
- preparedResource.SetResourceVersion(existingResource.GetResourceVersion())
- _, err = upstreamClient.Resource(gvr).Namespace(upstreamNamespace).Update(ctx, preparedResource, metav1.UpdateOptions{})
- // If the downstream resource is marked for deletion, let's requeue it to manage the deletion timestamp
- return markedForDeletionDownstream, err
- }
-
- // slow path: status changed => we need 3 steps
- // 1. update main resource
-
- preparedResource.SetAnnotations(addResourceVersionAnnotation(resourceVersionUpstream, preparedResource.GetAnnotations()))
- existingResource, err := upstreamClient.Resource(gvr).Namespace(upstreamNamespace).Get(ctx, preparedResource.GetName(), metav1.GetOptions{})
- if err != nil {
- return false, err
- }
- preparedResource.SetResourceVersion(existingResource.GetResourceVersion())
- updatedResource, err := upstreamClient.Resource(gvr).Namespace(upstreamNamespace).Update(ctx, preparedResource, metav1.UpdateOptions{})
- if err != nil {
- return false, err
- }
-
- // 2. update the status as a distinct action,
- preparedResource.SetResourceVersion(updatedResource.GetResourceVersion())
- updatedResource, err = upstreamClient.Resource(gvr).Namespace(upstreamNamespace).UpdateStatus(ctx, preparedResource, metav1.UpdateOptions{})
- if err != nil {
- return false, err
- }
-
- // 3. finally update the main resource again to set the resource version annotation to the value of the downstream resource version.
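- //    Note: writing the annotation only in this final step is what makes
- //    the 3-step sequence safe to retry: if an earlier step fails, the
- //    annotation keeps its old value, the resource-version comparison above
- //    stays unequal, and the resource is reconciled again.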
- preparedResource.SetAnnotations(addResourceVersionAnnotation(downstreamRV, preparedResource.GetAnnotations())) - preparedResource.SetResourceVersion(updatedResource.GetResourceVersion()) - _, err = upstreamClient.Resource(gvr).Namespace(upstreamNamespace).Update(ctx, preparedResource, metav1.UpdateOptions{}) - // If the downstream resource is marked for deletion, let's requeue it to manage the deletion timestamp - return markedForDeletionDownstream, err - } - - if downstreamResource.GetDeletionTimestamp() != nil { - if err := upstreamClient.Resource(gvr).Namespace(upstreamNamespace).Delete(ctx, upstreamName, metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { - return false, err - } - } - - return false, nil -} - -func (c *reconciler) prepareResourceForUpstream(ctx context.Context, gvr schema.GroupVersionResource, upstreamNS string, upstreamLogicalCluster logicalcluster.Name, downstreamObj *unstructured.Unstructured) *unstructured.Unstructured { - // Make a deepcopy - resourceToUpsync := downstreamObj.DeepCopy() - annotations := resourceToUpsync.GetAnnotations() - if annotations != nil { - delete(annotations, shared.NamespaceLocatorAnnotation) - resourceToUpsync.SetAnnotations(annotations) - } - labels := resourceToUpsync.GetLabels() - if labels != nil { - delete(labels, workloadv1alpha1.InternalDownstreamClusterLabel) - resourceToUpsync.SetLabels(labels) - } - resourceToUpsync.SetNamespace(upstreamNS) - resourceToUpsync.SetUID("") - resourceToUpsync.SetResourceVersion("") - resourceToUpsync.SetManagedFields(nil) - resourceToUpsync.SetDeletionTimestamp(nil) - resourceToUpsync.SetDeletionGracePeriodSeconds(nil) - resourceToUpsync.SetOwnerReferences(nil) - resourceToUpsync.SetFinalizers([]string{shared.SyncerFinalizerNamePrefix + c.syncTargetKey}) - - return resourceToUpsync -} - -func addResourceVersionAnnotation(resourceVersion string, annotations map[string]string) map[string]string { - if annotations == nil { - annotations = make(map[string]string, 1) - } - annotations[ResourceVersionAnnotation] = resourceVersion - return annotations -} diff --git a/pkg/tunneler/dialer.go b/pkg/tunneler/dialer.go deleted file mode 100644 index ac2febc2fcb..00000000000 --- a/pkg/tunneler/dialer.go +++ /dev/null @@ -1,172 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package tunneler - -// Based on https://github.com/golang/build/blob/master/tunneler/v2/tunneler.go - -import ( - "bufio" - "context" - "encoding/json" - "errors" - "fmt" - "log" - "net" - "sync" - "time" - - "k8s.io/klog/v2" -) - -// The Dialer can create new connections back to the origin. -// A Dialer can have multiple clients. -type Dialer struct { - conn net.Conn // control plane connection - incomingConn chan net.Conn // data plane connections - connReady chan bool - pickupFailed chan error - donec chan struct{} - closeOnce sync.Once -} - -// NewDialer returns the side of the connection which will initiate -// new connections over the already established reverse connections. 
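-// A typical pairing, as exercised by the integration tests: the server side
-// wraps an accepted control connection in a Dialer and calls Dial to reach
-// the remote endpoint, while the remote side runs the corresponding Listener.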
-func NewDialer(conn net.Conn) *Dialer { - d := &Dialer{ - conn: conn, - donec: make(chan struct{}), - connReady: make(chan bool), - pickupFailed: make(chan error), - incomingConn: make(chan net.Conn), - } - go d.serve() - return d -} - -// serve blocks and runs the control message loop, keeping the peer -// alive and notifying the peer when new connections are available. -func (d *Dialer) serve() { - defer d.Close() - go func() { - defer d.Close() - br := bufio.NewReader(d.conn) - for { - line, err := br.ReadSlice('\n') - if err != nil { - return - } - select { - case <-d.donec: - return - default: - } - var msg controlMsg - if err := json.Unmarshal(line, &msg); err != nil { - log.Printf("tunneler.Dialer read invalid JSON: %q: %v", line, err) - return - } - switch msg.Command { - case "pickup-failed": - err := fmt.Errorf("tunneler listener failed to pick up connection: %v", msg.Err) - select { - case d.pickupFailed <- err: - case <-d.donec: - return - } - } - } - }() - for { - if err := d.sendMessage(controlMsg{Command: "keep-alive"}); err != nil { - return - } - - t := time.NewTimer(30 * time.Second) - select { - case <-t.C: - continue - case <-d.connReady: - if err := d.sendMessage(controlMsg{ - Command: "conn-ready", - ConnPath: "", - }); err != nil { - return - } - case <-d.donec: - return - } - } -} - -func (d *Dialer) sendMessage(m controlMsg) error { - j, err := json.Marshal(m) - if err != nil { - return err - } - err = d.conn.SetWriteDeadline(time.Now().Add(10 * time.Second)) - if err != nil { - return err - } - j = append(j, '\n') - _, err = d.conn.Write(j) - if err != nil { - return err - } - return d.conn.SetWriteDeadline(time.Time{}) -} - -// Done returns a channel which is closed when d is closed (either by -// this process on purpose, by a local error, or close or error from -// the peer). -func (d *Dialer) Done() <-chan struct{} { return d.donec } - -// Close closes the Dialer. -func (d *Dialer) Close() error { - d.closeOnce.Do(d.close) - return nil -} - -func (d *Dialer) close() { - d.conn.Close() - close(d.donec) -} - -// Dial creates a new connection back to the Listener. -func (d *Dialer) Dial(ctx context.Context, network string, address string) (net.Conn, error) { - now := time.Now() - defer klog.FromContext(ctx).V(5).WithValues("address", address, "duration", time.Since(now)).Info("dialed") - // First, tell serve that we want a connection: - select { - case d.connReady <- true: - case <-d.donec: - return nil, errors.New("tunneler.Dialer closed") - case <-ctx.Done(): - return nil, ctx.Err() - } - - // Then pick it up: - select { - case c := <-d.incomingConn: - return c, nil - case err := <-d.pickupFailed: - return nil, err - case <-d.donec: - return nil, errors.New("tunneler.Dialer closed") - case <-ctx.Done(): - return nil, ctx.Err() - } -} diff --git a/pkg/tunneler/integration_test.go b/pkg/tunneler/integration_test.go deleted file mode 100644 index de7665b16b1..00000000000 --- a/pkg/tunneler/integration_test.go +++ /dev/null @@ -1,260 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package tunneler
-
-import (
- "bytes"
- "fmt"
- "io"
- "net/http"
- "net/http/httptest"
- "net/http/httputil"
- "net/url"
- "sync"
- "testing"
- "time"
-
- "github.com/stretchr/testify/require"
-
- genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
-)
-
-// requestInfoHandler is a helper function that populates the requestInfo of a request as expected
-// by the WithSyncerTunnelHandler.
-func requestInfoHandler(handler http.Handler) http.HandlerFunc {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- ctx := r.Context()
- if _, ok := genericapirequest.RequestInfoFrom(ctx); ok {
- handler.ServeHTTP(w, r)
- return
- }
- r = r.WithContext(genericapirequest.WithRequestInfo(ctx,
- &genericapirequest.RequestInfo{
- IsResourceRequest: true,
- APIGroup: "workload.kcp.io",
- APIVersion: "v1alpha1",
- Resource: "synctargets",
- Subresource: "tunnel",
- Name: "d001",
- },
- ))
- r = r.WithContext(genericapirequest.WithCluster(r.Context(), genericapirequest.Cluster{Name: "ws"}))
- handler.ServeHTTP(w, r)
- })
-}
-
-func setup(t *testing.T) (string, *tunneler, func()) {
- backend := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- fmt.Fprintf(w, "Hello world")
- }))
- backend.EnableHTTP2 = true
- backend.StartTLS()
-
- // public server
- mux := http.NewServeMux()
- tunneler := NewTunneler()
- apiHandler := tunneler.WithSyncerTunnelHandler(mux)
- apiHandler = requestInfoHandler(apiHandler)
- publicServer := httptest.NewUnstartedServer(apiHandler)
- publicServer.EnableHTTP2 = true
- publicServer.StartTLS()
-
- // private server
- dstURL, err := SyncerTunnelURL(publicServer.URL, "ws", "d001")
- if err != nil {
- t.Fatal(err)
- }
- l, err := NewListener(publicServer.Client(), dstURL)
- if err != nil {
- t.Fatal(err)
- }
-
- // reverse proxy queries to an internal host
- url, err := url.Parse(backend.URL)
- if err != nil {
- t.Fatal(err)
- }
- proxy := httputil.NewSingleHostReverseProxy(url)
- proxy.Transport = backend.Client().Transport
- server := &http.Server{Handler: proxy}
- //nolint:errcheck
- go server.Serve(l)
-
- // client
- // wait for the reverse connection to be established
- time.Sleep(1 * time.Second)
- stop := func() {
- l.Close()
- server.Close()
- publicServer.Close()
- backend.Close()
- }
- return dstURL, tunneler, stop
-}
-
-func Test_integration(t *testing.T) {
- uri, tunneler, stop := setup(t)
- rw := httptest.NewRecorder()
- b := &bytes.Buffer{}
- req, err := http.NewRequest(http.MethodGet, uri, b) //nolint:noctx
- require.NoError(t, err)
- tunneler.Proxy("ws", "d001", rw, req)
- defer stop()
-
- response := rw.Result()
- body, err := io.ReadAll(response.Body)
- defer response.Body.Close()
- if err != nil {
- t.Fatalf("Reading body failed: %s", err)
- }
-
- // Check the response body
- bodyString := string(body)
- if bodyString != "Hello world" {
- t.Errorf("Expected %s received %s", "Hello world", bodyString)
- }
-}
-
-func Test_integration_multiple_connections(t *testing.T) {
- uri, tunneler, stop := setup(t)
- defer stop()
- var wg sync.WaitGroup
- for i := 0; i < 100; i++ {
- wg.Add(1)
- go func() {
- defer wg.Done()
- rw := httptest.NewRecorder()
- b := &bytes.Buffer{}
- req, err := http.NewRequest(http.MethodGet, uri, b) //nolint:noctx
- require.NoError(t, err)
- tunneler.Proxy("ws", "d001", rw, req)
-
- response := rw.Result()
- body, err := io.ReadAll(response.Body)
- defer response.Body.Close()
- if err != nil {
- t.Errorf("Reading body failed: %s", err)
- }
-
- // Check the response body
- bodyString := string(body)
- if bodyString != "Hello world" {
- t.Errorf("Expected %s received %s", "Hello world", bodyString)
- }
- }()
- }
- wg.Wait()
-}
-
-func Test_integration_listener_reconnect(t *testing.T) {
- backend := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- fmt.Fprintf(w, "Hello world")
- }))
- backend.EnableHTTP2 = true
- backend.StartTLS()
- defer backend.Close()
-
- // public server
- mux := http.NewServeMux()
- tunneler := NewTunneler()
- apiHandler := tunneler.WithSyncerTunnelHandler(mux)
- apiHandler = requestInfoHandler(apiHandler)
- publicServer := httptest.NewUnstartedServer(apiHandler)
- publicServer.EnableHTTP2 = true
- publicServer.StartTLS()
- defer publicServer.Close()
-
- // private server
- dstURL, err := SyncerTunnelURL(publicServer.URL, "ws", "d001")
- if err != nil {
- t.Fatal(err)
- }
- l, err := NewListener(publicServer.Client(), dstURL)
- if err != nil {
- t.Fatal(err)
- }
-
- // reverse proxy queries to an internal host
- url, err := url.Parse(backend.URL)
- if err != nil {
- t.Fatal(err)
- }
- proxy := httputil.NewSingleHostReverseProxy(url)
- proxy.Transport = backend.Client().Transport
- server := &http.Server{Handler: proxy}
- //nolint:errcheck
- go server.Serve(l)
- defer server.Close()
-
- // client
- // wait for the reverse connection to be established
- time.Sleep(1 * time.Second)
-
- rw := httptest.NewRecorder()
- b := &bytes.Buffer{}
- req, err := http.NewRequest(http.MethodGet, dstURL, b) //nolint:noctx
- require.NoError(t, err)
- tunneler.Proxy("ws", "d001", rw, req)
-
- response := rw.Result()
- body, err := io.ReadAll(response.Body)
- defer response.Body.Close()
- if err != nil {
- t.Fatalf("Reading body failed: %s", err)
- }
- // Check the response body
- bodyString := string(body)
- if bodyString != "Hello world" {
- t.Errorf("Expected %s received %s", "Hello world", bodyString)
- }
-
- // reconnect
- server.Close()
- l.Close()
- <-l.donec
- l = nil
-
- l2, err := NewListener(publicServer.Client(), dstURL)
- if err != nil {
- t.Fatal(err)
- }
- defer l2.Close()
- server2 := &http.Server{Handler: proxy}
- //nolint:errcheck
- go server2.Serve(l2)
- defer server2.Close()
-
- rw2 := httptest.NewRecorder()
- b2 := &bytes.Buffer{}
- req2, err := http.NewRequest(http.MethodGet, dstURL, b2) //nolint:noctx
- require.NoError(t, err)
- tunneler.Proxy("ws", "d001", rw2, req2)
-
- response = rw2.Result()
- body, err = io.ReadAll(response.Body)
- defer response.Body.Close()
- if err != nil {
- t.Fatalf("Reading body failed: %s", err)
- }
-
- // Check the response body
- bodyString = string(body)
- if bodyString != "Hello world" {
- t.Errorf("Expected %s received %s", "Hello world", bodyString)
- }
-}
diff --git a/pkg/tunneler/listener.go b/pkg/tunneler/listener.go
deleted file mode 100644
index 3ae8386ad4e..00000000000
--- a/pkg/tunneler/listener.go
+++ /dev/null
@@ -1,269 +0,0 @@
-/*
-Copyright 2022 The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package tunneler
-
-// Based on https://github.com/golang/build/blob/master/revdial/v2/revdial.go
-
-import (
-	"bufio"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"io"
-	"log"
-	"math/rand"
-	"net"
-	"net/http"
-	"sync"
-	"time"
-
-	"github.com/aojea/rwconn"
-	"golang.org/x/net/http2"
-
-	"k8s.io/klog/v2"
-)
-
-var _ net.Listener = (*Listener)(nil)
-
-// Listener is a net.Listener, returning new connections which arrive
-// from a corresponding Dialer.
-type Listener struct {
-	url    string
-	client *http.Client
-
-	sc     net.Conn // control plane connection
-	connc  chan net.Conn
-	donec  chan struct{}
-	writec chan<- []byte
-
-	mu      sync.Mutex // guards below, closing connc, and writing to rw
-	readErr error
-	closed  bool
-}
-
-// NewListener returns a new Listener; it dials the Dialer,
-// creating "reverse connections" that are accepted by this Listener.
-// - client: http client, required for TLS
-// - url: a URL to the base of the reverse handler on the Dialer.
-func NewListener(client *http.Client, url string) (*Listener, error) {
-	err := configureHTTP2Transport(client)
-	if err != nil {
-		return nil, err
-	}
-
-	ln := &Listener{
-		url:    url,
-		client: client,
-		connc:  make(chan net.Conn, 4), // arbitrary
-		donec:  make(chan struct{}),
-	}
-
-	// create control plane connection
-	// poor man's backoff retry
-	sleep := 1 * time.Second
-	var c net.Conn
-	for attempts := 5; attempts > 0; attempts-- {
-		c, err = ln.dial()
-		if err != nil {
-			klog.Background().V(5).WithValues("err", err).Info("cannot create control connection")
-			// Add some randomness to prevent creating a Thundering Herd
-			jitter := time.Duration(rand.Int63n(int64(sleep)))
-			sleep = 2*sleep + jitter/2
-			time.Sleep(sleep)
-		} else {
-			ln.sc = c
-			break
-		}
-	}
-	if c == nil || err != nil {
-		return nil, err
-	}
-
-	go ln.run()
-	return ln, nil
-}
-
-// run establishes reverse connections against the server.
-func (ln *Listener) run() {
-	defer ln.Close()
-
-	// Write loop
-	writec := make(chan []byte, 8)
-	ln.writec = writec
-	go func() {
-		for {
-			select {
-			case <-ln.donec:
-				return
-			case msg := <-writec:
-				if _, err := ln.sc.Write(msg); err != nil {
-					log.Printf("tunneler.Listener: error writing message to server: %v", err)
-					ln.Close()
-					return
-				}
-			}
-		}
-	}()
-
-	// Read loop
-	br := bufio.NewReader(ln.sc)
-	for {
-		line, err := br.ReadSlice('\n')
-		if err != nil {
-			return
-		}
-		var msg controlMsg
-		if err := json.Unmarshal(line, &msg); err != nil {
-			log.Printf("tunneler.Listener read invalid JSON: %q: %v", line, err)
-			return
-		}
-		switch msg.Command {
-		case "keep-alive":
-			// Occasional no-op message from server to keep
-			// us alive through NAT timeouts.
-		case "conn-ready":
-			go ln.grabConn()
-		default:
-			// Ignore unknown messages
-		}
-	}
-}
-
-func (ln *Listener) sendMessage(m controlMsg) {
-	j, _ := json.Marshal(m) //nolint:errchkjson
-	j = append(j, '\n')
-	ln.writec <- j
-}
-
-func (ln *Listener) dial() (net.Conn, error) {
-	connect := ln.url + "/" + tunnelSubresourcePath
-	pr, pw := io.Pipe()
-	req, err := http.NewRequest(http.MethodGet, connect, pr) //nolint:noctx
-	if err != nil {
-		klog.Background().V(5).WithValues("err", err).Info("cannot create request")
-		return nil, err
-	}
-
-	logger := klog.Background().WithValues("address", connect)
-	logger.V(5).Info("listener creating connection to address")
-	res, err := ln.client.Do(req) //nolint:bodyclose // Seems we're returning the connection with res.Body, caller closes it?
-	if err != nil {
-		logger.V(5).WithValues("err", err).Info("cannot connect to address")
-		return nil, err
-	}
-	if res.StatusCode != http.StatusOK {
-		logger.V(5).WithValues("statusCode", res.StatusCode).Info("status code on request")
-		return nil, fmt.Errorf("status code %d", res.StatusCode)
-	}
-
-	conn := rwconn.NewConn(res.Body, pw)
-	return conn, nil
-}
-
-func (ln *Listener) grabConn() {
-	// create a new connection
-	c, err := ln.dial()
-	if err != nil {
-		klog.Background().V(5).WithValues("err", err).Info("cannot create connection")
-		ln.sendMessage(controlMsg{Command: "pickup-failed", ConnPath: "", Err: err.Error()})
-		return
-	}
-
-	// send the connection to the listener
-	select {
-	case <-ln.donec:
-		return
-	default:
-		select {
-		case ln.connc <- c:
-		case <-ln.donec:
-			return
-		}
-	}
-}
-
-// Accept blocks and returns a new connection, or an error.
-func (ln *Listener) Accept() (net.Conn, error) {
-	c, ok := <-ln.connc
-	if !ok {
-		ln.mu.Lock()
-		err, closed := ln.readErr, ln.closed
-		ln.mu.Unlock()
-		if err != nil && !closed {
-			return nil, fmt.Errorf("tunneler: Listener closed; %w", err)
-		}
-		return nil, ErrListenerClosed
-	}
-	klog.Background().V(5).Info("accepted connection")
-	return c, nil
-}
-
-// ErrListenerClosed is returned by Accept after Close has been called.
-var ErrListenerClosed = errors.New("tunneler: Listener closed")
-
-// Close closes the Listener, making future Accept calls return an
-// error.
-func (ln *Listener) Close() error {
-	ln.mu.Lock()
-	defer ln.mu.Unlock()
-	if ln.closed {
-		return nil
-	}
-	ln.closed = true
-	close(ln.connc)
-	close(ln.donec)
-	ln.sc.Close()
-	return nil
-}
-
-// Addr returns a dummy address. This exists only to conform to the
-// net.Listener interface.
-func (ln *Listener) Addr() net.Addr { return connAddr{} }
-
-// configureHTTP2Transport enables pings to avoid issues with stale connections.
-func configureHTTP2Transport(client *http.Client) error { - t, ok := client.Transport.(*http.Transport) - if !ok { - // can't get the transport it will fail later if not http2 supported - return nil - } - - if t.TLSClientConfig == nil { - return fmt.Errorf("only TLS supported") - } - - for _, v := range t.TLSClientConfig.NextProtos { - // http2 already configured - if v == "h2" { - return nil - } - } - - t2, err := http2.ConfigureTransports(t) - if err != nil { - return err - } - - t2.ReadIdleTimeout = time.Duration(30) * time.Second - t2.PingTimeout = time.Duration(15) * time.Second - return nil -} - -type connAddr struct{} - -func (connAddr) Network() string { return "rwconn" } -func (connAddr) String() string { return "rwconn" } diff --git a/pkg/tunneler/listener_test.go b/pkg/tunneler/listener_test.go deleted file mode 100644 index 8e7fd25d78a..00000000000 --- a/pkg/tunneler/listener_test.go +++ /dev/null @@ -1,80 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Based on https://github.com/golang/build/blob/master/revdial/v2/revdial.go -package tunneler - -import "testing" - -func Test_SyncerTunnelURL(t *testing.T) { - tests := []struct { - name string - host string - ws string - target string - want string - wantErr bool - }{ - { - name: "valid", - host: "https://host:9443/base", - ws: "myws", - target: "syncer001", - want: "https://host:9443/base/clusters/myws/apis/workload.kcp.io/v1alpha1/synctargets/syncer001", - }, - { - name: "invalid host scheme", - host: "http://host:9443/base", - ws: "myws", - target: "syncer001", - wantErr: true, - }, - { - name: "invalid host port", - host: "https://host:port/base", - ws: "myws", - target: "syncer001", - wantErr: true, - }, - { - name: "empty ws", - host: "https://host:9443/base", - ws: "", - target: "syncer002", - wantErr: true, - }, - - { - name: "empty target", - host: "https://host:9443/base", - ws: "myws", - target: "", - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := SyncerTunnelURL(tt.host, tt.ws, tt.target) - if (err != nil) != tt.wantErr { - t.Errorf("SyncerTunnelURL() error = %v, wantErr %v", err, tt.wantErr) - return - } - if got != tt.want { - t.Errorf("SyncerTunnelURL() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/pkg/tunneler/podsubresourceproxy_handler.go b/pkg/tunneler/podsubresourceproxy_handler.go deleted file mode 100644 index e710498b864..00000000000 --- a/pkg/tunneler/podsubresourceproxy_handler.go +++ /dev/null @@ -1,274 +0,0 @@ -/* -Copyright 2021 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
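
For orientation, a minimal sketch of the path rewrite the handler below performs via podSubresourceURL; the namespace and pod names are illustrative (the hashed namespace mirrors the test fixtures further down):

    package main

    import (
        "fmt"
        "net/url"
    )

    func main() {
        // The proxy swaps the logical-cluster namespace in the request path
        // for the physical (downstream) namespace before tunneling the request.
        path, err := url.JoinPath("/api/v1/namespaces", "kcp-xwdjipyflk7g", "pods", "foo", "exec")
        if err != nil {
            panic(err)
        }
        fmt.Println(path) // /api/v1/namespaces/kcp-xwdjipyflk7g/pods/foo/exec
    }
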
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package tunneler
-
-import (
-	"context"
-	"fmt"
-	"net/http"
-	"net/http/httputil"
-	"net/url"
-	"strings"
-
-	"github.com/kcp-dev/client-go/dynamic"
-	"github.com/kcp-dev/logicalcluster/v3"
-
-	corev1 "k8s.io/api/core/v1"
-	apierrors "k8s.io/apimachinery/pkg/api/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/runtime/schema"
-	"k8s.io/apimachinery/pkg/runtime/serializer"
-	"k8s.io/apiserver/pkg/endpoints/handlers/responsewriters"
-	"k8s.io/apiserver/pkg/endpoints/request"
-	"k8s.io/klog/v2"
-
-	"github.com/kcp-dev/kcp/pkg/indexers"
-	"github.com/kcp-dev/kcp/pkg/syncer/shared"
-	workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1"
-	kcpinformers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions"
-)
-
-var (
-	errorScheme = runtime.NewScheme()
-	errorCodecs = serializer.NewCodecFactory(errorScheme)
-)
-
-func init() {
-	errorScheme.AddUnversionedTypes(metav1.Unversioned,
-		&metav1.Status{},
-	)
-}
-
-// WithPodSubresourceProxying proxies pod subresource requests through the syncer tunnel.
-func (tn *tunneler) WithPodSubresourceProxying(apiHandler http.Handler, kcpclient dynamic.ClusterInterface, kcpInformer kcpinformers.SharedInformerFactory, globalKcpInformer kcpinformers.SharedInformerFactory) http.Handler {
-	syncTargetInformer, err := kcpInformer.ForResource(workloadv1alpha1.SchemeGroupVersion.WithResource("synctargets"))
-	if err != nil {
-		panic(err)
-	}
-	globalSyncTargetInformer, err := globalKcpInformer.ForResource(workloadv1alpha1.SchemeGroupVersion.WithResource("synctargets"))
-	if err != nil {
-		panic(err)
-	}
-
-	return &podSubresourceProxyHandler{
-		proxyFunc:  tn.Proxy,
-		apiHandler: apiHandler,
-		getPodByName: func(ctx context.Context, cluster logicalcluster.Name, namespace, podName string) (*corev1.Pod, error) {
-			unstr, err := kcpclient.Cluster(cluster.Path()).Resource(corev1.SchemeGroupVersion.WithResource("pods")).Namespace(namespace).Get(ctx, podName, metav1.GetOptions{})
-			if err != nil {
-				return nil, err
-			}
-			pod := &corev1.Pod{}
-			if err := runtime.DefaultUnstructuredConverter.FromUnstructured(unstr.Object, pod); err != nil {
-				return nil, err
-			}
-			return pod, nil
-		},
-		getSyncTargetBySynctargetKey: func(ctx context.Context, synctargetKey string) (*workloadv1alpha1.SyncTarget, error) {
-			synctargets, err := indexers.ByIndexWithFallback[*workloadv1alpha1.SyncTarget](syncTargetInformer.Informer().GetIndexer(), globalSyncTargetInformer.Informer().GetIndexer(), indexers.SyncTargetsBySyncTargetKey, synctargetKey)
-			if err != nil {
-				return nil, err
-			}
-			if len(synctargets) != 1 {
-				return nil, fmt.Errorf("expected 1 synctarget for key %q, got %d", synctargetKey, len(synctargets))
-			}
-			return synctargets[0], nil
-		},
-	}
-}
-
-type podSubresourceProxyHandler struct {
-	proxyFunc                    func(clusterName logicalcluster.Name, syncerName string, rw http.ResponseWriter, req *http.Request)
-	apiHandler                   http.Handler
-	getPodByName                 func(ctx context.Context, cluster logicalcluster.Name, namespace, podName string) (*corev1.Pod, error)
-	getSyncTargetBySynctargetKey func(ctx context.Context, synctargetKey string) (*workloadv1alpha1.SyncTarget, error)
-}
-
-func (b *podSubresourceProxyHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
-	logger := klog.FromContext(req.Context())
-	cluster := request.ClusterFrom(req.Context())
-	ctx := req.Context()
-	requestInfo, ok
:= request.RequestInfoFrom(ctx)
-	// If the requestInfo is not present, fall through to the next handler.
-	if !ok {
-		b.apiHandler.ServeHTTP(w, req)
-		return
-	}
-
-	if !requestInfo.IsResourceRequest {
-		b.apiHandler.ServeHTTP(w, req)
-		return
-	}
-
-	if cluster.Name.Empty() {
-		b.apiHandler.ServeHTTP(w, req)
-		return
-	}
-
-	if requestInfo.Resource != "pods" || requestInfo.Subresource == "" {
-		b.apiHandler.ServeHTTP(w, req)
-		return
-	}
-
-	namespace := requestInfo.Namespace
-	podName := requestInfo.Name
-	subresource := requestInfo.Subresource
-
-	// If any of these is empty, fall through to the next handler.
-	if namespace == "" || podName == "" || subresource == "" {
-		b.apiHandler.ServeHTTP(w, req)
-		return
-	}
-
-	// Check that the subresource is valid; for pods we support exec, log, portforward, proxy, attach and ephemeralcontainers.
-	if subresource != "exec" && subresource != "log" && subresource != "portforward" && subresource != "proxy" && subresource != "attach" && subresource != "ephemeralcontainers" {
-		responsewriters.ErrorNegotiated(
-			apierrors.NewBadRequest(fmt.Sprintf("invalid or unimplemented subresource %q", subresource)),
-			errorCodecs, schema.GroupVersion{}, w, req,
-		)
-		return
-	}
-
-	// Now let's start the proxying.
-	logger.Info("proxying pod subresource", "namespace", namespace, "podName", podName, "subresource", subresource)
-
-	pod, err := b.getPodByName(req.Context(), cluster.Name, namespace, podName)
-	if apierrors.IsNotFound(err) {
-		responsewriters.ErrorNegotiated(
-			apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: "pods"}, podName),
-			errorCodecs, schema.GroupVersion{}, w, req,
-		)
-		return
-	}
-	if err != nil {
-		responsewriters.ErrorNegotiated(
-			apierrors.NewInternalError(err),
-			errorCodecs, schema.GroupVersion{}, w, req,
-		)
-		return
-	}
-
-	// Let's get the synctargetKey
-	var synctargetKey string
-	for k, v := range pod.GetLabels() {
-		if strings.HasPrefix(k, workloadv1alpha1.ClusterResourceStateLabelPrefix) {
-			if v == string(workloadv1alpha1.ResourceStateUpsync) {
-				synctargetKey = strings.TrimPrefix(k, workloadv1alpha1.ClusterResourceStateLabelPrefix)
-				break
-			}
-		}
-	}
-	if synctargetKey == "" {
-		responsewriters.ErrorNegotiated(
-			apierrors.NewBadRequest(fmt.Sprintf("pod %q is not upsynced", podName)),
-			errorCodecs, schema.GroupVersion{}, w, req,
-		)
-		return
-	}
-
-	synctarget, err := b.getSyncTargetBySynctargetKey(req.Context(), synctargetKey)
-	if apierrors.IsNotFound(err) {
-		logger.Error(err, "synctarget not found when trying to proxy subresource", "synctargetKey", synctargetKey, "subresource", subresource, "podName", podName)
-		responsewriters.ErrorNegotiated(
-			apierrors.NewServiceUnavailable(fmt.Sprintf("subresource %q is not available right now for pod %q", subresource, podName)),
-			errorCodecs, schema.GroupVersion{}, w, req,
-		)
-		return
-	}
-	if err != nil {
-		responsewriters.ErrorNegotiated(
-			apierrors.NewInternalError(err),
-			errorCodecs, schema.GroupVersion{}, w, req,
-		)
-		return
-	}
-
-	// Let's find the downstream namespace for the pod
-	// TODO(jmprusi): This should rely on an annotation in the resource instead of calculating the downstreamNamespace as
-	// there's a possibility that the namespace name is different from the calculated one (migrations, etc).
- downstreamNamespace, err := shared.PhysicalClusterNamespaceName(shared.NamespaceLocator{ - SyncTarget: shared.SyncTargetLocator{ - ClusterName: logicalcluster.From(synctarget).String(), - Name: synctarget.GetName(), - UID: synctarget.GetUID(), - }, - ClusterName: cluster.Name, - Namespace: namespace, - }) - if err != nil { - logger.Error(err, "unable to find downstream namespace for pod", "namespace", namespace, "podName", podName) - responsewriters.ErrorNegotiated( - apierrors.NewInternalError(err), - errorCodecs, schema.GroupVersion{}, w, req, - ) - return - } - - // Rewrite the path to point to the SyncerTunnel proxy path. - podDownstreamURL, err := podSubresourceURL(downstreamNamespace, podName, subresource) - if err != nil { - logger.Error(err, "unable to get syncer tunnel proxy path") - responsewriters.ErrorNegotiated( - apierrors.NewInternalError(err), - errorCodecs, schema.GroupVersion{}, w, req, - ) - return - } - // Set the URL path to the calculated - req.URL.Path = podDownstreamURL.Path - - b.proxyFunc(logicalcluster.From(synctarget), synctarget.GetName(), w, req) -} - -func podSubresourceURL(downstreamNamespaceName, podName, subresource string) (*url.URL, error) { - if downstreamNamespaceName == "" || podName == "" || subresource == "" { - return nil, fmt.Errorf("invalid tunnel path: downstreamNamespaceName=%q, podName=%q, subresource=%q", downstreamNamespaceName, podName, subresource) - } - proxyPath, err := url.JoinPath("/api/v1/namespaces", downstreamNamespaceName, "pods", podName, subresource) - if err != nil { - return nil, err - } - return url.Parse(proxyPath) -} - -// Proxy proxies the request to the syncer identified by the cluster and syncername. -func (tn *tunneler) Proxy(clusterName logicalcluster.Name, syncerName string, rw http.ResponseWriter, req *http.Request) { - d := tn.getDialer(clusterName, syncerName) - if d == nil || isClosedChan(d.Done()) { - rw.Header().Set("Retry-After", "1") - http.Error(rw, "syncer tunnels: tunnel closed", http.StatusServiceUnavailable) - return - } - - target, err := url.Parse("http://" + syncerName) - if err != nil { - http.Error(rw, err.Error(), http.StatusInternalServerError) - return - } - - proxy := httputil.NewSingleHostReverseProxy(target) - proxy.Transport = &http.Transport{ - Proxy: nil, // no proxies - DialContext: d.Dial, // use a reverse connection - ForceAttemptHTTP2: false, // this is a tunneled connection - DisableKeepAlives: true, // one connection per reverse connection - MaxIdleConnsPerHost: -1, - } - - proxy.ServeHTTP(rw, req) -} diff --git a/pkg/tunneler/podsubresourceproxy_handler_test.go b/pkg/tunneler/podsubresourceproxy_handler_test.go deleted file mode 100644 index f2828965dc3..00000000000 --- a/pkg/tunneler/podsubresourceproxy_handler_test.go +++ /dev/null @@ -1,181 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
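
The handler above derives the SyncTarget key from the pod's state label, and the test below fabricates such a label. A standalone sketch of that lookup; the constants mirror workloadv1alpha1.ClusterResourceStateLabelPrefix and workloadv1alpha1.ResourceStateUpsync, and the key value is illustrative:

    package main

    import (
        "fmt"
        "strings"
    )

    const (
        clusterResourceStateLabelPrefix = "state.workload.kcp.io/"
        resourceStateUpsync             = "Upsync"
    )

    // syncTargetKeyForUpsyncedPod returns the SyncTarget key encoded in the
    // pod's state label, or "" if the pod is not upsynced.
    func syncTargetKeyForUpsyncedPod(labels map[string]string) string {
        for k, v := range labels {
            if strings.HasPrefix(k, clusterResourceStateLabelPrefix) && v == resourceStateUpsync {
                return strings.TrimPrefix(k, clusterResourceStateLabelPrefix)
            }
        }
        return ""
    }

    func main() {
        labels := map[string]string{"state.workload.kcp.io/ABCDEFGHIJKL": "Upsync"}
        fmt.Println(syncTargetKeyForUpsyncedPod(labels)) // ABCDEFGHIJKL
    }
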
-*/ - -package tunneler - -import ( - "context" - "fmt" - "io" - "net/http" - "net/http/httptest" - "net/url" - "testing" - - "github.com/kcp-dev/logicalcluster/v3" - "github.com/stretchr/testify/require" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apiserver/pkg/endpoints/request" - - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" -) - -func TestPodSubresourceProxyingHandler(t *testing.T) { - tests := map[string]struct { - subresource string - workspace string - podExists bool - podIsUpsynced bool - syncTargetExists bool - synctargetWorkspace string - expectedError string - expectedProxiedPath string - }{ - "valid request with existing pod and synctarget, pod and synctarget on the same workspace": { - subresource: "exec", - workspace: "cluster1", - podExists: true, - syncTargetExists: true, - podIsUpsynced: true, - synctargetWorkspace: "cluster1", - expectedProxiedPath: "/api/v1/namespaces/kcp-xwdjipyflk7g/pods/foo/exec", - }, - "valid request with existing pod and synctarget, pod and synctarget on different workspaces": { - subresource: "exec", - workspace: "cluster1", - podExists: true, - podIsUpsynced: true, - syncTargetExists: true, - synctargetWorkspace: "cluster2", - expectedProxiedPath: "/api/v1/namespaces/kcp-1kdcree89tsy/pods/foo/exec", - }, - "non existing pod": { - subresource: "exec", - workspace: "cluster1", - podExists: false, - expectedError: "404 Not Found", - }, - "non existing synctarget": { - subresource: "exec", - workspace: "cluster1", - podExists: true, - podIsUpsynced: true, - syncTargetExists: false, - expectedError: "503 Service Unavailable", - }, - "valid request but pod is not upsynced": { - subresource: "exec", - workspace: "cluster1", - podExists: true, - podIsUpsynced: false, - syncTargetExists: true, - expectedError: "400 Bad Request", - }, - "invalid subresource, expect error": { - subresource: "invalid", - workspace: "cluster1", - podExists: true, - expectedError: "400 Bad Request", - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - ctx := context.Background() - proxiedPath := "" - handler := &podSubresourceProxyHandler{ - proxyFunc: func(cluster logicalcluster.Name, syncTargetName string, w http.ResponseWriter, req *http.Request) { - proxiedPath = req.URL.Path - if tc.syncTargetExists && tc.podExists { - w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, nil) - } - }, - apiHandler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}), - getPodByName: func(ctx context.Context, cluster logicalcluster.Name, namespace, podName string) (*corev1.Pod, error) { - if !tc.podExists { - return nil, errors.NewNotFound(schema.GroupResource{Resource: "pods"}, podName) - } - stateLabel := "Upsync" - if !tc.podIsUpsynced { - stateLabel = "Synced" - } - return &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: podName, - Namespace: namespace, - Labels: map[string]string{ - "state.workload.kcp.io/ABCDEFGHIJKL": stateLabel, - }, - }, - }, nil - }, - getSyncTargetBySynctargetKey: func(ctx context.Context, synctargetKey string) (*workloadv1alpha1.SyncTarget, error) { - if !tc.syncTargetExists { - return nil, errors.NewNotFound(schema.GroupResource{Resource: "synctargets"}, synctargetKey) - } - return &workloadv1alpha1.SyncTarget{ - ObjectMeta: metav1.ObjectMeta{ - Name: "synctarget1", - Annotations: map[string]string{ - "workload.kcp.io/key": "ABCDEFGHIJKL", - "kcp.io/cluster": 
tc.synctargetWorkspace,
-							},
-						},
-					}, nil
-				},
-			}
-			namespace := "default"
-			podName := "foo"
-			path, err := url.JoinPath("/api/v1/namespaces", namespace, "pods", podName, tc.subresource)
-			if err != nil {
-				t.Fatal(err)
-			}
-			r := httptest.NewRequest(http.MethodGet, path, nil).WithContext(request.WithRequestInfo(
-				request.WithCluster(ctx, request.Cluster{Name: logicalcluster.Name(tc.workspace)}),
-				&request.RequestInfo{
-					Verb:              "get",
-					Resource:          "pods",
-					APIGroup:          "",
-					APIVersion:        "v1",
-					Name:              podName,
-					Namespace:         namespace,
-					IsResourceRequest: true,
-					Subresource:       tc.subresource,
-					Path:              path,
-				}))
-
-			rw := httptest.NewRecorder()
-			handler.ServeHTTP(rw, r)
-			result := rw.Result()
-			defer result.Body.Close()
-			bytes, err := io.ReadAll(result.Body)
-			require.NoError(t, err, "Response body cannot be read")
-			if tc.expectedError != "" {
-				require.Equal(t, tc.expectedError, result.Status, "Unexpected status code: %s", string(bytes))
-				return
-			}
-			require.Equal(t, http.StatusOK, result.StatusCode, "Unexpected status code: %s", string(bytes))
-			if tc.expectedProxiedPath != "" {
-				require.Equal(t, tc.expectedProxiedPath, proxiedPath, "Unexpected proxied path")
-			}
-		})
-	}
-}
diff --git a/pkg/tunneler/syncertunnel_handler.go b/pkg/tunneler/syncertunnel_handler.go
deleted file mode 100644
index 06b73f6fc03..00000000000
--- a/pkg/tunneler/syncertunnel_handler.go
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
-Copyright 2023 The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
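
The tunnel handler that follows streams request and response bodies through an http.Flusher (wrapped by flushWriter in tunnel.go). A minimal sketch of that flush-per-write pattern, with an illustrative endpoint and address:

    package main

    import (
        "fmt"
        "net/http"
        "time"
    )

    func main() {
        // Flushing after every write pushes bytes to the peer immediately,
        // instead of letting them sit in the server's buffer; this is what
        // keeps a long-lived tunnel connection moving.
        http.HandleFunc("/stream", func(w http.ResponseWriter, r *http.Request) {
            flusher, ok := w.(http.Flusher)
            if !ok {
                http.Error(w, "flusher not implemented", http.StatusInternalServerError)
                return
            }
            for i := 0; i < 3; i++ {
                fmt.Fprintf(w, "tick %d\n", i)
                flusher.Flush()
                time.Sleep(100 * time.Millisecond)
            }
        })
        //nolint:errcheck
        http.ListenAndServe("127.0.0.1:8080", nil)
    }
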
-*/
-
-package tunneler
-
-import (
-	"net/http"
-	"time"
-
-	"github.com/aojea/rwconn"
-
-	genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
-	"k8s.io/klog/v2"
-
-	workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1"
-)
-
-// WithSyncerTunnelHandler adds an HTTP handler that handles reverse connections via the tunnel subresource:
-//
-// requests to https://host/clusters/<cluster>/apis/workload.kcp.io/v1alpha1/synctargets/<sync-target>/tunnel
-// establish reverse connections and queue them so they can be consumed by the dialer.
-func (tn *tunneler) WithSyncerTunnelHandler(apiHandler http.Handler) http.HandlerFunc {
-	return func(w http.ResponseWriter, r *http.Request) {
-		ctx := r.Context()
-		logger := klog.FromContext(ctx)
-
-		ri, ok := genericapirequest.RequestInfoFrom(ctx)
-		if !ok {
-			apiHandler.ServeHTTP(w, r)
-			return
-		}
-
-		if !ri.IsResourceRequest ||
-			ri.Resource != "synctargets" ||
-			ri.Subresource != "tunnel" ||
-			ri.APIGroup != workloadv1alpha1.SchemeGroupVersion.Group ||
-			ri.APIVersion != workloadv1alpha1.SchemeGroupVersion.Version ||
-			ri.Name == "" {
-			apiHandler.ServeHTTP(w, r)
-			return
-		}
-
-		cluster, err := genericapirequest.ValidClusterFrom(ctx)
-		if err != nil {
-			apiHandler.ServeHTTP(w, r)
-			return
-		}
-
-		clusterName := cluster.Name
-		syncerName := ri.Name
-
-		logger = logger.WithValues("cluster", clusterName, "syncerName", syncerName, "action", "tunnel")
-		logger.V(5).Info("tunneler connection received")
-		d := tn.getDialer(clusterName, syncerName)
-		// First flush response headers
-		flusher, ok := w.(http.Flusher)
-		if !ok {
-			http.Error(w, "flusher not implemented", http.StatusInternalServerError)
-			return
-		}
-
-		// first connection to register the dialer and start the control loop
-		fw := &flushWriter{w: w, f: flusher}
-		doneCh := make(chan struct{})
-		conn := rwconn.NewConn(r.Body, fw, rwconn.SetWriteDelay(500*time.Millisecond), rwconn.SetCloseHook(func() {
-			// exit the handler
-			close(doneCh)
-		}))
-		if d == nil || isClosedChan(d.Done()) {
-			// start clean
-			tn.deleteDialer(clusterName, syncerName)
-			tn.createDialer(clusterName, syncerName, conn)
-			// start control loop
-			select {
-			case <-r.Context().Done():
-				conn.Close()
-			case <-doneCh:
-			}
-			logger.V(5).Info("stopped tunnel control connection")
-			return
-		}
-		logger.Info("Creating tunnel connection", "clustername", clusterName, "syncername", syncerName)
-		// create a reverse connection
-		logger.V(5).Info("tunnel connection started")
-		select {
-		case d.incomingConn <- conn:
-		case <-d.Done():
-			http.Error(w, "syncer tunnels: tunnel closed", http.StatusInternalServerError)
-			return
-		}
-		// keep the handler alive until the connection is closed
-		select {
-		case <-r.Context().Done():
-			conn.Close()
-		case <-doneCh:
-		}
-		logger.V(5).Info("tunnel connection done", "remoteAddr", r.RemoteAddr)
-	}
-}
diff --git a/pkg/tunneler/tunnel.go b/pkg/tunneler/tunnel.go
deleted file mode 100644
index 21973da68f4..00000000000
--- a/pkg/tunneler/tunnel.go
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
-Copyright 2022 The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package tunneler
-
-import (
-	"fmt"
-	"io"
-	"net"
-	"net/http"
-	"net/url"
-	"sync"
-
-	"github.com/kcp-dev/logicalcluster/v3"
-
-	workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1"
-)
-
-const (
-	tunnelSubresourcePath = "tunnel"
-)
-
-type controlMsg struct {
-	Command  string `json:"command,omitempty"`  // "keep-alive", "conn-ready", "pickup-failed"
-	ConnPath string `json:"connPath,omitempty"` // conn pick-up URL path for "conn-url", "pickup-failed"
-	Err      string `json:"err,omitempty"`
-}
-
-type key struct {
-	clusterName    logicalcluster.Name
-	syncTargetName string
-}
-
-// tunneler contains a pool of Dialers to create reverse connections
-// based on the cluster and syncer name.
-type tunneler struct {
-	mu   sync.Mutex
-	pool map[key]*Dialer
-}
-
-func NewTunneler() *tunneler {
-	return &tunneler{
-		pool: make(map[key]*Dialer),
-		mu:   sync.Mutex{},
-	}
-}
-
-// getDialer returns a reverse dialer for the id.
-func (tn *tunneler) getDialer(clusterName logicalcluster.Name, syncTargetName string) *Dialer {
-	tn.mu.Lock()
-	defer tn.mu.Unlock()
-	id := key{clusterName, syncTargetName}
-	return tn.pool[id]
-}
-
-// createDialer creates a reverse dialer with id;
-// it's a no-op if a dialer already exists.
-func (tn *tunneler) createDialer(clusterName logicalcluster.Name, syncTargetName string, conn net.Conn) *Dialer {
-	tn.mu.Lock()
-	defer tn.mu.Unlock()
-	id := key{clusterName, syncTargetName}
-	if d, ok := tn.pool[id]; ok {
-		return d
-	}
-	d := NewDialer(conn)
-	tn.pool[id] = d
-	return d
-}
-
-// deleteDialer deletes the reverse dialer for a given id.
-func (tn *tunneler) deleteDialer(clusterName logicalcluster.Name, syncTargetName string) {
-	tn.mu.Lock()
-	defer tn.mu.Unlock()
-	id := key{clusterName, syncTargetName}
-	delete(tn.pool, id)
-}
-
-// SyncerTunnelURL builds the destination URL in the format expected by the Dialer.
-func SyncerTunnelURL(host, ws, target string) (string, error) {
-	if target == "" || ws == "" {
-		return "", fmt.Errorf("target or ws cannot be empty")
-	}
-	hostURL, err := url.Parse(host)
-	if err != nil || hostURL.Scheme != "https" || hostURL.Host == "" {
-		return "", fmt.Errorf("wrong url format, expected https://host<:port>/: %w", err)
-	}
-	return url.JoinPath(hostURL.String(), "clusters", ws, "apis", workloadv1alpha1.SchemeGroupVersion.String(), "synctargets", target)
-}
-
-// flushWriter flushes the underlying writer on every write.
-type flushWriter struct {
-	w io.Writer
-	f http.Flusher
-}
-
-func (w *flushWriter) Write(data []byte) (int, error) {
-	n, err := w.w.Write(data)
-	w.f.Flush()
-	return n, err
-}
-
-func (w *flushWriter) Close() error {
-	return nil
-}
-
-func isClosedChan(c <-chan struct{}) bool {
-	select {
-	case <-c:
-		return true
-	default:
-		return false
-	}
-}
diff --git a/pkg/tunneler/tunnel_test.go b/pkg/tunneler/tunnel_test.go
deleted file mode 100644
index 062ddc5bfaa..00000000000
--- a/pkg/tunneler/tunnel_test.go
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
-Copyright 2022 The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
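
A quick usage sketch for SyncerTunnelURL above, written as a testable example; it would have to live alongside the package, and the host and names are illustrative (they mirror a test case below):

    package tunneler

    import "fmt"

    func ExampleSyncerTunnelURL() {
        u, err := SyncerTunnelURL("https://example.com", "root:testing:testing", "cluster1")
        if err != nil {
            fmt.Println(err)
            return
        }
        fmt.Println(u)
        // Output: https://example.com/clusters/root:testing:testing/apis/workload.kcp.io/v1alpha1/synctargets/cluster1
    }
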
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package tunneler - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestSyncerTunnelURL(t *testing.T) { - tests := map[string]struct { - host string - workspace string - target string - expected string - expectError bool - }{ - "valid host, no ports": { - host: "https://example.com", - workspace: "root:testing:testing", - target: "cluster1", - expected: "https://example.com/clusters/root:testing:testing/apis/workload.kcp.io/v1alpha1/synctargets/cluster1", - }, - "valid host, with ports": { - host: "https://example.com:443", - workspace: "root:testing:testing", - target: "cluster1", - expected: "https://example.com:443/clusters/root:testing:testing/apis/workload.kcp.io/v1alpha1/synctargets/cluster1", - }, - "invalid host": { - host: "example.com:443:443", - workspace: "root:testing:testing", - target: "cluster1", - expectError: true, - }, - "invalid host, no scheme": { - host: "example.com:443:443", - workspace: "root:testing:testing", - target: "cluster1", - expectError: true, - }, - "invalid host, no scheme, no port": { - host: "example.com:443:443", - workspace: "root:testing:testing", - target: "cluster1", - expectError: true, - }, - "invalid host, no scheme, no port, no host": { - host: ":443:443", - workspace: "root:testing:testing", - target: "cluster1", - expectError: true, - }, - "empty host": { - host: "", - workspace: "root:testing:testing", - target: "cluster1", - expectError: true, - }, - "empty workspace": { - host: "example.com:443", - workspace: "", - target: "cluster1", - expectError: true, - }, - "empty target": { - host: "example.com:443", - workspace: "root:testing:testing", - target: "", - expectError: true, - }, - } - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - got, err := SyncerTunnelURL(tc.host, tc.workspace, tc.target) - if tc.expectError { - require.Error(t, err) - } else { - require.NoError(t, err) - require.Equal(t, tc.expected, got) - } - }) - } -} diff --git a/pkg/virtual/framework/internalapis/fixtures/synctargets.yaml b/pkg/virtual/framework/internalapis/fixtures/synctargets.yaml deleted file mode 100644 index 764125a93d0..00000000000 --- a/pkg/virtual/framework/internalapis/fixtures/synctargets.yaml +++ /dev/null @@ -1,205 +0,0 @@ -metadata: - creationTimestamp: null - name: internal.synctargets.workload.kcp.io -spec: - group: workload.kcp.io - names: - kind: SyncTarget - plural: synctargets - singular: synctarget - scope: Cluster - versions: - - name: v1alpha1 - schema: - description: SyncTarget describes a member cluster capable of running workloads. - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Spec holds the desired state. 
- properties: - cells: - additionalProperties: - type: string - description: Cells is a set of labels to identify the cells the SyncTarget - belongs to. SyncTargets with the same cells run as they are in the - same physical cluster. Each key/value pair in the cells should be - added and updated by service providers (i.e. a network provider updates - one key/value, while the storage provider updates another.) - type: object - evictAfter: - description: EvictAfter controls cluster schedulability of new and existing - workloads. After the EvictAfter time, any workload scheduled to the - cluster will be unassigned from the cluster. By default, workloads - scheduled to the cluster are not evicted. - format: date-time - type: string - supportedAPIExports: - description: SupportedAPIExports defines a set of APIExports supposed - to be supported by this SyncTarget. The SyncTarget will be selected - to deploy the workload only when the resource schema on the SyncTarget - is compatible with the resource schema included in the exports. - items: - description: APIExportReference provides the fields necessary to resolve - an APIExport. - properties: - export: - description: export is the name of the APIExport. - type: string - path: - description: path is the fully-qualified path to the workspace - containing the APIExport. If it is empty, the current workspace - is assumed. - type: string - required: - - export - type: object - type: array - unschedulable: - description: Unschedulable controls cluster schedulability of new workloads. - By default, cluster is schedulable. - type: boolean - type: object - status: - description: Status communicates the observed state. - properties: - allocatable: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: Allocatable represents the resources that are available - for scheduling. - type: object - capacity: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: Capacity represents the total resources of the cluster. - type: object - conditions: - description: Current processing state of the SyncTarget. - items: - description: Condition defines an observation of a object operational - state. - properties: - lastTransitionTime: - description: Last time the condition transitioned from one status - to another. This should be when the underlying condition changed. - If that is not known, then using the time when the API field - changed is acceptable. - format: date-time - type: string - message: - description: A human readable message indicating details about - the transition. This field may be empty. - type: string - reason: - description: The reason for the condition's last transition in - CamelCase. The specific API may choose whether or not this field - is considered a guaranteed API. This field may not be empty. - type: string - severity: - description: Severity provides an explicit classification of Reason - code, so the users or machines can immediately understand the - current situation and act accordingly. The Severity field MUST - be set only when Status=False. - type: string - status: - description: Status of the condition, one of True, False, Unknown. 
- type: string - type: - description: Type of condition in CamelCase or in foo.example.com/CamelCase. - Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. - type: string - required: - - type - - status - - lastTransitionTime - type: object - type: array - lastSyncerHeartbeatTime: - description: A timestamp indicating when the syncer last reported status. - format: date-time - type: string - syncedResources: - description: SyncedResources represents the resources that the syncer - of the SyncTarget can sync. It MUST be updated by kcp server. - items: - properties: - identityHash: - description: identityHash is the identity for a given APIExport - that the APIResourceSchema belongs to. The hash can be found - on APIExport and APIResourceSchema's status. It will be empty - for core types. - type: string - state: - description: state indicate whether the resources schema is compatible - to the SyncTarget. It must be updated by syncer after checking - the API compatibility on SyncTarget. - type: string - versions: - description: versions are the resource versions the syncer can - choose to sync depending on availability on the downstream cluster. - Conversion to the storage version, if necessary, will be done - on the kcp side. The versions are ordered by precedence and - the first version compatible is preferred by syncer. - items: - type: string - type: array - required: - - versions - type: object - type: array - tunnelWorkspaces: - description: TunnelWorkspaces contains all URLs (one per shard) that - point to the SyncTarget workspace in order to setup the tunneler. - items: - properties: - url: - description: url is the URL the Syncer should use to connect to - the Syncer tunnel for a given shard. - type: string - required: - - url - type: object - type: array - virtualWorkspaces: - description: VirtualWorkspaces contains all virtual workspace URLs. - items: - properties: - syncerURL: - description: SyncerURL is the URL of the syncer virtual workspace. - type: string - upsyncerURL: - description: UpsyncerURL is the URL of the upsyncer virtual workspace. - type: string - required: - - syncerURL - - upsyncerURL - type: object - type: array - type: object - type: object - served: true - storage: true - subresources: - status: {} diff --git a/pkg/virtual/framework/internalapis/fixtures/workspaces.yaml b/pkg/virtual/framework/internalapis/fixtures/workspaces.yaml new file mode 100644 index 00000000000..9434e25112b --- /dev/null +++ b/pkg/virtual/framework/internalapis/fixtures/workspaces.yaml @@ -0,0 +1,178 @@ +metadata: + creationTimestamp: null + name: internal.workspaces.tenancy.kcp.io +spec: + group: tenancy.kcp.io + names: + kind: Workspace + plural: workspaces + singular: workspace + scope: Cluster + versions: + - name: v1alpha1 + schema: + description: |- + Workspace defines a generic Kubernetes-cluster-like endpoint, with standard Kubernetes discovery APIs, OpenAPI and resource API endpoints. + + A workspace can be backed by different concrete types of workspace implementation, depending on access pattern. All workspace implementations share the characteristic that the URL that serves a given workspace can be used with standard Kubernetes API machinery and client libraries and command line tools. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. 
Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: WorkspaceSpec holds the desired state of the Workspace. + properties: + URL: + description: |- + URL is the address under which the Kubernetes-cluster-like endpoint can be found. This URL can be used to access the workspace with standard Kubernetes client libraries and command line tools. + + Set by the system. + type: string + cluster: + description: |- + cluster is the name of the logical cluster this workspace is stored under. + + Set by the system. + type: string + location: + description: |- + location constraints where this workspace can be scheduled to. + + If the no location is specified, an arbitrary location is chosen. + properties: + selector: + description: selector is a label selector that filters workspace + scheduling targets. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + type: object + type: + description: |- + type defines properties of the workspace both on creation (e.g. initial resources and initially installed APIs) and during runtime (e.g. permissions). If no type is provided, the default type for the workspace in which this workspace is nesting will be used. + + The type is a reference to a WorkspaceType in the listed workspace, but lower-cased. The WorkspaceType existence is validated at admission during creation. The type is immutable after creation. The use of a type is gated via the RBAC workspacetypes/use resource permission. + properties: + name: + description: name is the name of the WorkspaceType + type: string + path: + description: path is an absolute reference to the workspace that + owns this type, e.g. root:org:ws. 
+ type: string + required: + - name + type: object + type: object + status: + description: WorkspaceStatus communicates the observed state of the Workspace. + properties: + conditions: + description: Current processing state of the Workspace. + items: + description: Condition defines an observation of a object operational + state. + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status + to another. This should be when the underlying condition changed. + If that is not known, then using the time when the API field + changed is acceptable. + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. This field may be empty. + type: string + reason: + description: The reason for the condition's last transition in + CamelCase. The specific API may choose whether or not this field + is considered a guaranteed API. This field may not be empty. + type: string + severity: + description: Severity provides an explicit classification of Reason + code, so the users or machines can immediately understand the + current situation and act accordingly. The Severity field MUST + be set only when Status=False. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. + type: string + required: + - type + - status + - lastTransitionTime + type: object + type: array + initializers: + description: initializers must be cleared by a controller before the + workspace is ready and can be used. + items: + type: string + type: array + phase: + description: Phase of the workspace (Scheduling, Initializing, Ready). 
+ type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/pkg/virtual/framework/internalapis/import_test.go b/pkg/virtual/framework/internalapis/import_test.go index fc19283a2ee..98f7a4c2a6f 100644 --- a/pkg/virtual/framework/internalapis/import_test.go +++ b/pkg/virtual/framework/internalapis/import_test.go @@ -36,7 +36,7 @@ import ( "sigs.k8s.io/yaml" kcpopenapi "github.com/kcp-dev/kcp/pkg/openapi" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" + tenancyv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1" ) //go:embed fixtures/*.yaml @@ -67,21 +67,21 @@ func TestImportInternalAPIs(t *testing.T) { }, { Names: apiextensionsv1.CustomResourceDefinitionNames{ - Plural: "synctargets", - Singular: "synctarget", - Kind: "SyncTarget", + Plural: "workspaces", + Singular: "workspace", + Kind: "Workspace", }, - GroupVersion: schema.GroupVersion{Group: "workload.kcp.io", Version: "v1alpha1"}, - Instance: &workloadv1alpha1.SyncTarget{}, + GroupVersion: schema.GroupVersion{Group: "tenancy.kcp.io", Version: "v1alpha1"}, + Instance: &tenancyv1alpha1.Workspace{}, ResourceScope: apiextensionsv1.ClusterScoped, HasStatus: true, }, } - workloadScheme := runtime.NewScheme() - err := workloadv1alpha1.AddToScheme(workloadScheme) + tenancyScheme := runtime.NewScheme() + err := tenancyv1alpha1.AddToScheme(tenancyScheme) require.NoError(t, err) schemas, err := CreateAPIResourceSchemas( - []*runtime.Scheme{clientgoscheme.Scheme, workloadScheme}, + []*runtime.Scheme{clientgoscheme.Scheme, tenancyScheme}, []common.GetOpenAPIDefinitions{k8sopenapi.GetOpenAPIDefinitions, kcpopenapi.GetOpenAPIDefinitions}, apisToImport...) require.NoError(t, err) diff --git a/sdk/apis/scheduling/OWNERS b/sdk/apis/scheduling/OWNERS deleted file mode 100644 index 8081b4bfd7b..00000000000 --- a/sdk/apis/scheduling/OWNERS +++ /dev/null @@ -1,2 +0,0 @@ -reviewers: -- qiujian16 diff --git a/sdk/apis/scheduling/register.go b/sdk/apis/scheduling/register.go deleted file mode 100644 index 8491400e2c8..00000000000 --- a/sdk/apis/scheduling/register.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package scheduling - -const ( - GroupName = "scheduling.kcp.io" -) diff --git a/sdk/apis/scheduling/v1alpha1/doc.go b/sdk/apis/scheduling/v1alpha1/doc.go deleted file mode 100644 index 9c69f5116e7..00000000000 --- a/sdk/apis/scheduling/v1alpha1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
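
Before this removal, consumers registered the scheduling types via the usual scheme-builder pattern, the same way import_test.go above wires in the tenancy types. A minimal sketch, which compiles only against a tree that still vendors the package:

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/runtime"

        schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1"
    )

    func main() {
        // AddToScheme (from register.go below) wires Location and Placement
        // into a runtime.Scheme.
        scheme := runtime.NewScheme()
        if err := schedulingv1alpha1.AddToScheme(scheme); err != nil {
            panic(err)
        }
        fmt.Println(scheme.Recognizes(schedulingv1alpha1.SchemeGroupVersion.WithKind("Location"))) // true
    }
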
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package,register -// +groupName=scheduling.kcp.io -// +k8s:openapi-gen=true -package v1alpha1 diff --git a/sdk/apis/scheduling/v1alpha1/register.go b/sdk/apis/scheduling/v1alpha1/register.go deleted file mode 100644 index 980a9ca6445..00000000000 --- a/sdk/apis/scheduling/v1alpha1/register.go +++ /dev/null @@ -1,55 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - - "github.com/kcp-dev/kcp/sdk/apis/scheduling" -) - -// SchemeGroupVersion is group version used to register these objects. -var SchemeGroupVersion = schema.GroupVersion{Group: scheduling.GroupName, Version: "v1alpha1"} - -// Kind takes an unqualified kind and returns back a Group qualified GroupKind. -func Kind(kind string) schema.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource. -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme -) - -// Adds the list of known types to Scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &Location{}, - &LocationList{}, - &Placement{}, - &PlacementList{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/sdk/apis/scheduling/v1alpha1/types_location.go b/sdk/apis/scheduling/v1alpha1/types_location.go deleted file mode 100644 index d69211d31f1..00000000000 --- a/sdk/apis/scheduling/v1alpha1/types_location.go +++ /dev/null @@ -1,169 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - // LocationLabelsStringAnnotationKey is the label key for the label holding a string - // representation of the location labels in order to use them in a table column in the CLI. - LocationLabelsStringAnnotationKey = "scheduling.kcp.io/labels" - - // PlacementAnnotationKey is the label key for a namespace that indicates the namespace's labels match the selector - // in at least one ready Placement. 
-	PlacementAnnotationKey = "scheduling.kcp.io/placement"
-)
-
-// Location represents a set of instances of a scheduling resource type acting as a target
-// of scheduling.
-//
-// The location is chosen by the user (in the future) through a Placement object, while
-// the instance is chosen by the scheduler depending on considerations like load
-// or available resources, or further node selectors specified by the user.
-//
-// +crd
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// +kubebuilder:subresource:status
-// +kubebuilder:resource:scope=Cluster,categories=kcp
-// +kubebuilder:printcolumn:name="Resource",type=string,JSONPath=`.spec.resource.resource`,description="Type of the location resource"
-// +kubebuilder:printcolumn:name="Available",type=string,JSONPath=`.status.availableInstances`,description="Available instances in this location"
-// +kubebuilder:printcolumn:name="Instances",type=string,JSONPath=`.status.instances`,description="Instances in this location"
-// +kubebuilder:printcolumn:name="Labels",type=string,JSONPath=`.metadata.annotations['scheduling\.kcp\.io/labels']`,description="The common labels of this location"
-// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
-type Location struct {
-	metav1.TypeMeta `json:",inline"`
-	// +optional
-	metav1.ObjectMeta `json:"metadata,omitempty"`
-
-	Spec LocationSpec `json:"spec,omitempty"`
-
-	// +optional
-	Status LocationStatus `json:"status,omitempty"`
-}
-
-// LocationSpec holds the desired state of the Location.
-type LocationSpec struct {
-	// resource is the group-version-resource of the instances that are subject to this location.
-	//
-	// +required
-	// +kubebuilder:validation:Required
-	Resource GroupVersionResource `json:"resource"`
-
-	// description is a human-readable description of the location.
-	//
-	// +optional
-	Description string `json:"description,omitempty"`
-
-	// availableSelectorLabels is a list of labels that can be used to select an
-	// instance at this location in a placement object.
-	//
-	// +listType=map
-	// +listMapKey=key
-	AvailableSelectorLabels []AvailableSelectorLabel `json:"availableSelectorLabels,omitempty"`
-
-	// instanceSelector chooses the instances that will be part of this location.
-	//
-	// Note that these labels are not what is shown in the Location objects to
-	// the user. Depending on context, both will match or won't match.
-	//
-	// +optional
-	InstanceSelector *metav1.LabelSelector `json:"instanceSelector,omitempty"`
-}
-
-// GroupVersionResource unambiguously identifies a resource.
-type GroupVersionResource struct {
-	// group is the name of an API group.
-	//
-	// +kubebuilder:validation:Pattern=`^(|[a-z0-9]([-a-z0-9]*[a-z0-9](\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*)?)$`
-	// +kubebuilder:validation:Enum="workload.kcp.io"
-	// +optional
-	Group string `json:"group,omitempty"`
-
-	// version is the version of the API.
-	//
-	// +kubebuilder:validation:Pattern=`^[a-z][-a-z0-9]*[a-z0-9]$`
-	// +kubebuilder:validation:MinLength=1
-	// +kubebuilder:validation:Enum="v1alpha1"
-	// +required
-	// +kubebuilder:validation:Required
-	Version string `json:"version"`
-
-	// resource is the name of the resource.
- // +kubebuilder:validation:Pattern=`^[a-z][-a-z0-9]*[a-z0-9]$`
- // +kubebuilder:validation:MinLength=1
- // +kubebuilder:validation:Enum="synctargets"
- // +required
- // +kubebuilder:validation:Required
- Resource string `json:"resource"`
-}
-
-// AvailableSelectorLabel specifies a label key and its possible values.
-type AvailableSelectorLabel struct {
- // key is the name of the label.
- //
- // +required
- // +kubebuilder:validation:Required
- Key LabelKey `json:"key"`
-
- // values are the possible values for this label.
- //
- // +kubebuilder:validation:MinItems=1
- // +required
- // +kubebuilder:validation:Required
- // +listType=set
- Values []LabelValue `json:"values"`
-
- // description is a human-readable description of the label.
- //
- // +optional
- Description string `json:"description,omitempty"`
-}
-
-// LabelKey is a key for a label.
-//
-// +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9](\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*)?/)?([a-zA-Z0-9][-a-zA-Z0-9_.]{0,61})?[a-zA-Z0-9]$`
-// +kubebuilder:validation:MaxLength=255
-type LabelKey string
-
-// LabelValue specifies a value of a label.
-//
-// +kubebuilder:validation:Pattern=`^(|([a-z0-9]([-a-z0-9]*[a-z0-9](\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*)?/)?([a-zA-Z0-9][-a-zA-Z0-9_.]{0,61})?[a-zA-Z0-9])$`
-// +kubebuilder:validation:MaxLength=63
-type LabelValue string
-
-// LocationStatus defines the observed state of Location.
-type LocationStatus struct {
- // instances is the number of actual instances at this location.
- Instances *uint32 `json:"instances,omitempty"`
-
- // availableInstances is the number of actual instances that are available at this location.
- AvailableInstances *uint32 `json:"availableInstances,omitempty"`
-}
-
-// LocationList is a list of locations.
-//
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-type LocationList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata"`
-
- Items []Location `json:"items"`
-}
diff --git a/sdk/apis/scheduling/v1alpha1/types_placement.go b/sdk/apis/scheduling/v1alpha1/types_placement.go
deleted file mode 100644
index caa584e0553..00000000000
--- a/sdk/apis/scheduling/v1alpha1/types_placement.go
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
-Copyright 2022 The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha1
-
-import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
- conditionsv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/apis/conditions/v1alpha1"
- "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/util/conditions"
-)
-
-// Placement defines a selection rule to choose ONE location for MULTIPLE namespaces in a workspace.
-//
-// A placement is initially in Pending state. When a location is selected by the placement, the placement
-// turns to Unbound state. In Pending or Unbound state, the selection rule can be updated to select another location.
-// When a namespace is annotated by another controller or user with the "scheduling.kcp.io/placement" key,
-// the namespace will pick one placement, and that placement is transferred to the Bound state. Any update to the spec of the
-// placement is ignored in the Bound state and is reflected in the conditions. The placement will turn back to the Unbound state
-// when no namespace uses this placement anymore.
-//
-// +crd
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// +kubebuilder:subresource:status
-// +kubebuilder:resource:scope=Cluster,categories=kcp
-// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
-type Placement struct {
- metav1.TypeMeta `json:",inline"`
- // +optional
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- Spec PlacementSpec `json:"spec,omitempty"`
-
- // +optional
- Status PlacementStatus `json:"status,omitempty"`
-}
-
-func (in *Placement) SetConditions(c conditionsv1alpha1.Conditions) {
- in.Status.Conditions = c
-}
-
-func (in *Placement) GetConditions() conditionsv1alpha1.Conditions {
- return in.Status.Conditions
-}
-
-var _ conditions.Getter = &Placement{}
-var _ conditions.Setter = &Placement{}
-
-type PlacementSpec struct {
- // locationSelectors represents a list of label selectors used to select a location; these label selectors
- // are logically ORed.
- LocationSelectors []metav1.LabelSelector `json:"locationSelectors,omitempty"`
-
- // locationResource is the group-version-resource of the instances that are subject to the locations to select.
- //
- // +required
- // +kubebuilder:validation:Required
- LocationResource GroupVersionResource `json:"locationResource"`
-
- // namespaceSelector is a label selector used to select namespaces. It matches all namespaces by default,
- // but can be restricted to a specific set of namespaces.
- // +optional
- NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty"`
-
- // locationWorkspace is an absolute reference to a workspace for the location. If it is not set, the workspace of
- // the APIBinding will be used.
- // +optional
- // +kubebuilder:validation:Pattern:="^[a-z0-9]([-a-z0-9]*[a-z0-9])?(:[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$"
- LocationWorkspace string `json:"locationWorkspace,omitempty"`
-}
-
-type PlacementStatus struct {
- // phase is the current phase of the placement.
- //
- // +kubebuilder:default=Pending
- // +kubebuilder:validation:Enum=Pending;Bound;Unbound
- Phase PlacementPhase `json:"phase,omitempty"`
-
- // selectedLocation is the location that is picked by this placement.
- // +optional
- SelectedLocation *LocationReference `json:"selectedLocation,omitempty"`
-
- // Current processing state of the Placement.
- // +optional
- Conditions conditionsv1alpha1.Conditions `json:"conditions,omitempty"`
-}
-
-// LocationReference describes a location that is provided in the specified Workspace.
-type LocationReference struct {
- // path is an absolute reference to a workspace, e.g. root:org:ws. The workspace must
- // be some ancestor or a child of some ancestor.
- //
- // +required
- // +kubebuilder:validation:Required
- // +kubebuilder:validation:Pattern:="^[a-z0-9]([-a-z0-9]*[a-z0-9])?(:[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$"
- Path string `json:"path"`
-
- // Name of the Location.
- //
- // +required
- // +kubebuilder:validation:Required
- // +kubebuilder:validation:MinLength=1
- LocationName string `json:"locationName"`
-}
-
-type PlacementPhase string
-
-const (
- // PlacementPending is the phase in which no location has been selected for this placement yet.
- PlacementPending = "Pending"
-
- // PlacementUnbound is the phase in which a location has been selected by the placement, but
- // no namespace is bound to this placement yet.
- PlacementUnbound = "Unbound"
-
- // PlacementBound is the phase in which a location has been selected by the placement, and at
- // least one namespace has been bound to this placement.
- PlacementBound = "Bound"
-)
-
-const (
- // PlacementReady is a condition type for placement representing that the placement is ready
- // for scheduling. The placement is NOT ready when no location can be found for the placement,
- // or when the selected location does not match the placement spec.
- PlacementReady conditionsv1alpha1.ConditionType = "Ready"
-
- // LocationNotFoundReason is a reason for the PlacementReady condition indicating that a location cannot be
- // found for this placement.
- LocationNotFoundReason = "LocationNotFound"
-
- // LocationInvalidReason is a reason for the PlacementReady condition indicating that a location is no longer valid
- // for this placement.
- LocationInvalidReason = "LocationInvalid"
-
- // LocationNotMatchReason is a reason for the PlacementReady condition indicating that no location matching
- // this placement can be found.
- LocationNotMatchReason = "LocationNoMatch"
-
- // PlacementScheduled is a condition type for placement representing that a scheduling decision has been
- // made. The placement is NOT scheduled when no valid scheduling decision is available or an error
- // occurs.
- PlacementScheduled conditionsv1alpha1.ConditionType = "Scheduled"
-
- // ScheduleLocationNotFound is a reason for the PlacementScheduled condition indicating that no location is available for scheduling.
- ScheduleLocationNotFound = "ScheduleLocationNotFound"
-
- // ScheduleNoValidTargetReason is a reason for the PlacementScheduled condition indicating that no valid target is scheduled
- // for this placement.
- ScheduleNoValidTargetReason = "NoValidTarget"
-)
-
-// PlacementList is a list of placements.
-//
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-type PlacementList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata"`
-
- Items []Placement `json:"items"`
-}
diff --git a/sdk/apis/scheduling/v1alpha1/zz_generated.deepcopy.go b/sdk/apis/scheduling/v1alpha1/zz_generated.deepcopy.go
deleted file mode 100644
index 1af84c24f5d..00000000000
--- a/sdk/apis/scheduling/v1alpha1/zz_generated.deepcopy.go
+++ /dev/null
@@ -1,316 +0,0 @@
-//go:build !ignore_autogenerated
-// +build !ignore_autogenerated
-
-/*
-Copyright The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by deepcopy-gen. DO NOT EDIT.
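
An aside for reviewers: the Pending/Unbound/Bound phases above form a small state machine. The following sketch (not part of this patch; the helper name is made up) shows how a consumer of the removed scheduling API could have reported where a Placement sits in that lifecycle:

```go
package main

import (
	"fmt"

	schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1"
)

// describePlacement summarizes the Pending -> Unbound -> Bound lifecycle
// documented on the Placement type above.
func describePlacement(p *schedulingv1alpha1.Placement) string {
	switch p.Status.Phase {
	case schedulingv1alpha1.PlacementBound:
		// Bound: at least one namespace uses the placement; spec updates are ignored.
		if loc := p.Status.SelectedLocation; loc != nil {
			return fmt.Sprintf("bound to location %q in workspace %q", loc.LocationName, loc.Path)
		}
		return "bound"
	case schedulingv1alpha1.PlacementUnbound:
		// Unbound: a location is selected, but no namespace is bound yet.
		return "unbound: waiting for a namespace to bind"
	default:
		// Pending: no location has been selected so far.
		return "pending"
	}
}

func main() {
	p := &schedulingv1alpha1.Placement{}
	p.Status.Phase = schedulingv1alpha1.PlacementUnbound
	fmt.Println(describePlacement(p))
}
```
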
- -package v1alpha1 - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - - conditionsv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/apis/conditions/v1alpha1" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AvailableSelectorLabel) DeepCopyInto(out *AvailableSelectorLabel) { - *out = *in - if in.Values != nil { - in, out := &in.Values, &out.Values - *out = make([]LabelValue, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AvailableSelectorLabel. -func (in *AvailableSelectorLabel) DeepCopy() *AvailableSelectorLabel { - if in == nil { - return nil - } - out := new(AvailableSelectorLabel) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GroupVersionResource) DeepCopyInto(out *GroupVersionResource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupVersionResource. -func (in *GroupVersionResource) DeepCopy() *GroupVersionResource { - if in == nil { - return nil - } - out := new(GroupVersionResource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Location) DeepCopyInto(out *Location) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Location. -func (in *Location) DeepCopy() *Location { - if in == nil { - return nil - } - out := new(Location) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Location) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LocationList) DeepCopyInto(out *LocationList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Location, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocationList. -func (in *LocationList) DeepCopy() *LocationList { - if in == nil { - return nil - } - out := new(LocationList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *LocationList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LocationReference) DeepCopyInto(out *LocationReference) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocationReference. 
-func (in *LocationReference) DeepCopy() *LocationReference { - if in == nil { - return nil - } - out := new(LocationReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LocationSpec) DeepCopyInto(out *LocationSpec) { - *out = *in - out.Resource = in.Resource - if in.AvailableSelectorLabels != nil { - in, out := &in.AvailableSelectorLabels, &out.AvailableSelectorLabels - *out = make([]AvailableSelectorLabel, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.InstanceSelector != nil { - in, out := &in.InstanceSelector, &out.InstanceSelector - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocationSpec. -func (in *LocationSpec) DeepCopy() *LocationSpec { - if in == nil { - return nil - } - out := new(LocationSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LocationStatus) DeepCopyInto(out *LocationStatus) { - *out = *in - if in.Instances != nil { - in, out := &in.Instances, &out.Instances - *out = new(uint32) - **out = **in - } - if in.AvailableInstances != nil { - in, out := &in.AvailableInstances, &out.AvailableInstances - *out = new(uint32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocationStatus. -func (in *LocationStatus) DeepCopy() *LocationStatus { - if in == nil { - return nil - } - out := new(LocationStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Placement) DeepCopyInto(out *Placement) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Placement. -func (in *Placement) DeepCopy() *Placement { - if in == nil { - return nil - } - out := new(Placement) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Placement) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PlacementList) DeepCopyInto(out *PlacementList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Placement, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementList. -func (in *PlacementList) DeepCopy() *PlacementList { - if in == nil { - return nil - } - out := new(PlacementList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *PlacementList) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PlacementSpec) DeepCopyInto(out *PlacementSpec) {
- *out = *in
- if in.LocationSelectors != nil {
- in, out := &in.LocationSelectors, &out.LocationSelectors
- *out = make([]v1.LabelSelector, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- out.LocationResource = in.LocationResource
- if in.NamespaceSelector != nil {
- in, out := &in.NamespaceSelector, &out.NamespaceSelector
- *out = new(v1.LabelSelector)
- (*in).DeepCopyInto(*out)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementSpec.
-func (in *PlacementSpec) DeepCopy() *PlacementSpec {
- if in == nil {
- return nil
- }
- out := new(PlacementSpec)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PlacementStatus) DeepCopyInto(out *PlacementStatus) {
- *out = *in
- if in.SelectedLocation != nil {
- in, out := &in.SelectedLocation, &out.SelectedLocation
- *out = new(LocationReference)
- **out = **in
- }
- if in.Conditions != nil {
- in, out := &in.Conditions, &out.Conditions
- *out = make(conditionsv1alpha1.Conditions, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementStatus.
-func (in *PlacementStatus) DeepCopy() *PlacementStatus {
- if in == nil {
- return nil
- }
- out := new(PlacementStatus)
- in.DeepCopyInto(out)
- return out
-}
diff --git a/sdk/apis/workload/helpers/syncintent.go b/sdk/apis/workload/helpers/syncintent.go
deleted file mode 100644
index 320162e7fe1..00000000000
--- a/sdk/apis/workload/helpers/syncintent.go
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
-Copyright 2022 The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package helpers
-
-import (
- "fmt"
- "strings"
-
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
- "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1"
-)
-
-// SyncIntent gathers all the information related to the syncing of
-// a resource to a given SyncTarget. This information comes from labels and
-// annotations.
-type SyncIntent struct {
- // ResourceState is the requested syncing state for this SyncTarget.
- // It is read from the state.workload.kcp.io/ label.
- ResourceState v1alpha1.ResourceState
-
- // DeletionTimestamp is the parsed timestamp coming from the content of
- // the deletion.internal.workload.kcp.io/ annotation.
- // It expresses the timestamped intent that a resource should be removed
- // from the given SyncTarget.
- DeletionTimestamp *metav1.Time
-
- // Finalizers is the list of "soft" finalizers defined for this resource
- // and this SyncTarget.
- // It is read from the finalizers.workload.kcp.io/ annotation.
- Finalizers string
-}
-
-// GetSyncIntents gathers, for each SyncTarget, all the information related
-// to the syncing of the resource to this SyncTarget.
-// This information comes from labels and annotations.
-// Keys in the returned map are SyncTarget keys.
-func GetSyncIntents(upstreamResource metav1.Object) (map[string]SyncIntent, error) {
- labels := upstreamResource.GetLabels()
- syncing := make(map[string]SyncIntent, len(labels))
- annotations := upstreamResource.GetAnnotations()
- for labelName, labelValue := range labels {
- if strings.HasPrefix(labelName, v1alpha1.ClusterResourceStateLabelPrefix) {
- syncTarget := strings.TrimPrefix(labelName, v1alpha1.ClusterResourceStateLabelPrefix)
- syncTargetSyncing := SyncIntent{
- ResourceState: v1alpha1.ResourceState(labelValue),
- }
- if deletionAnnotation, exists := annotations[v1alpha1.InternalClusterDeletionTimestampAnnotationPrefix+syncTarget]; exists {
- var deletionTimestamp metav1.Time
- if err := deletionTimestamp.UnmarshalText([]byte(deletionAnnotation)); err != nil {
- return nil, fmt.Errorf("parsing of the deletion annotation for sync target %q failed: %w", syncTarget, err)
- } else {
- syncTargetSyncing.DeletionTimestamp = &deletionTimestamp
- }
- }
- if finalizersAnnotation, exists := annotations[v1alpha1.ClusterFinalizerAnnotationPrefix+syncTarget]; exists {
- syncTargetSyncing.Finalizers = finalizersAnnotation
- }
- syncing[syncTarget] = syncTargetSyncing
- }
- }
- return syncing, nil
-}
diff --git a/sdk/apis/workload/register.go b/sdk/apis/workload/register.go
deleted file mode 100644
index 6709102eb6f..00000000000
--- a/sdk/apis/workload/register.go
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
-Copyright 2021 The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package workload
-
-const (
- GroupName = "workload.kcp.io"
-)
diff --git a/sdk/apis/workload/v1alpha1/doc.go b/sdk/apis/workload/v1alpha1/doc.go
deleted file mode 100644
index 52e57cb2f23..00000000000
--- a/sdk/apis/workload/v1alpha1/doc.go
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
-Copyright 2021 The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// +k8s:deepcopy-gen=package,register
-// +groupName=workload.kcp.io
-// +k8s:openapi-gen=true
-package v1alpha1
diff --git a/sdk/apis/workload/v1alpha1/helpers.go b/sdk/apis/workload/v1alpha1/helpers.go
deleted file mode 100644
index 1a91adf0239..00000000000
--- a/sdk/apis/workload/v1alpha1/helpers.go
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
-Copyright 2022 The KCP Authors.
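
For context, GetSyncIntents above was the read side of the label/annotation contract defined in the workload API (removed further down in this patch). A minimal usage sketch; the sync target key "abc123" is a made-up placeholder:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/kcp-dev/kcp/sdk/apis/workload/helpers"
	workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1"
)

func main() {
	// "abc123" stands in for a real sync target key (see ToSyncTargetKey below).
	key := "abc123"
	obj := &metav1.ObjectMeta{
		Labels: map[string]string{
			workloadv1alpha1.ClusterResourceStateLabelPrefix + key: string(workloadv1alpha1.ResourceStateSync),
		},
		Annotations: map[string]string{
			workloadv1alpha1.ClusterFinalizerAnnotationPrefix + key: "example.com/my-finalizer",
		},
	}

	intents, err := helpers.GetSyncIntents(obj)
	if err != nil {
		panic(err)
	}
	// Prints: abc123 -> state "Sync", finalizers "example.com/my-finalizer"
	for target, intent := range intents {
		fmt.Printf("%s -> state %q, finalizers %q\n", target, intent.ResourceState, intent.Finalizers)
	}
}
```
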
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha1
-
-import (
- "crypto/sha256"
- "math/big"
-
- "github.com/kcp-dev/logicalcluster/v3"
-)
-
-// ToSyncTargetKey hashes the SyncTarget workspace and the SyncTarget name into a string that uniquely
-// identifies the SyncTarget in annotations/labels/finalizers.
func ToSyncTargetKey(clusterName logicalcluster.Name, syncTargetName string) string {
- hash := sha256.Sum224([]byte(clusterName.Path().Join(syncTargetName).String()))
- base62hash := toBase62(hash)
- return base62hash
-}
-
-func toBase62(hash [28]byte) string {
- var i big.Int
- i.SetBytes(hash[:])
- return i.Text(62)
-}
diff --git a/sdk/apis/workload/v1alpha1/register.go b/sdk/apis/workload/v1alpha1/register.go
deleted file mode 100644
index ec159c316ab..00000000000
--- a/sdk/apis/workload/v1alpha1/register.go
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
-Copyright 2021 The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha1
-
-import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
-
- "github.com/kcp-dev/kcp/sdk/apis/workload"
-)
-
-// SchemeGroupVersion is group version used to register these objects.
-var SchemeGroupVersion = schema.GroupVersion{Group: workload.GroupName, Version: "v1alpha1"}
-
-// Kind takes an unqualified kind and returns back a Group qualified GroupKind.
-func Kind(kind string) schema.GroupKind {
- return SchemeGroupVersion.WithKind(kind).GroupKind()
-}
-
-// Resource takes an unqualified resource and returns a Group qualified GroupResource.
-func Resource(resource string) schema.GroupResource {
- return SchemeGroupVersion.WithResource(resource).GroupResource()
-}
-
-var (
- SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
- AddToScheme = SchemeBuilder.AddToScheme
-)
-
-// addKnownTypes adds the list of known types to the given scheme.
-func addKnownTypes(scheme *runtime.Scheme) error {
- scheme.AddKnownTypes(SchemeGroupVersion,
- &SyncTarget{},
- &SyncTargetList{},
- )
- metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
- return nil
-}
diff --git a/sdk/apis/workload/v1alpha1/register_test.go b/sdk/apis/workload/v1alpha1/register_test.go
deleted file mode 100644
index 10557c40ac5..00000000000
--- a/sdk/apis/workload/v1alpha1/register_test.go
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
-Copyright 2021 The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
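
The key derivation removed above is deterministic: the SHA-224 hash of the joined workspace path and SyncTarget name, encoded in base 62. A short usage sketch (the workspace path and name below are placeholders):

```go
package main

import (
	"fmt"

	"github.com/kcp-dev/logicalcluster/v3"

	workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1"
)

func main() {
	// The resulting key is the suffix used in labels/annotations such as
	// state.workload.kcp.io/<key>.
	key := workloadv1alpha1.ToSyncTargetKey(logicalcluster.Name("root:org:ws"), "my-synctarget")
	fmt.Println(key)
}
```
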
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha1
-
-import (
- "testing"
-
- "k8s.io/apimachinery/pkg/runtime"
-)
-
-func TestRegisterHelpers(t *testing.T) {
- if got, want := Kind("Foo"), "Foo.workload.kcp.io"; got.String() != want {
- t.Errorf("Kind(Foo) = %v, want %v", got.String(), want)
- }
-
- if got, want := Resource("Foo"), "Foo.workload.kcp.io"; got.String() != want {
- t.Errorf("Resource(Foo) = %v, want %v", got.String(), want)
- }
-
- if got, want := SchemeGroupVersion.String(), "workload.kcp.io/v1alpha1"; got != want {
- t.Errorf("SchemeGroupVersion() = %v, want %v", got, want)
- }
-
- scheme := runtime.NewScheme()
- if err := addKnownTypes(scheme); err != nil {
- t.Errorf("addKnownTypes() = %v", err)
- }
-}
diff --git a/sdk/apis/workload/v1alpha1/synctarget_types.go b/sdk/apis/workload/v1alpha1/synctarget_types.go
deleted file mode 100644
index 1b4513043e5..00000000000
--- a/sdk/apis/workload/v1alpha1/synctarget_types.go
+++ /dev/null
@@ -1,218 +0,0 @@
-/*
-Copyright 2021 The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha1
-
-import (
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
- apisv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1"
- tenancyv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1"
- conditionsv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/apis/conditions/v1alpha1"
- "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/util/conditions"
-)
-
-// SyncTarget describes a member cluster capable of running workloads.
-//
-// +crd
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// +kubebuilder:subresource:status
-// +kubebuilder:resource:scope=Cluster,categories=kcp
-// +kubebuilder:printcolumn:name="Location",type="string",JSONPath=`.metadata.name`,priority=1
-// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=`.status.conditions[?(@.type=="Ready")].status`,priority=2
-// +kubebuilder:printcolumn:name="Synced API resources",type="string",JSONPath=`.status.syncedResources`,priority=3
-// +kubebuilder:printcolumn:name="Key",type="string",JSONPath=`.metadata.labels['internal\.workload\.kcp\.io/key']`,priority=4
-// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
-type SyncTarget struct {
- metav1.TypeMeta `json:",inline"`
- // +optional
- metav1.ObjectMeta `json:"metadata,omitempty"`
-
- // Spec holds the desired state.
- // +optional
- Spec SyncTargetSpec `json:"spec,omitempty"`
-
- // Status communicates the observed state.
- // +optional
- Status SyncTargetStatus `json:"status,omitempty"`
-}
-
-var _ conditions.Getter = &SyncTarget{}
-var _ conditions.Setter = &SyncTarget{}
-
-// SyncTargetSpec holds the desired state of the SyncTarget (from the client).
-type SyncTargetSpec struct {
- // Unschedulable controls cluster schedulability of new workloads. By
- // default, the cluster is schedulable.
- // +optional
- // +kubebuilder:default=false
- Unschedulable bool `json:"unschedulable"`
-
- // EvictAfter controls cluster schedulability of new and existing workloads.
- // After the EvictAfter time, any workload scheduled to the cluster
- // will be unassigned from the cluster.
- // By default, workloads scheduled to the cluster are not evicted.
- EvictAfter *metav1.Time `json:"evictAfter,omitempty"`
-
- // SupportedAPIExports defines a set of APIExports that are expected to be supported by this SyncTarget. The SyncTarget
- // will be selected to deploy the workload only when the resource schema on the SyncTarget is compatible
- // with the resource schema included in the exports.
- SupportedAPIExports []tenancyv1alpha1.APIExportReference `json:"supportedAPIExports,omitempty"`
-
- // Cells is a set of labels to identify the cells the SyncTarget belongs to. SyncTargets with the same cells are treated
- // as if they run in the same physical cluster. Each key/value pair in the cells should be added and updated by service providers
- // (e.g. a network provider updates one key/value pair, while a storage provider updates another).
- Cells map[string]string `json:"cells,omitempty"`
-}
-
-// SyncTargetStatus communicates the observed state of the SyncTarget (from the controller).
-type SyncTargetStatus struct {
-
- // Allocatable represents the resources that are available for scheduling.
- // +optional
- Allocatable *corev1.ResourceList `json:"allocatable,omitempty"`
-
- // Capacity represents the total resources of the cluster.
- // +optional
- Capacity *corev1.ResourceList `json:"capacity,omitempty"`
-
- // Current processing state of the SyncTarget.
- // +optional
- Conditions conditionsv1alpha1.Conditions `json:"conditions,omitempty"`
-
- // SyncedResources represents the resources that the syncer of the SyncTarget can sync. It MUST be
- // updated by the kcp server.
- // +optional
- SyncedResources []ResourceToSync `json:"syncedResources,omitempty"`
-
- // A timestamp indicating when the syncer last reported status.
- // +optional
- LastSyncerHeartbeatTime *metav1.Time `json:"lastSyncerHeartbeatTime,omitempty"`
-
- // VirtualWorkspaces contains all virtual workspace URLs.
- // +optional
- VirtualWorkspaces []VirtualWorkspace `json:"virtualWorkspaces,omitempty"`
-
- // TunnelWorkspaces contains all URLs (one per shard) that point to the SyncTarget
- // workspace in order to set up the tunneler.
- // +optional
- TunnelWorkspaces []TunnelWorkspace `json:"tunnelWorkspaces,omitempty"`
-}
-
-type ResourceToSync struct {
- apisv1alpha1.GroupResource `json:","`
-
- // versions are the resource versions the syncer can choose to sync depending on
- // availability on the downstream cluster. Conversion to the storage version, if necessary,
- // will be done on the kcp side. The versions are ordered by precedence and the
- // first compatible version is preferred by the syncer.
- // +kubebuilder:validation:MinItems=1
- // +required
- // +kubebuilder:validation:Required
- Versions []string `json:"versions"`
-
- // identityHash is the identity for a given APIExport that the APIResourceSchema belongs to.
- // The hash can be found on APIExport and APIResourceSchema's status.
- // It will be empty for core types.
- // +optional
- IdentityHash string `json:"identityHash"`
-
- // state indicates whether the resource schema is compatible with the SyncTarget. It must be updated
- // by the syncer after checking the API compatibility on the SyncTarget.
- // +kubebuilder:validation:Enum=Pending;Accepted;Incompatible
- // +kubebuilder:default=Pending
- // +optional
- State ResourceCompatibleState `json:"state,omitempty"`
-}
-
-type ResourceCompatibleState string
-
-const (
- // ResourceSchemaPendingState is the initial state, indicating that the syncer has not yet reported the compatibility of the resource.
- ResourceSchemaPendingState = "Pending"
- // ResourceSchemaAcceptedState is the state indicating that the resource schema is compatible and can be synced by the syncer.
- ResourceSchemaAcceptedState = "Accepted"
- // ResourceSchemaIncompatibleState is the state indicating that the resource schema is incompatible with the SyncTarget.
- ResourceSchemaIncompatibleState = "Incompatible"
-)
-
-type VirtualWorkspace struct {
- // SyncerURL is the URL of the syncer virtual workspace.
- //
- // +kubebuilder:validation:MinLength=1
- // +kubebuilder:format:URL
- // +required
- SyncerURL string `json:"syncerURL"`
-
- // UpsyncerURL is the URL of the upsyncer virtual workspace.
- //
- // +kubebuilder:validation:MinLength=1
- // +kubebuilder:format:URL
- // +required
- UpsyncerURL string `json:"upsyncerURL"`
-}
-
-type TunnelWorkspace struct {
- // url is the URL the Syncer should use to connect
- // to the Syncer tunnel for a given shard.
- //
- // +kubebuilder:validation:MinLength=1
- // +kubebuilder:format:URL
- // +required
- URL string `json:"url"`
-}
-
-// SyncTargetList is a list of SyncTarget resources.
-//
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-type SyncTargetList struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ListMeta `json:"metadata"`
-
- Items []SyncTarget `json:"items"`
-}
-
-// ImportedAPISExportName is the singleton name of the compute service export in a location workspace.
-const ImportedAPISExportName = "imported-apis"
-
-// Conditions and ConditionReasons for the kcp SyncTarget object.
-const (
- // SyncerReady means the syncer is ready to transfer resources between KCP and the SyncTarget.
- SyncerReady conditionsv1alpha1.ConditionType = "SyncerReady"
-
- // APIImporterReady means the APIImport component is ready to import APIs from the SyncTarget.
- APIImporterReady conditionsv1alpha1.ConditionType = "APIImporterReady"
-
- // HeartbeatHealthy means the HeartbeatManager has seen a heartbeat for the SyncTarget within the expected interval.
- HeartbeatHealthy conditionsv1alpha1.ConditionType = "HeartbeatHealthy"
-
- // SyncerAuthorized means the syncer is authorized to sync resources to the downstream cluster.
- SyncerAuthorized conditionsv1alpha1.ConditionType = "SyncerAuthorized"
-
- // ErrorHeartbeatMissedReason indicates that a heartbeat update was not received within the configured threshold.
- ErrorHeartbeatMissedReason = "ErrorHeartbeat"
-)
-
-func (in *SyncTarget) SetConditions(conditions conditionsv1alpha1.Conditions) {
- in.Status.Conditions = conditions
-}
-
-func (in *SyncTarget) GetConditions() conditionsv1alpha1.Conditions {
- return in.Status.Conditions
-}
diff --git a/sdk/apis/workload/v1alpha1/types.go b/sdk/apis/workload/v1alpha1/types.go
deleted file mode 100644
index d30a773b3db..00000000000
--- a/sdk/apis/workload/v1alpha1/types.go
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
-Copyright 2022 The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha1
-
-type ResourceState string
-
-const (
- // ResourceStatePending is the initial state of a resource after placement onto
- // a sync target. Either some workload controller or some external coordination
- // controller will set this to "Sync" when the resource is ready to be synced.
- ResourceStatePending ResourceState = ""
- // ResourceStateSync is the state of a resource when it is synced to the sync target.
- // This includes the deletion process until the resource is deleted downstream and the
- // syncer removes the state.workload.kcp.io/ label.
- ResourceStateSync ResourceState = "Sync"
- // ResourceStateUpsync is the state of a resource when it is synced up from the sync target.
- // Compared to the Sync state, this state is exclusive, meaning that only one sync target can
- // be up-syncing a resource; in addition, other sync targets cannot sync this resource,
- // because the up-syncer owns both the spec and the status of that resource.
- ResourceStateUpsync ResourceState = "Upsync"
-)
-
-const (
- // InternalClusterDeletionTimestampAnnotationPrefix is the prefix of the annotation
- //
- //   deletion.internal.workload.kcp.io/
- //
- // on upstream resources storing the timestamp when the sync target resource
- // state was changed to "Delete". The syncer will see this timestamp as the deletion
- // timestamp of the object.
- //
- // The format is RFC3339.
- //
- // TODO(sttts): use sync-target-uid instead of sync-target-name.
- InternalClusterDeletionTimestampAnnotationPrefix = "deletion.internal.workload.kcp.io/"
-
- // ClusterFinalizerAnnotationPrefix is the prefix of the annotation
- //
- //   finalizers.workload.kcp.io/
- //
- // on upstream resources storing a comma-separated list of finalizer names that are set on
- // the sync target resource in the view of the syncer. This blocks the deletion of the
- // resource on that sync target. External (custom) controllers can set this annotation
- // to create back-pressure on the resource.
- //
- // TODO(sttts): use sync-target-uid instead of sync-target-name.
- ClusterFinalizerAnnotationPrefix = "finalizers.workload.kcp.io/"
-
- // ClusterResourceStateLabelPrefix is the prefix of the label
- //
- //   state.workload.kcp.io/
- //
- // on upstream resources storing the state of the sync target syncer state machine.
- // The workload controllers will set this label and the syncer will react and drive the
- // life-cycle of the synced objects on the sync target.
- //
- // The format is a string, namely:
- // - "": the object is assigned, but the syncer will ignore the object. A coordination
- //   controller will have to set the value to "Sync" after initialization in order to
- //   start the sync process.
- // - "Sync": the object is assigned and the syncer will start the sync process.
- //
- // While being in "Sync" state, a deletion timestamp in deletion.internal.workload.kcp.io/
- // will signal the start of the deletion process of the object. During the deletion process
- // the object will stay in "Sync" state. The syncer will block deletion while
- // finalizers.workload.kcp.io/ exists and is non-empty, and it
- // will eventually remove state.workload.kcp.io/ after
- // the object has been deleted downstream.
- //
- // The workload controllers will consider the object deleted from the sync target when
- // the label is removed. They then set the placement state to "Unbound".
- ClusterResourceStateLabelPrefix = "state.workload.kcp.io/"
-
- // InternalSyncerViewAnnotationPrefix is the prefix of the annotation
- //
- //   diff.syncer.internal.kcp.io/
- //
- // on upstream resources storing the value of fields as they have been reported by the Syncer for the given SyncTarget,
- // so possibly different from the field value in the upstream resource itself, and overriding it for the given SyncTarget.
- //
- // The format is a JSON object, whose keys are field identifiers (for example "status" or "spec.clusterIP"),
- // and values are overriding field values.
- InternalSyncerViewAnnotationPrefix = "diff.syncer.internal.kcp.io/"
-
- // InternalClusterStatusAnnotationPrefix is the prefix of the annotation
- //
- //   experimental.status.workload.kcp.io/
- //
- // on upstream resources storing the status of the downstream resource per sync target.
- // Note that this is experimental and will disappear in the future without prior notice. It
- // is used temporarily in the case that a resource is scheduled to multiple sync targets.
- //
- // The format is JSON.
- InternalClusterStatusAnnotationPrefix = "experimental.status.workload.kcp.io/"
-
- // ClusterSpecDiffAnnotationPrefix is the prefix of the annotation
- //
- //   experimental.spec-diff.workload.kcp.io/
- //
- // on upstream resources storing the desired spec diffs to be applied to the resource when syncing
- // down to the sync target. This feature requires the "Advanced Scheduling" feature gate
- // to be enabled.
- //
- // The patch will be applied to the Spec field of the resource, so the JSON root path is the
- // resource's Spec field.
- //
- // The format for the value of this annotation is: JSON Patch (https://tools.ietf.org/html/rfc6902).
- ClusterSpecDiffAnnotationPrefix = "experimental.spec-diff.workload.kcp.io/"
-
- // ExperimentalSummarizingRulesAnnotation is the annotation
- //
- //   experimental.summarizing.workload.kcp.io
- //
- // on upstream resources storing the JSON-encoded summarizing rules for this instance of the resource.
- // This drives which fields should be overridden in the syncer view and made available for summarizing,
- // and how they should be managed.
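
The constants above define the coordination protocol between workload controllers and the syncer. As an illustration only (these helpers never existed in the repo), a coordination controller would have interacted with an upstream object roughly like this:

```go
package coordination

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1"
)

// markForSync flips a resource from the initial empty ("pending") state to
// "Sync" for one sync target, following the label contract described above.
func markForSync(obj metav1.Object, syncTargetKey string) {
	labels := obj.GetLabels()
	if labels == nil {
		labels = map[string]string{}
	}
	labels[workloadv1alpha1.ClusterResourceStateLabelPrefix+syncTargetKey] = string(workloadv1alpha1.ResourceStateSync)
	obj.SetLabels(labels)
}

// markForDeletion records the timestamped intent (RFC3339) that the object
// should be removed from the given sync target; the syncer reacts to this
// annotation rather than to a deletion of the upstream object itself.
func markForDeletion(obj metav1.Object, syncTargetKey string, now time.Time) {
	annotations := obj.GetAnnotations()
	if annotations == nil {
		annotations = map[string]string{}
	}
	annotations[workloadv1alpha1.InternalClusterDeletionTimestampAnnotationPrefix+syncTargetKey] = now.UTC().Format(time.RFC3339)
	obj.SetAnnotations(annotations)
}
```
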
- //
- // To express that only the "status" field should be summarized, and promoted to the upstream
- // resource when scheduled on only 1 SyncTarget, the annotation would be:
- //
- //   [{"fieldPath": "status", "promoteToUpstream": true}]
- //
- // The format for the value of this annotation is a JSON Array of objects with 2 fields:
- // - fieldPath: defines the path (dot-separated) of a field that should be summarized
- // - promoteToUpstream: defines whether this field should be promoted to upstream when the
- //   resource is scheduled to only one SyncTarget.
- ExperimentalSummarizingRulesAnnotation = "experimental.summarizing.workload.kcp.io"
-
- // InternalDownstreamClusterLabel is a label with the upstream cluster name applied on the downstream cluster
- // instead of state.workload.kcp.io/, which is used upstream.
- InternalDownstreamClusterLabel = "internal.workload.kcp.io/cluster"
-
- // AnnotationSkipDefaultObjectCreation is the annotation key for an apiexport or apibinding indicating that the default resources
- // have already been created. If a created default resource is deleted, it will not be recreated.
- AnnotationSkipDefaultObjectCreation = "workload.kcp.io/skip-default-object-creation"
-
- // InternalSyncTargetPlacementAnnotationKey is an internal annotation key on the Placement API to mark the synctarget scheduled
- // from this placement. The value is a hash of the SyncTarget cluster name + SyncTarget name, generated with the ToSyncTargetKey(..) helper func.
- InternalSyncTargetPlacementAnnotationKey = "internal.workload.kcp.io/synctarget"
-
- // InternalSyncTargetKeyLabel is an internal label set on a SyncTarget resource that contains the full hash of the SyncTargetKey, generated with the ToSyncTargetKey(..)
- // helper func. This label is used for reverse lookups from a syncTargetKey to the SyncTarget.
- InternalSyncTargetKeyLabel = "internal.workload.kcp.io/key"
-
- // ComputeAPIExportAnnotationKey is an annotation key set on an APIExport when it will be used for compute,
- // and its APIs are expected to be synced to a SyncTarget by the Syncer. The annotation will be continuously
- // synced from the APIExport to all the APIBindings bound to this APIExport. The workload scheduler will
- // check all the APIBindings with this annotation for scheduling purposes.
- ComputeAPIExportAnnotationKey = "extra.apis.kcp.io/compute.workload.kcp.io"
-
- // ExperimentalUpsyncDerivedResourcesAnnotationKey is an annotation that can be set on a syncable resource.
- // It defines the resource types of derived resources (i.e. resources created from the syncable resource
- // by some controller and that will not exist without it) intended to be upsynced to KCP.
- //
- // It contains a comma-separated list of stringified GroupResources (<resource>.<group>).
- //
- // To allow upsyncing an Endpoints resource related to a synced service, the Service instance should be annotated with:
- //
- //   experimental.workload.kcp.io/upsync-derived-resources: endpoints
- //
- // For now, only endpoints can be upsynced on demand by the syncer with this mechanism,
- // but the list of such resources may grow in the future.
- //
- // Of course, using this annotation also requires having, on the physical cluster, the appropriate logic
- // that will effectively label the derived resources for Upsync.
- // This logic should guard against upsyncing unexpected resources.
- // In addition, Upsyncing is restricted to a limited, well-defined list of resource types on the KCP side,
- // so that simply adding this annotation on a synced resource will not be a security risk.
- //
- // This annotation is user-facing and would typically be set by the client creating the synced resource
- // in the KCP workspace, be it the end-user or a third-party controller.
- //
- // It is experimental since the provided user experience is unsatisfactory,
- // and further work should be done to define such (up)syncing strategies at a more appropriate level
- // (SyncTarget, KCP namespace, KCP workspace?).
- ExperimentalUpsyncDerivedResourcesAnnotationKey = "experimental.workload.kcp.io/upsync-derived-resources"
-
- // InternalWorkspaceURLAnnotationKey is an annotation dynamically added to resources exposed
- // by the Syncer Virtual Workspace to be synced by the Syncer.
- // It contains the external URL of the workspace the resource is part of.
- //
- // The Syncer doesn't have this information and needs it to correctly point some created downstream
- // resources back to the right KCP workspace.
- //
- //   internal.workload.kcp.io/workspace-url
- //
- InternalWorkspaceURLAnnotationKey = "internal.workload.kcp.io/workspace-url"
-)
diff --git a/sdk/apis/workload/v1alpha1/zz_generated.deepcopy.go b/sdk/apis/workload/v1alpha1/zz_generated.deepcopy.go
deleted file mode 100644
index a6cde7ce9f0..00000000000
--- a/sdk/apis/workload/v1alpha1/zz_generated.deepcopy.go
+++ /dev/null
@@ -1,244 +0,0 @@
-//go:build !ignore_autogenerated
-// +build !ignore_autogenerated
-
-/*
-Copyright The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by deepcopy-gen. DO NOT EDIT.
-
-package v1alpha1
-
-import (
- v1 "k8s.io/api/core/v1"
- resource "k8s.io/apimachinery/pkg/api/resource"
- runtime "k8s.io/apimachinery/pkg/runtime"
-
- tenancyv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1"
- conditionsv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/apis/conditions/v1alpha1"
-)
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ResourceToSync) DeepCopyInto(out *ResourceToSync) {
- *out = *in
- out.GroupResource = in.GroupResource
- if in.Versions != nil {
- in, out := &in.Versions, &out.Versions
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceToSync.
-func (in *ResourceToSync) DeepCopy() *ResourceToSync {
- if in == nil {
- return nil
- }
- out := new(ResourceToSync)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *SyncTarget) DeepCopyInto(out *SyncTarget) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyncTarget. -func (in *SyncTarget) DeepCopy() *SyncTarget { - if in == nil { - return nil - } - out := new(SyncTarget) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *SyncTarget) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SyncTargetList) DeepCopyInto(out *SyncTargetList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]SyncTarget, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyncTargetList. -func (in *SyncTargetList) DeepCopy() *SyncTargetList { - if in == nil { - return nil - } - out := new(SyncTargetList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *SyncTargetList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SyncTargetSpec) DeepCopyInto(out *SyncTargetSpec) { - *out = *in - if in.EvictAfter != nil { - in, out := &in.EvictAfter, &out.EvictAfter - *out = (*in).DeepCopy() - } - if in.SupportedAPIExports != nil { - in, out := &in.SupportedAPIExports, &out.SupportedAPIExports - *out = make([]tenancyv1alpha1.APIExportReference, len(*in)) - copy(*out, *in) - } - if in.Cells != nil { - in, out := &in.Cells, &out.Cells - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyncTargetSpec. -func (in *SyncTargetSpec) DeepCopy() *SyncTargetSpec { - if in == nil { - return nil - } - out := new(SyncTargetSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *SyncTargetStatus) DeepCopyInto(out *SyncTargetStatus) { - *out = *in - if in.Allocatable != nil { - in, out := &in.Allocatable, &out.Allocatable - *out = new(v1.ResourceList) - if **in != nil { - in, out := *in, *out - *out = make(map[v1.ResourceName]resource.Quantity, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - } - if in.Capacity != nil { - in, out := &in.Capacity, &out.Capacity - *out = new(v1.ResourceList) - if **in != nil { - in, out := *in, *out - *out = make(map[v1.ResourceName]resource.Quantity, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - } - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make(conditionsv1alpha1.Conditions, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.SyncedResources != nil { - in, out := &in.SyncedResources, &out.SyncedResources - *out = make([]ResourceToSync, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.LastSyncerHeartbeatTime != nil { - in, out := &in.LastSyncerHeartbeatTime, &out.LastSyncerHeartbeatTime - *out = (*in).DeepCopy() - } - if in.VirtualWorkspaces != nil { - in, out := &in.VirtualWorkspaces, &out.VirtualWorkspaces - *out = make([]VirtualWorkspace, len(*in)) - copy(*out, *in) - } - if in.TunnelWorkspaces != nil { - in, out := &in.TunnelWorkspaces, &out.TunnelWorkspaces - *out = make([]TunnelWorkspace, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyncTargetStatus. -func (in *SyncTargetStatus) DeepCopy() *SyncTargetStatus { - if in == nil { - return nil - } - out := new(SyncTargetStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TunnelWorkspace) DeepCopyInto(out *TunnelWorkspace) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TunnelWorkspace. -func (in *TunnelWorkspace) DeepCopy() *TunnelWorkspace { - if in == nil { - return nil - } - out := new(TunnelWorkspace) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VirtualWorkspace) DeepCopyInto(out *VirtualWorkspace) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualWorkspace. -func (in *VirtualWorkspace) DeepCopy() *VirtualWorkspace { - if in == nil { - return nil - } - out := new(VirtualWorkspace) - in.DeepCopyInto(out) - return out -} diff --git a/sdk/client/applyconfiguration/scheduling/v1alpha1/availableselectorlabel.go b/sdk/client/applyconfiguration/scheduling/v1alpha1/availableselectorlabel.go deleted file mode 100644 index b1b4428b894..00000000000 --- a/sdk/client/applyconfiguration/scheduling/v1alpha1/availableselectorlabel.go +++ /dev/null @@ -1,63 +0,0 @@ -/* -Copyright The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1" -) - -// AvailableSelectorLabelApplyConfiguration represents an declarative configuration of the AvailableSelectorLabel type for use -// with apply. -type AvailableSelectorLabelApplyConfiguration struct { - Key *v1alpha1.LabelKey `json:"key,omitempty"` - Values []v1alpha1.LabelValue `json:"values,omitempty"` - Description *string `json:"description,omitempty"` -} - -// AvailableSelectorLabelApplyConfiguration constructs an declarative configuration of the AvailableSelectorLabel type for use with -// apply. -func AvailableSelectorLabel() *AvailableSelectorLabelApplyConfiguration { - return &AvailableSelectorLabelApplyConfiguration{} -} - -// WithKey sets the Key field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Key field is set to the value of the last call. -func (b *AvailableSelectorLabelApplyConfiguration) WithKey(value v1alpha1.LabelKey) *AvailableSelectorLabelApplyConfiguration { - b.Key = &value - return b -} - -// WithValues adds the given value to the Values field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Values field. -func (b *AvailableSelectorLabelApplyConfiguration) WithValues(values ...v1alpha1.LabelValue) *AvailableSelectorLabelApplyConfiguration { - for i := range values { - b.Values = append(b.Values, values[i]) - } - return b -} - -// WithDescription sets the Description field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Description field is set to the value of the last call. -func (b *AvailableSelectorLabelApplyConfiguration) WithDescription(value string) *AvailableSelectorLabelApplyConfiguration { - b.Description = &value - return b -} diff --git a/sdk/client/applyconfiguration/scheduling/v1alpha1/groupversionresource.go b/sdk/client/applyconfiguration/scheduling/v1alpha1/groupversionresource.go deleted file mode 100644 index 06bbb234700..00000000000 --- a/sdk/client/applyconfiguration/scheduling/v1alpha1/groupversionresource.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -// GroupVersionResourceApplyConfiguration represents an declarative configuration of the GroupVersionResource type for use -// with apply. 
-type GroupVersionResourceApplyConfiguration struct { - Group *string `json:"group,omitempty"` - Version *string `json:"version,omitempty"` - Resource *string `json:"resource,omitempty"` -} - -// GroupVersionResourceApplyConfiguration constructs an declarative configuration of the GroupVersionResource type for use with -// apply. -func GroupVersionResource() *GroupVersionResourceApplyConfiguration { - return &GroupVersionResourceApplyConfiguration{} -} - -// WithGroup sets the Group field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Group field is set to the value of the last call. -func (b *GroupVersionResourceApplyConfiguration) WithGroup(value string) *GroupVersionResourceApplyConfiguration { - b.Group = &value - return b -} - -// WithVersion sets the Version field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Version field is set to the value of the last call. -func (b *GroupVersionResourceApplyConfiguration) WithVersion(value string) *GroupVersionResourceApplyConfiguration { - b.Version = &value - return b -} - -// WithResource sets the Resource field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Resource field is set to the value of the last call. -func (b *GroupVersionResourceApplyConfiguration) WithResource(value string) *GroupVersionResourceApplyConfiguration { - b.Resource = &value - return b -} diff --git a/sdk/client/applyconfiguration/scheduling/v1alpha1/location.go b/sdk/client/applyconfiguration/scheduling/v1alpha1/location.go deleted file mode 100644 index 5145c6531be..00000000000 --- a/sdk/client/applyconfiguration/scheduling/v1alpha1/location.go +++ /dev/null @@ -1,219 +0,0 @@ -/* -Copyright The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - - v1 "github.com/kcp-dev/kcp/sdk/client/applyconfiguration/meta/v1" -) - -// LocationApplyConfiguration represents an declarative configuration of the Location type for use -// with apply. -type LocationApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *LocationSpecApplyConfiguration `json:"spec,omitempty"` - Status *LocationStatusApplyConfiguration `json:"status,omitempty"` -} - -// Location constructs an declarative configuration of the Location type for use with -// apply. 
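[Editor's note] A sketch of the builder pattern all of these deleted files implement: every field is a pointer serialized with `omitempty`, so an apply configuration encodes only the fields the caller explicitly set, which is what server-side apply needs to track field ownership. The import alias is my own; the code assumes the pre-patch tree:

package main

import (
	"encoding/json"
	"fmt"

	schedulingv1alpha1ac "github.com/kcp-dev/kcp/sdk/client/applyconfiguration/scheduling/v1alpha1"
)

func main() {
	gvr := schedulingv1alpha1ac.GroupVersionResource().
		WithGroup("scheduling.kcp.io").
		WithVersion("v1alpha1").
		WithResource("locations")

	// Unset fields are omitted entirely from the patch body.
	out, _ := json.Marshal(gvr)
	fmt.Println(string(out)) // {"group":"scheduling.kcp.io","version":"v1alpha1","resource":"locations"}
}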
-func Location(name string) *LocationApplyConfiguration { - b := &LocationApplyConfiguration{} - b.WithName(name) - b.WithKind("Location") - b.WithAPIVersion("scheduling.kcp.io/v1alpha1") - return b -} - -// WithKind sets the Kind field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Kind field is set to the value of the last call. -func (b *LocationApplyConfiguration) WithKind(value string) *LocationApplyConfiguration { - b.Kind = &value - return b -} - -// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the APIVersion field is set to the value of the last call. -func (b *LocationApplyConfiguration) WithAPIVersion(value string) *LocationApplyConfiguration { - b.APIVersion = &value - return b -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. -func (b *LocationApplyConfiguration) WithName(value string) *LocationApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value - return b -} - -// WithGenerateName sets the GenerateName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the GenerateName field is set to the value of the last call. -func (b *LocationApplyConfiguration) WithGenerateName(value string) *LocationApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value - return b -} - -// WithNamespace sets the Namespace field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Namespace field is set to the value of the last call. -func (b *LocationApplyConfiguration) WithNamespace(value string) *LocationApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value - return b -} - -// WithUID sets the UID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the UID field is set to the value of the last call. -func (b *LocationApplyConfiguration) WithUID(value types.UID) *LocationApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value - return b -} - -// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ResourceVersion field is set to the value of the last call. -func (b *LocationApplyConfiguration) WithResourceVersion(value string) *LocationApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value - return b -} - -// WithGeneration sets the Generation field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
-// If called multiple times, the Generation field is set to the value of the last call. -func (b *LocationApplyConfiguration) WithGeneration(value int64) *LocationApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value - return b -} - -// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *LocationApplyConfiguration) WithCreationTimestamp(value metav1.Time) *LocationApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value - return b -} - -// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *LocationApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *LocationApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value - return b -} - -// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. -func (b *LocationApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *LocationApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value - return b -} - -// WithLabels puts the entries into the Labels field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, the entries provided by each call will be put on the Labels field, -// overwriting an existing map entries in Labels field with the same key. -func (b *LocationApplyConfiguration) WithLabels(entries map[string]string) *LocationApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) - } - for k, v := range entries { - b.Labels[k] = v - } - return b -} - -// WithAnnotations puts the entries into the Annotations field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, the entries provided by each call will be put on the Annotations field, -// overwriting an existing map entries in Annotations field with the same key. -func (b *LocationApplyConfiguration) WithAnnotations(entries map[string]string) *LocationApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) - } - for k, v := range entries { - b.Annotations[k] = v - } - return b -} - -// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
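[Editor's note] As the generated doc comments above describe, the map-valued setters merge rather than replace: repeated calls put entries into the same map, overwriting only colliding keys. A short sketch against the pre-patch tree (alias assumed as before):

package main

import (
	"fmt"

	schedulingv1alpha1ac "github.com/kcp-dev/kcp/sdk/client/applyconfiguration/scheduling/v1alpha1"
)

func main() {
	loc := schedulingv1alpha1ac.Location("us-east").
		WithLabels(map[string]string{"tier": "prod"}).
		WithLabels(map[string]string{"region": "us-east", "tier": "canary"})

	// Entries merged across both calls; the colliding "tier" key takes
	// the value from the last call.
	fmt.Println(loc.Labels) // map[region:us-east tier:canary]
}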
-func (b *LocationApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *LocationApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - for i := range values { - if values[i] == nil { - panic("nil value passed to WithOwnerReferences") - } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) - } - return b -} - -// WithFinalizers adds the given value to the Finalizers field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Finalizers field. -func (b *LocationApplyConfiguration) WithFinalizers(values ...string) *LocationApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) - } - return b -} - -func (b *LocationApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { - if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} - } -} - -// WithSpec sets the Spec field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Spec field is set to the value of the last call. -func (b *LocationApplyConfiguration) WithSpec(value *LocationSpecApplyConfiguration) *LocationApplyConfiguration { - b.Spec = value - return b -} - -// WithStatus sets the Status field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Status field is set to the value of the last call. -func (b *LocationApplyConfiguration) WithStatus(value *LocationStatusApplyConfiguration) *LocationApplyConfiguration { - b.Status = value - return b -} diff --git a/sdk/client/applyconfiguration/scheduling/v1alpha1/locationreference.go b/sdk/client/applyconfiguration/scheduling/v1alpha1/locationreference.go deleted file mode 100644 index 0df6d6079a8..00000000000 --- a/sdk/client/applyconfiguration/scheduling/v1alpha1/locationreference.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -// LocationReferenceApplyConfiguration represents an declarative configuration of the LocationReference type for use -// with apply. -type LocationReferenceApplyConfiguration struct { - Path *string `json:"path,omitempty"` - LocationName *string `json:"locationName,omitempty"` -} - -// LocationReferenceApplyConfiguration constructs an declarative configuration of the LocationReference type for use with -// apply. 
-func LocationReference() *LocationReferenceApplyConfiguration { - return &LocationReferenceApplyConfiguration{} -} - -// WithPath sets the Path field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Path field is set to the value of the last call. -func (b *LocationReferenceApplyConfiguration) WithPath(value string) *LocationReferenceApplyConfiguration { - b.Path = &value - return b -} - -// WithLocationName sets the LocationName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the LocationName field is set to the value of the last call. -func (b *LocationReferenceApplyConfiguration) WithLocationName(value string) *LocationReferenceApplyConfiguration { - b.LocationName = &value - return b -} diff --git a/sdk/client/applyconfiguration/scheduling/v1alpha1/locationspec.go b/sdk/client/applyconfiguration/scheduling/v1alpha1/locationspec.go deleted file mode 100644 index 8910edcb1b4..00000000000 --- a/sdk/client/applyconfiguration/scheduling/v1alpha1/locationspec.go +++ /dev/null @@ -1,75 +0,0 @@ -/* -Copyright The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1 "github.com/kcp-dev/kcp/sdk/client/applyconfiguration/meta/v1" -) - -// LocationSpecApplyConfiguration represents an declarative configuration of the LocationSpec type for use -// with apply. -type LocationSpecApplyConfiguration struct { - Resource *GroupVersionResourceApplyConfiguration `json:"resource,omitempty"` - Description *string `json:"description,omitempty"` - AvailableSelectorLabels []AvailableSelectorLabelApplyConfiguration `json:"availableSelectorLabels,omitempty"` - InstanceSelector *v1.LabelSelectorApplyConfiguration `json:"instanceSelector,omitempty"` -} - -// LocationSpecApplyConfiguration constructs an declarative configuration of the LocationSpec type for use with -// apply. -func LocationSpec() *LocationSpecApplyConfiguration { - return &LocationSpecApplyConfiguration{} -} - -// WithResource sets the Resource field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Resource field is set to the value of the last call. -func (b *LocationSpecApplyConfiguration) WithResource(value *GroupVersionResourceApplyConfiguration) *LocationSpecApplyConfiguration { - b.Resource = value - return b -} - -// WithDescription sets the Description field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Description field is set to the value of the last call. 
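[Editor's note] The LocationReference builder deleted above carried a workspace path plus a location name rather than a full object reference. An illustrative sketch (the path value is hypothetical, the alias is my own, pre-patch tree assumed):

package main

import (
	"fmt"

	schedulingv1alpha1ac "github.com/kcp-dev/kcp/sdk/client/applyconfiguration/scheduling/v1alpha1"
)

func main() {
	ref := schedulingv1alpha1ac.LocationReference().
		WithPath("root:org:locations"). // hypothetical workspace path
		WithLocationName("us-east")

	fmt.Println(*ref.Path, *ref.LocationName)
}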
-func (b *LocationSpecApplyConfiguration) WithDescription(value string) *LocationSpecApplyConfiguration { - b.Description = &value - return b -} - -// WithAvailableSelectorLabels adds the given value to the AvailableSelectorLabels field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the AvailableSelectorLabels field. -func (b *LocationSpecApplyConfiguration) WithAvailableSelectorLabels(values ...*AvailableSelectorLabelApplyConfiguration) *LocationSpecApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithAvailableSelectorLabels") - } - b.AvailableSelectorLabels = append(b.AvailableSelectorLabels, *values[i]) - } - return b -} - -// WithInstanceSelector sets the InstanceSelector field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the InstanceSelector field is set to the value of the last call. -func (b *LocationSpecApplyConfiguration) WithInstanceSelector(value *v1.LabelSelectorApplyConfiguration) *LocationSpecApplyConfiguration { - b.InstanceSelector = value - return b -} diff --git a/sdk/client/applyconfiguration/scheduling/v1alpha1/locationstatus.go b/sdk/client/applyconfiguration/scheduling/v1alpha1/locationstatus.go deleted file mode 100644 index d5062515dab..00000000000 --- a/sdk/client/applyconfiguration/scheduling/v1alpha1/locationstatus.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -// LocationStatusApplyConfiguration represents an declarative configuration of the LocationStatus type for use -// with apply. -type LocationStatusApplyConfiguration struct { - Instances *uint32 `json:"instances,omitempty"` - AvailableInstances *uint32 `json:"availableInstances,omitempty"` -} - -// LocationStatusApplyConfiguration constructs an declarative configuration of the LocationStatus type for use with -// apply. -func LocationStatus() *LocationStatusApplyConfiguration { - return &LocationStatusApplyConfiguration{} -} - -// WithInstances sets the Instances field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Instances field is set to the value of the last call. -func (b *LocationStatusApplyConfiguration) WithInstances(value uint32) *LocationStatusApplyConfiguration { - b.Instances = &value - return b -} - -// WithAvailableInstances sets the AvailableInstances field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
-// If called multiple times, the AvailableInstances field is set to the value of the last call. -func (b *LocationStatusApplyConfiguration) WithAvailableInstances(value uint32) *LocationStatusApplyConfiguration { - b.AvailableInstances = &value - return b -} diff --git a/sdk/client/applyconfiguration/scheduling/v1alpha1/placement.go b/sdk/client/applyconfiguration/scheduling/v1alpha1/placement.go deleted file mode 100644 index 5c5b8ed2ff7..00000000000 --- a/sdk/client/applyconfiguration/scheduling/v1alpha1/placement.go +++ /dev/null @@ -1,219 +0,0 @@ -/* -Copyright The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - - v1 "github.com/kcp-dev/kcp/sdk/client/applyconfiguration/meta/v1" -) - -// PlacementApplyConfiguration represents an declarative configuration of the Placement type for use -// with apply. -type PlacementApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *PlacementSpecApplyConfiguration `json:"spec,omitempty"` - Status *PlacementStatusApplyConfiguration `json:"status,omitempty"` -} - -// Placement constructs an declarative configuration of the Placement type for use with -// apply. -func Placement(name string) *PlacementApplyConfiguration { - b := &PlacementApplyConfiguration{} - b.WithName(name) - b.WithKind("Placement") - b.WithAPIVersion("scheduling.kcp.io/v1alpha1") - return b -} - -// WithKind sets the Kind field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Kind field is set to the value of the last call. -func (b *PlacementApplyConfiguration) WithKind(value string) *PlacementApplyConfiguration { - b.Kind = &value - return b -} - -// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the APIVersion field is set to the value of the last call. -func (b *PlacementApplyConfiguration) WithAPIVersion(value string) *PlacementApplyConfiguration { - b.APIVersion = &value - return b -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. 
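[Editor's note] Like Location above, the deleted Placement constructor pre-populates name, kind, and apiVersion, so an apply patch built from it always carries enough type information for the server to resolve the target resource. A sketch (pre-patch tree, assumed alias):

package main

import (
	"fmt"

	schedulingv1alpha1ac "github.com/kcp-dev/kcp/sdk/client/applyconfiguration/scheduling/v1alpha1"
)

func main() {
	p := schedulingv1alpha1ac.Placement("my-placement")

	// Kind and APIVersion were set by the constructor, not the caller.
	fmt.Println(*p.Kind, *p.APIVersion, *p.Name)
	// Placement scheduling.kcp.io/v1alpha1 my-placement
}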
-func (b *PlacementApplyConfiguration) WithName(value string) *PlacementApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value - return b -} - -// WithGenerateName sets the GenerateName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the GenerateName field is set to the value of the last call. -func (b *PlacementApplyConfiguration) WithGenerateName(value string) *PlacementApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value - return b -} - -// WithNamespace sets the Namespace field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Namespace field is set to the value of the last call. -func (b *PlacementApplyConfiguration) WithNamespace(value string) *PlacementApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value - return b -} - -// WithUID sets the UID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the UID field is set to the value of the last call. -func (b *PlacementApplyConfiguration) WithUID(value types.UID) *PlacementApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value - return b -} - -// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ResourceVersion field is set to the value of the last call. -func (b *PlacementApplyConfiguration) WithResourceVersion(value string) *PlacementApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value - return b -} - -// WithGeneration sets the Generation field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Generation field is set to the value of the last call. -func (b *PlacementApplyConfiguration) WithGeneration(value int64) *PlacementApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value - return b -} - -// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *PlacementApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PlacementApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value - return b -} - -// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
-func (b *PlacementApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PlacementApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value - return b -} - -// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. -func (b *PlacementApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PlacementApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value - return b -} - -// WithLabels puts the entries into the Labels field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, the entries provided by each call will be put on the Labels field, -// overwriting an existing map entries in Labels field with the same key. -func (b *PlacementApplyConfiguration) WithLabels(entries map[string]string) *PlacementApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) - } - for k, v := range entries { - b.Labels[k] = v - } - return b -} - -// WithAnnotations puts the entries into the Annotations field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, the entries provided by each call will be put on the Annotations field, -// overwriting an existing map entries in Annotations field with the same key. -func (b *PlacementApplyConfiguration) WithAnnotations(entries map[string]string) *PlacementApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) - } - for k, v := range entries { - b.Annotations[k] = v - } - return b -} - -// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *PlacementApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PlacementApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - for i := range values { - if values[i] == nil { - panic("nil value passed to WithOwnerReferences") - } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) - } - return b -} - -// WithFinalizers adds the given value to the Finalizers field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Finalizers field. 
-func (b *PlacementApplyConfiguration) WithFinalizers(values ...string) *PlacementApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) - } - return b -} - -func (b *PlacementApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { - if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} - } -} - -// WithSpec sets the Spec field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Spec field is set to the value of the last call. -func (b *PlacementApplyConfiguration) WithSpec(value *PlacementSpecApplyConfiguration) *PlacementApplyConfiguration { - b.Spec = value - return b -} - -// WithStatus sets the Status field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Status field is set to the value of the last call. -func (b *PlacementApplyConfiguration) WithStatus(value *PlacementStatusApplyConfiguration) *PlacementApplyConfiguration { - b.Status = value - return b -} diff --git a/sdk/client/applyconfiguration/scheduling/v1alpha1/placementspec.go b/sdk/client/applyconfiguration/scheduling/v1alpha1/placementspec.go deleted file mode 100644 index 44e9bef8f4f..00000000000 --- a/sdk/client/applyconfiguration/scheduling/v1alpha1/placementspec.go +++ /dev/null @@ -1,75 +0,0 @@ -/* -Copyright The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1 "github.com/kcp-dev/kcp/sdk/client/applyconfiguration/meta/v1" -) - -// PlacementSpecApplyConfiguration represents an declarative configuration of the PlacementSpec type for use -// with apply. -type PlacementSpecApplyConfiguration struct { - LocationSelectors []v1.LabelSelectorApplyConfiguration `json:"locationSelectors,omitempty"` - LocationResource *GroupVersionResourceApplyConfiguration `json:"locationResource,omitempty"` - NamespaceSelector *v1.LabelSelectorApplyConfiguration `json:"namespaceSelector,omitempty"` - LocationWorkspace *string `json:"locationWorkspace,omitempty"` -} - -// PlacementSpecApplyConfiguration constructs an declarative configuration of the PlacementSpec type for use with -// apply. -func PlacementSpec() *PlacementSpecApplyConfiguration { - return &PlacementSpecApplyConfiguration{} -} - -// WithLocationSelectors adds the given value to the LocationSelectors field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the LocationSelectors field. 
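[Editor's note] The nested builders compose: a PlacementSpec apply configuration plugs into a Placement one via WithSpec. A sketch of end-to-end composition (the workspace path is hypothetical, alias assumed, pre-patch tree):

package main

import (
	"fmt"

	schedulingv1alpha1ac "github.com/kcp-dev/kcp/sdk/client/applyconfiguration/scheduling/v1alpha1"
)

func main() {
	p := schedulingv1alpha1ac.Placement("my-placement").
		WithSpec(schedulingv1alpha1ac.PlacementSpec().
			WithLocationWorkspace("root:org:locations")) // hypothetical path

	fmt.Println(*p.Spec.LocationWorkspace)
}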
-func (b *PlacementSpecApplyConfiguration) WithLocationSelectors(values ...*v1.LabelSelectorApplyConfiguration) *PlacementSpecApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithLocationSelectors") - } - b.LocationSelectors = append(b.LocationSelectors, *values[i]) - } - return b -} - -// WithLocationResource sets the LocationResource field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the LocationResource field is set to the value of the last call. -func (b *PlacementSpecApplyConfiguration) WithLocationResource(value *GroupVersionResourceApplyConfiguration) *PlacementSpecApplyConfiguration { - b.LocationResource = value - return b -} - -// WithNamespaceSelector sets the NamespaceSelector field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the NamespaceSelector field is set to the value of the last call. -func (b *PlacementSpecApplyConfiguration) WithNamespaceSelector(value *v1.LabelSelectorApplyConfiguration) *PlacementSpecApplyConfiguration { - b.NamespaceSelector = value - return b -} - -// WithLocationWorkspace sets the LocationWorkspace field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the LocationWorkspace field is set to the value of the last call. -func (b *PlacementSpecApplyConfiguration) WithLocationWorkspace(value string) *PlacementSpecApplyConfiguration { - b.LocationWorkspace = &value - return b -} diff --git a/sdk/client/applyconfiguration/scheduling/v1alpha1/placementstatus.go b/sdk/client/applyconfiguration/scheduling/v1alpha1/placementstatus.go deleted file mode 100644 index 3cfe439716e..00000000000 --- a/sdk/client/applyconfiguration/scheduling/v1alpha1/placementstatus.go +++ /dev/null @@ -1,62 +0,0 @@ -/* -Copyright The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1" - conditionsv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/apis/conditions/v1alpha1" -) - -// PlacementStatusApplyConfiguration represents an declarative configuration of the PlacementStatus type for use -// with apply. -type PlacementStatusApplyConfiguration struct { - Phase *v1alpha1.PlacementPhase `json:"phase,omitempty"` - SelectedLocation *LocationReferenceApplyConfiguration `json:"selectedLocation,omitempty"` - Conditions *conditionsv1alpha1.Conditions `json:"conditions,omitempty"` -} - -// PlacementStatusApplyConfiguration constructs an declarative configuration of the PlacementStatus type for use with -// apply. 
-func PlacementStatus() *PlacementStatusApplyConfiguration { - return &PlacementStatusApplyConfiguration{} -} - -// WithPhase sets the Phase field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Phase field is set to the value of the last call. -func (b *PlacementStatusApplyConfiguration) WithPhase(value v1alpha1.PlacementPhase) *PlacementStatusApplyConfiguration { - b.Phase = &value - return b -} - -// WithSelectedLocation sets the SelectedLocation field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelectedLocation field is set to the value of the last call. -func (b *PlacementStatusApplyConfiguration) WithSelectedLocation(value *LocationReferenceApplyConfiguration) *PlacementStatusApplyConfiguration { - b.SelectedLocation = value - return b -} - -// WithConditions sets the Conditions field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Conditions field is set to the value of the last call. -func (b *PlacementStatusApplyConfiguration) WithConditions(value conditionsv1alpha1.Conditions) *PlacementStatusApplyConfiguration { - b.Conditions = &value - return b -} diff --git a/sdk/client/applyconfiguration/utils.go b/sdk/client/applyconfiguration/utils.go index fd8a903cd1b..08f4f9e0568 100644 --- a/sdk/client/applyconfiguration/utils.go +++ b/sdk/client/applyconfiguration/utils.go @@ -26,21 +26,17 @@ import ( v1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apiresource/v1alpha1" apisv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1" corev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1" - schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1" tenancyv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1" conditionsv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/apis/conditions/v1alpha1" topologyv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/topology/v1alpha1" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" apiextensionsv1 "github.com/kcp-dev/kcp/sdk/client/applyconfiguration/apiextensions/v1" apiresourcev1alpha1 "github.com/kcp-dev/kcp/sdk/client/applyconfiguration/apiresource/v1alpha1" applyconfigurationapisv1alpha1 "github.com/kcp-dev/kcp/sdk/client/applyconfiguration/apis/v1alpha1" applyconfigurationconditionsv1alpha1 "github.com/kcp-dev/kcp/sdk/client/applyconfiguration/conditions/v1alpha1" applyconfigurationcorev1alpha1 "github.com/kcp-dev/kcp/sdk/client/applyconfiguration/core/v1alpha1" applyconfigurationmetav1 "github.com/kcp-dev/kcp/sdk/client/applyconfiguration/meta/v1" - applyconfigurationschedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/client/applyconfiguration/scheduling/v1alpha1" applyconfigurationtenancyv1alpha1 "github.com/kcp-dev/kcp/sdk/client/applyconfiguration/tenancy/v1alpha1" applyconfigurationtopologyv1alpha1 "github.com/kcp-dev/kcp/sdk/client/applyconfiguration/topology/v1alpha1" - applyconfigurationworkloadv1alpha1 "github.com/kcp-dev/kcp/sdk/client/applyconfiguration/workload/v1alpha1" ) // ForKind returns an apply configuration type for the given GroupVersionKind, or nil if no @@ -203,26 +199,6 @@ func ForKind(kind schema.GroupVersionKind) interface{} { case 
metav1.SchemeGroupVersion.WithKind("TypeMeta"): return &applyconfigurationmetav1.TypeMetaApplyConfiguration{} - // Group=scheduling.kcp.io, Version=v1alpha1 - case schedulingv1alpha1.SchemeGroupVersion.WithKind("AvailableSelectorLabel"): - return &applyconfigurationschedulingv1alpha1.AvailableSelectorLabelApplyConfiguration{} - case schedulingv1alpha1.SchemeGroupVersion.WithKind("GroupVersionResource"): - return &applyconfigurationschedulingv1alpha1.GroupVersionResourceApplyConfiguration{} - case schedulingv1alpha1.SchemeGroupVersion.WithKind("Location"): - return &applyconfigurationschedulingv1alpha1.LocationApplyConfiguration{} - case schedulingv1alpha1.SchemeGroupVersion.WithKind("LocationReference"): - return &applyconfigurationschedulingv1alpha1.LocationReferenceApplyConfiguration{} - case schedulingv1alpha1.SchemeGroupVersion.WithKind("LocationSpec"): - return &applyconfigurationschedulingv1alpha1.LocationSpecApplyConfiguration{} - case schedulingv1alpha1.SchemeGroupVersion.WithKind("LocationStatus"): - return &applyconfigurationschedulingv1alpha1.LocationStatusApplyConfiguration{} - case schedulingv1alpha1.SchemeGroupVersion.WithKind("Placement"): - return &applyconfigurationschedulingv1alpha1.PlacementApplyConfiguration{} - case schedulingv1alpha1.SchemeGroupVersion.WithKind("PlacementSpec"): - return &applyconfigurationschedulingv1alpha1.PlacementSpecApplyConfiguration{} - case schedulingv1alpha1.SchemeGroupVersion.WithKind("PlacementStatus"): - return &applyconfigurationschedulingv1alpha1.PlacementStatusApplyConfiguration{} - // Group=tenancy.kcp.io, Version=v1alpha1 case tenancyv1alpha1.SchemeGroupVersion.WithKind("APIExportReference"): return &applyconfigurationtenancyv1alpha1.APIExportReferenceApplyConfiguration{} @@ -261,20 +237,6 @@ func ForKind(kind schema.GroupVersionKind) interface{} { case topologyv1alpha1.SchemeGroupVersion.WithKind("PartitionSpec"): return &applyconfigurationtopologyv1alpha1.PartitionSpecApplyConfiguration{} - // Group=workload.kcp.io, Version=v1alpha1 - case workloadv1alpha1.SchemeGroupVersion.WithKind("ResourceToSync"): - return &applyconfigurationworkloadv1alpha1.ResourceToSyncApplyConfiguration{} - case workloadv1alpha1.SchemeGroupVersion.WithKind("SyncTarget"): - return &applyconfigurationworkloadv1alpha1.SyncTargetApplyConfiguration{} - case workloadv1alpha1.SchemeGroupVersion.WithKind("SyncTargetSpec"): - return &applyconfigurationworkloadv1alpha1.SyncTargetSpecApplyConfiguration{} - case workloadv1alpha1.SchemeGroupVersion.WithKind("SyncTargetStatus"): - return &applyconfigurationworkloadv1alpha1.SyncTargetStatusApplyConfiguration{} - case workloadv1alpha1.SchemeGroupVersion.WithKind("TunnelWorkspace"): - return &applyconfigurationworkloadv1alpha1.TunnelWorkspaceApplyConfiguration{} - case workloadv1alpha1.SchemeGroupVersion.WithKind("VirtualWorkspace"): - return &applyconfigurationworkloadv1alpha1.VirtualWorkspaceApplyConfiguration{} - } return nil } diff --git a/sdk/client/applyconfiguration/workload/v1alpha1/resourcetosync.go b/sdk/client/applyconfiguration/workload/v1alpha1/resourcetosync.go deleted file mode 100644 index eb33bf813bb..00000000000 --- a/sdk/client/applyconfiguration/workload/v1alpha1/resourcetosync.go +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
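[Editor's note on the utils.go hunk above] After this change, ForKind returns nil for the removed scheduling.kcp.io and workload.kcp.io kinds, and callers are expected to treat nil as "no generated apply configuration for this kind". A sketch of the lookup (package name inferred from the file path):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"

	"github.com/kcp-dev/kcp/sdk/client/applyconfiguration"
)

func main() {
	gvk := schema.GroupVersionKind{Group: "scheduling.kcp.io", Version: "v1alpha1", Kind: "Location"}

	// Post-patch this returns nil: the scheduling cases were dropped
	// from the switch in ForKind.
	if ac := applyconfiguration.ForKind(gvk); ac == nil {
		fmt.Println("no apply configuration registered for", gvk)
	}
}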
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" - v1alpha1 "github.com/kcp-dev/kcp/sdk/client/applyconfiguration/apis/v1alpha1" -) - -// ResourceToSyncApplyConfiguration represents an declarative configuration of the ResourceToSync type for use -// with apply. -type ResourceToSyncApplyConfiguration struct { - *v1alpha1.GroupResourceApplyConfiguration `json:"GroupResource,omitempty"` - Versions []string `json:"versions,omitempty"` - IdentityHash *string `json:"identityHash,omitempty"` - State *workloadv1alpha1.ResourceCompatibleState `json:"state,omitempty"` -} - -// ResourceToSyncApplyConfiguration constructs an declarative configuration of the ResourceToSync type for use with -// apply. -func ResourceToSync() *ResourceToSyncApplyConfiguration { - return &ResourceToSyncApplyConfiguration{} -} - -// WithGroup sets the Group field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Group field is set to the value of the last call. -func (b *ResourceToSyncApplyConfiguration) WithGroup(value string) *ResourceToSyncApplyConfiguration { - b.ensureGroupResourceApplyConfigurationExists() - b.Group = &value - return b -} - -// WithResource sets the Resource field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Resource field is set to the value of the last call. -func (b *ResourceToSyncApplyConfiguration) WithResource(value string) *ResourceToSyncApplyConfiguration { - b.ensureGroupResourceApplyConfigurationExists() - b.Resource = &value - return b -} - -func (b *ResourceToSyncApplyConfiguration) ensureGroupResourceApplyConfigurationExists() { - if b.GroupResourceApplyConfiguration == nil { - b.GroupResourceApplyConfiguration = &v1alpha1.GroupResourceApplyConfiguration{} - } -} - -// WithVersions adds the given value to the Versions field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Versions field. -func (b *ResourceToSyncApplyConfiguration) WithVersions(values ...string) *ResourceToSyncApplyConfiguration { - for i := range values { - b.Versions = append(b.Versions, values[i]) - } - return b -} - -// WithIdentityHash sets the IdentityHash field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the IdentityHash field is set to the value of the last call. 
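[Editor's note] The ResourceToSync builder deleted above embeds a GroupResource apply configuration, and WithGroup/WithResource write through that embedding, lazily allocating it on first use (ensureGroupResourceApplyConfigurationExists). A sketch (pre-patch tree, alias my own):

package main

import (
	"fmt"

	workloadv1alpha1ac "github.com/kcp-dev/kcp/sdk/client/applyconfiguration/workload/v1alpha1"
)

func main() {
	r := workloadv1alpha1ac.ResourceToSync().
		WithGroup("apps").          // allocates the embedded GroupResource
		WithResource("deployments").
		WithVersions("v1")

	// Group and Resource are promoted from the embedded configuration.
	fmt.Println(*r.Group, *r.Resource, r.Versions)
}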
-func (b *ResourceToSyncApplyConfiguration) WithIdentityHash(value string) *ResourceToSyncApplyConfiguration { - b.IdentityHash = &value - return b -} - -// WithState sets the State field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the State field is set to the value of the last call. -func (b *ResourceToSyncApplyConfiguration) WithState(value workloadv1alpha1.ResourceCompatibleState) *ResourceToSyncApplyConfiguration { - b.State = &value - return b -} diff --git a/sdk/client/applyconfiguration/workload/v1alpha1/synctarget.go b/sdk/client/applyconfiguration/workload/v1alpha1/synctarget.go deleted file mode 100644 index a16f85580d8..00000000000 --- a/sdk/client/applyconfiguration/workload/v1alpha1/synctarget.go +++ /dev/null @@ -1,219 +0,0 @@ -/* -Copyright The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - - v1 "github.com/kcp-dev/kcp/sdk/client/applyconfiguration/meta/v1" -) - -// SyncTargetApplyConfiguration represents an declarative configuration of the SyncTarget type for use -// with apply. -type SyncTargetApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *SyncTargetSpecApplyConfiguration `json:"spec,omitempty"` - Status *SyncTargetStatusApplyConfiguration `json:"status,omitempty"` -} - -// SyncTarget constructs an declarative configuration of the SyncTarget type for use with -// apply. -func SyncTarget(name string) *SyncTargetApplyConfiguration { - b := &SyncTargetApplyConfiguration{} - b.WithName(name) - b.WithKind("SyncTarget") - b.WithAPIVersion("workload.kcp.io/v1alpha1") - return b -} - -// WithKind sets the Kind field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Kind field is set to the value of the last call. -func (b *SyncTargetApplyConfiguration) WithKind(value string) *SyncTargetApplyConfiguration { - b.Kind = &value - return b -} - -// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the APIVersion field is set to the value of the last call. -func (b *SyncTargetApplyConfiguration) WithAPIVersion(value string) *SyncTargetApplyConfiguration { - b.APIVersion = &value - return b -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. 
-func (b *SyncTargetApplyConfiguration) WithName(value string) *SyncTargetApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value - return b -} - -// WithGenerateName sets the GenerateName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the GenerateName field is set to the value of the last call. -func (b *SyncTargetApplyConfiguration) WithGenerateName(value string) *SyncTargetApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value - return b -} - -// WithNamespace sets the Namespace field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Namespace field is set to the value of the last call. -func (b *SyncTargetApplyConfiguration) WithNamespace(value string) *SyncTargetApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value - return b -} - -// WithUID sets the UID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the UID field is set to the value of the last call. -func (b *SyncTargetApplyConfiguration) WithUID(value types.UID) *SyncTargetApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value - return b -} - -// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ResourceVersion field is set to the value of the last call. -func (b *SyncTargetApplyConfiguration) WithResourceVersion(value string) *SyncTargetApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value - return b -} - -// WithGeneration sets the Generation field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Generation field is set to the value of the last call. -func (b *SyncTargetApplyConfiguration) WithGeneration(value int64) *SyncTargetApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value - return b -} - -// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *SyncTargetApplyConfiguration) WithCreationTimestamp(value metav1.Time) *SyncTargetApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value - return b -} - -// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
-func (b *SyncTargetApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *SyncTargetApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value - return b -} - -// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. -func (b *SyncTargetApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *SyncTargetApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value - return b -} - -// WithLabels puts the entries into the Labels field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, the entries provided by each call will be put on the Labels field, -// overwriting an existing map entries in Labels field with the same key. -func (b *SyncTargetApplyConfiguration) WithLabels(entries map[string]string) *SyncTargetApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) - } - for k, v := range entries { - b.Labels[k] = v - } - return b -} - -// WithAnnotations puts the entries into the Annotations field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, the entries provided by each call will be put on the Annotations field, -// overwriting an existing map entries in Annotations field with the same key. -func (b *SyncTargetApplyConfiguration) WithAnnotations(entries map[string]string) *SyncTargetApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) - } - for k, v := range entries { - b.Annotations[k] = v - } - return b -} - -// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *SyncTargetApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *SyncTargetApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - for i := range values { - if values[i] == nil { - panic("nil value passed to WithOwnerReferences") - } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) - } - return b -} - -// WithFinalizers adds the given value to the Finalizers field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Finalizers field. 
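The generated builders being deleted here all follow the same applyconfiguration-gen shape: every field is a pointer (or nil-able map/slice) so that "unset" stays distinguishable from "zero value" under server-side apply, and each WithX setter writes the value and returns the receiver so calls can be chained, with the last call winning. A minimal self-contained sketch of that pattern, using toy names rather than the real generated identifiers:

package main

import "fmt"

// SpecApplyConfiguration mimics the generated builders: pointer and map
// fields with omitempty, so only explicitly-set values are serialized.
type SpecApplyConfiguration struct {
	Unschedulable *bool             `json:"unschedulable,omitempty"`
	Cells         map[string]string `json:"cells,omitempty"`
}

// WithUnschedulable sets the field and returns the receiver for chaining.
// If called multiple times, the last call wins.
func (b *SpecApplyConfiguration) WithUnschedulable(value bool) *SpecApplyConfiguration {
	b.Unschedulable = &value
	return b
}

// WithCells merges entries into the Cells map, overwriting existing
// entries with the same key, as the generated map setters do.
func (b *SpecApplyConfiguration) WithCells(entries map[string]string) *SpecApplyConfiguration {
	if b.Cells == nil && len(entries) > 0 {
		b.Cells = make(map[string]string, len(entries))
	}
	for k, v := range entries {
		b.Cells[k] = v
	}
	return b
}

func main() {
	spec := (&SpecApplyConfiguration{}).
		WithUnschedulable(true).
		WithUnschedulable(false). // last call wins: false
		WithCells(map[string]string{"region": "us-east"})
	fmt.Println(*spec.Unschedulable, spec.Cells)
}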
-func (b *SyncTargetApplyConfiguration) WithFinalizers(values ...string) *SyncTargetApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) - } - return b -} - -func (b *SyncTargetApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { - if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} - } -} - -// WithSpec sets the Spec field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Spec field is set to the value of the last call. -func (b *SyncTargetApplyConfiguration) WithSpec(value *SyncTargetSpecApplyConfiguration) *SyncTargetApplyConfiguration { - b.Spec = value - return b -} - -// WithStatus sets the Status field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Status field is set to the value of the last call. -func (b *SyncTargetApplyConfiguration) WithStatus(value *SyncTargetStatusApplyConfiguration) *SyncTargetApplyConfiguration { - b.Status = value - return b -} diff --git a/sdk/client/applyconfiguration/workload/v1alpha1/synctargetspec.go b/sdk/client/applyconfiguration/workload/v1alpha1/synctargetspec.go deleted file mode 100644 index f299883ac66..00000000000 --- a/sdk/client/applyconfiguration/workload/v1alpha1/synctargetspec.go +++ /dev/null @@ -1,83 +0,0 @@ -/* -Copyright The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - v1alpha1 "github.com/kcp-dev/kcp/sdk/client/applyconfiguration/tenancy/v1alpha1" -) - -// SyncTargetSpecApplyConfiguration represents an declarative configuration of the SyncTargetSpec type for use -// with apply. -type SyncTargetSpecApplyConfiguration struct { - Unschedulable *bool `json:"unschedulable,omitempty"` - EvictAfter *v1.Time `json:"evictAfter,omitempty"` - SupportedAPIExports []v1alpha1.APIExportReferenceApplyConfiguration `json:"supportedAPIExports,omitempty"` - Cells map[string]string `json:"cells,omitempty"` -} - -// SyncTargetSpecApplyConfiguration constructs an declarative configuration of the SyncTargetSpec type for use with -// apply. -func SyncTargetSpec() *SyncTargetSpecApplyConfiguration { - return &SyncTargetSpecApplyConfiguration{} -} - -// WithUnschedulable sets the Unschedulable field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Unschedulable field is set to the value of the last call. 
-func (b *SyncTargetSpecApplyConfiguration) WithUnschedulable(value bool) *SyncTargetSpecApplyConfiguration { - b.Unschedulable = &value - return b -} - -// WithEvictAfter sets the EvictAfter field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the EvictAfter field is set to the value of the last call. -func (b *SyncTargetSpecApplyConfiguration) WithEvictAfter(value v1.Time) *SyncTargetSpecApplyConfiguration { - b.EvictAfter = &value - return b -} - -// WithSupportedAPIExports adds the given value to the SupportedAPIExports field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the SupportedAPIExports field. -func (b *SyncTargetSpecApplyConfiguration) WithSupportedAPIExports(values ...*v1alpha1.APIExportReferenceApplyConfiguration) *SyncTargetSpecApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithSupportedAPIExports") - } - b.SupportedAPIExports = append(b.SupportedAPIExports, *values[i]) - } - return b -} - -// WithCells puts the entries into the Cells field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, the entries provided by each call will be put on the Cells field, -// overwriting an existing map entries in Cells field with the same key. -func (b *SyncTargetSpecApplyConfiguration) WithCells(entries map[string]string) *SyncTargetSpecApplyConfiguration { - if b.Cells == nil && len(entries) > 0 { - b.Cells = make(map[string]string, len(entries)) - } - for k, v := range entries { - b.Cells[k] = v - } - return b -} diff --git a/sdk/client/applyconfiguration/workload/v1alpha1/synctargetstatus.go b/sdk/client/applyconfiguration/workload/v1alpha1/synctargetstatus.go deleted file mode 100644 index a4083e3fb6d..00000000000 --- a/sdk/client/applyconfiguration/workload/v1alpha1/synctargetstatus.go +++ /dev/null @@ -1,115 +0,0 @@ -/* -Copyright The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - v1alpha1 "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/apis/conditions/v1alpha1" -) - -// SyncTargetStatusApplyConfiguration represents an declarative configuration of the SyncTargetStatus type for use -// with apply. 
-type SyncTargetStatusApplyConfiguration struct { - Allocatable *v1.ResourceList `json:"allocatable,omitempty"` - Capacity *v1.ResourceList `json:"capacity,omitempty"` - Conditions *v1alpha1.Conditions `json:"conditions,omitempty"` - SyncedResources []ResourceToSyncApplyConfiguration `json:"syncedResources,omitempty"` - LastSyncerHeartbeatTime *metav1.Time `json:"lastSyncerHeartbeatTime,omitempty"` - VirtualWorkspaces []VirtualWorkspaceApplyConfiguration `json:"virtualWorkspaces,omitempty"` - TunnelWorkspaces []TunnelWorkspaceApplyConfiguration `json:"tunnelWorkspaces,omitempty"` -} - -// SyncTargetStatusApplyConfiguration constructs an declarative configuration of the SyncTargetStatus type for use with -// apply. -func SyncTargetStatus() *SyncTargetStatusApplyConfiguration { - return &SyncTargetStatusApplyConfiguration{} -} - -// WithAllocatable sets the Allocatable field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Allocatable field is set to the value of the last call. -func (b *SyncTargetStatusApplyConfiguration) WithAllocatable(value v1.ResourceList) *SyncTargetStatusApplyConfiguration { - b.Allocatable = &value - return b -} - -// WithCapacity sets the Capacity field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Capacity field is set to the value of the last call. -func (b *SyncTargetStatusApplyConfiguration) WithCapacity(value v1.ResourceList) *SyncTargetStatusApplyConfiguration { - b.Capacity = &value - return b -} - -// WithConditions sets the Conditions field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Conditions field is set to the value of the last call. -func (b *SyncTargetStatusApplyConfiguration) WithConditions(value v1alpha1.Conditions) *SyncTargetStatusApplyConfiguration { - b.Conditions = &value - return b -} - -// WithSyncedResources adds the given value to the SyncedResources field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the SyncedResources field. -func (b *SyncTargetStatusApplyConfiguration) WithSyncedResources(values ...*ResourceToSyncApplyConfiguration) *SyncTargetStatusApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithSyncedResources") - } - b.SyncedResources = append(b.SyncedResources, *values[i]) - } - return b -} - -// WithLastSyncerHeartbeatTime sets the LastSyncerHeartbeatTime field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the LastSyncerHeartbeatTime field is set to the value of the last call. -func (b *SyncTargetStatusApplyConfiguration) WithLastSyncerHeartbeatTime(value metav1.Time) *SyncTargetStatusApplyConfiguration { - b.LastSyncerHeartbeatTime = &value - return b -} - -// WithVirtualWorkspaces adds the given value to the VirtualWorkspaces field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
-// If called multiple times, values provided by each call will be appended to the VirtualWorkspaces field. -func (b *SyncTargetStatusApplyConfiguration) WithVirtualWorkspaces(values ...*VirtualWorkspaceApplyConfiguration) *SyncTargetStatusApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithVirtualWorkspaces") - } - b.VirtualWorkspaces = append(b.VirtualWorkspaces, *values[i]) - } - return b -} - -// WithTunnelWorkspaces adds the given value to the TunnelWorkspaces field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the TunnelWorkspaces field. -func (b *SyncTargetStatusApplyConfiguration) WithTunnelWorkspaces(values ...*TunnelWorkspaceApplyConfiguration) *SyncTargetStatusApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithTunnelWorkspaces") - } - b.TunnelWorkspaces = append(b.TunnelWorkspaces, *values[i]) - } - return b -} diff --git a/sdk/client/applyconfiguration/workload/v1alpha1/tunnelworkspace.go b/sdk/client/applyconfiguration/workload/v1alpha1/tunnelworkspace.go deleted file mode 100644 index 792c8e5f116..00000000000 --- a/sdk/client/applyconfiguration/workload/v1alpha1/tunnelworkspace.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -// TunnelWorkspaceApplyConfiguration represents an declarative configuration of the TunnelWorkspace type for use -// with apply. -type TunnelWorkspaceApplyConfiguration struct { - URL *string `json:"url,omitempty"` -} - -// TunnelWorkspaceApplyConfiguration constructs an declarative configuration of the TunnelWorkspace type for use with -// apply. -func TunnelWorkspace() *TunnelWorkspaceApplyConfiguration { - return &TunnelWorkspaceApplyConfiguration{} -} - -// WithURL sets the URL field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the URL field is set to the value of the last call. -func (b *TunnelWorkspaceApplyConfiguration) WithURL(value string) *TunnelWorkspaceApplyConfiguration { - b.URL = &value - return b -} diff --git a/sdk/client/applyconfiguration/workload/v1alpha1/virtualworkspace.go b/sdk/client/applyconfiguration/workload/v1alpha1/virtualworkspace.go deleted file mode 100644 index 9ea0707b32c..00000000000 --- a/sdk/client/applyconfiguration/workload/v1alpha1/virtualworkspace.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -// VirtualWorkspaceApplyConfiguration represents an declarative configuration of the VirtualWorkspace type for use -// with apply. -type VirtualWorkspaceApplyConfiguration struct { - SyncerURL *string `json:"syncerURL,omitempty"` - UpsyncerURL *string `json:"upsyncerURL,omitempty"` -} - -// VirtualWorkspaceApplyConfiguration constructs an declarative configuration of the VirtualWorkspace type for use with -// apply. -func VirtualWorkspace() *VirtualWorkspaceApplyConfiguration { - return &VirtualWorkspaceApplyConfiguration{} -} - -// WithSyncerURL sets the SyncerURL field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SyncerURL field is set to the value of the last call. -func (b *VirtualWorkspaceApplyConfiguration) WithSyncerURL(value string) *VirtualWorkspaceApplyConfiguration { - b.SyncerURL = &value - return b -} - -// WithUpsyncerURL sets the UpsyncerURL field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the UpsyncerURL field is set to the value of the last call. -func (b *VirtualWorkspaceApplyConfiguration) WithUpsyncerURL(value string) *VirtualWorkspaceApplyConfiguration { - b.UpsyncerURL = &value - return b -} diff --git a/sdk/client/clientset/versioned/clientset.go b/sdk/client/clientset/versioned/clientset.go index 89ebf104dd4..b0b801c1c44 100644 --- a/sdk/client/clientset/versioned/clientset.go +++ b/sdk/client/clientset/versioned/clientset.go @@ -29,10 +29,8 @@ import ( apiresourcev1alpha1 "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/typed/apiresource/v1alpha1" apisv1alpha1 "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/typed/apis/v1alpha1" corev1alpha1 "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/typed/core/v1alpha1" - schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/typed/scheduling/v1alpha1" tenancyv1alpha1 "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/typed/tenancy/v1alpha1" topologyv1alpha1 "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/typed/topology/v1alpha1" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/typed/workload/v1alpha1" ) type Interface interface { @@ -40,10 +38,8 @@ type Interface interface { ApiresourceV1alpha1() apiresourcev1alpha1.ApiresourceV1alpha1Interface ApisV1alpha1() apisv1alpha1.ApisV1alpha1Interface CoreV1alpha1() corev1alpha1.CoreV1alpha1Interface - SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1Interface TenancyV1alpha1() tenancyv1alpha1.TenancyV1alpha1Interface TopologyV1alpha1() topologyv1alpha1.TopologyV1alpha1Interface - WorkloadV1alpha1() workloadv1alpha1.WorkloadV1alpha1Interface } // Clientset contains the clients for groups. 
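This file shows the other half of the generated surface: the root clientset, where each API group contributes exactly one accessor on the Interface, one private field on the Clientset struct, one getter, and one line per constructor — which is why removing the scheduling and workload groups is a uniform deletion across those four places. A self-contained sketch of that wiring, with a single toy group standing in for the real kcp clients:

package main

import "fmt"

// TenancyV1alpha1Interface stands in for one generated group client.
type TenancyV1alpha1Interface interface{ GroupVersion() string }

type tenancyV1alpha1Client struct{}

func (c *tenancyV1alpha1Client) GroupVersion() string { return "tenancy.kcp.io/v1alpha1" }

// Interface is the clientset root: one accessor per API group.
type Interface interface {
	TenancyV1alpha1() TenancyV1alpha1Interface
}

// Clientset carries one private client per group; dropping a group removes
// the field, the accessor, and the corresponding constructor lines.
type Clientset struct {
	tenancyV1alpha1 *tenancyV1alpha1Client
}

func (c *Clientset) TenancyV1alpha1() TenancyV1alpha1Interface {
	return c.tenancyV1alpha1
}

// New wires up all group clients, mirroring the generated New/NewForConfig.
func New() *Clientset {
	return &Clientset{tenancyV1alpha1: &tenancyV1alpha1Client{}}
}

func main() {
	var cs Interface = New()
	fmt.Println(cs.TenancyV1alpha1().GroupVersion())
}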
@@ -52,10 +48,8 @@ type Clientset struct { apiresourceV1alpha1 *apiresourcev1alpha1.ApiresourceV1alpha1Client apisV1alpha1 *apisv1alpha1.ApisV1alpha1Client coreV1alpha1 *corev1alpha1.CoreV1alpha1Client - schedulingV1alpha1 *schedulingv1alpha1.SchedulingV1alpha1Client tenancyV1alpha1 *tenancyv1alpha1.TenancyV1alpha1Client topologyV1alpha1 *topologyv1alpha1.TopologyV1alpha1Client - workloadV1alpha1 *workloadv1alpha1.WorkloadV1alpha1Client } // ApiresourceV1alpha1 retrieves the ApiresourceV1alpha1Client @@ -73,11 +67,6 @@ func (c *Clientset) CoreV1alpha1() corev1alpha1.CoreV1alpha1Interface { return c.coreV1alpha1 } -// SchedulingV1alpha1 retrieves the SchedulingV1alpha1Client -func (c *Clientset) SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1Interface { - return c.schedulingV1alpha1 -} - // TenancyV1alpha1 retrieves the TenancyV1alpha1Client func (c *Clientset) TenancyV1alpha1() tenancyv1alpha1.TenancyV1alpha1Interface { return c.tenancyV1alpha1 @@ -88,11 +77,6 @@ func (c *Clientset) TopologyV1alpha1() topologyv1alpha1.TopologyV1alpha1Interfac return c.topologyV1alpha1 } -// WorkloadV1alpha1 retrieves the WorkloadV1alpha1Client -func (c *Clientset) WorkloadV1alpha1() workloadv1alpha1.WorkloadV1alpha1Interface { - return c.workloadV1alpha1 -} - // Discovery retrieves the DiscoveryClient func (c *Clientset) Discovery() discovery.DiscoveryInterface { if c == nil { @@ -149,10 +133,6 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, if err != nil { return nil, err } - cs.schedulingV1alpha1, err = schedulingv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) - if err != nil { - return nil, err - } cs.tenancyV1alpha1, err = tenancyv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err @@ -161,10 +141,6 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, if err != nil { return nil, err } - cs.workloadV1alpha1, err = workloadv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) - if err != nil { - return nil, err - } cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) if err != nil { @@ -189,10 +165,8 @@ func New(c rest.Interface) *Clientset { cs.apiresourceV1alpha1 = apiresourcev1alpha1.New(c) cs.apisV1alpha1 = apisv1alpha1.New(c) cs.coreV1alpha1 = corev1alpha1.New(c) - cs.schedulingV1alpha1 = schedulingv1alpha1.New(c) cs.tenancyV1alpha1 = tenancyv1alpha1.New(c) cs.topologyV1alpha1 = topologyv1alpha1.New(c) - cs.workloadV1alpha1 = workloadv1alpha1.New(c) cs.DiscoveryClient = discovery.NewDiscoveryClient(c) return &cs diff --git a/sdk/client/clientset/versioned/cluster/clientset.go b/sdk/client/clientset/versioned/cluster/clientset.go index 7ee35251b95..335f1fcac4a 100644 --- a/sdk/client/clientset/versioned/cluster/clientset.go +++ b/sdk/client/clientset/versioned/cluster/clientset.go @@ -36,10 +36,8 @@ import ( apiresourcev1alpha1 "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster/typed/apiresource/v1alpha1" apisv1alpha1 "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster/typed/apis/v1alpha1" corev1alpha1 "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster/typed/core/v1alpha1" - schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster/typed/scheduling/v1alpha1" tenancyv1alpha1 "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster/typed/tenancy/v1alpha1" topologyv1alpha1 
"github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster/typed/topology/v1alpha1" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster/typed/workload/v1alpha1" ) type ClusterInterface interface { @@ -48,10 +46,8 @@ type ClusterInterface interface { ApiresourceV1alpha1() apiresourcev1alpha1.ApiresourceV1alpha1ClusterInterface ApisV1alpha1() apisv1alpha1.ApisV1alpha1ClusterInterface CoreV1alpha1() corev1alpha1.CoreV1alpha1ClusterInterface - SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1ClusterInterface TenancyV1alpha1() tenancyv1alpha1.TenancyV1alpha1ClusterInterface TopologyV1alpha1() topologyv1alpha1.TopologyV1alpha1ClusterInterface - WorkloadV1alpha1() workloadv1alpha1.WorkloadV1alpha1ClusterInterface } // ClusterClientset contains the clients for groups. @@ -61,10 +57,8 @@ type ClusterClientset struct { apiresourceV1alpha1 *apiresourcev1alpha1.ApiresourceV1alpha1ClusterClient apisV1alpha1 *apisv1alpha1.ApisV1alpha1ClusterClient coreV1alpha1 *corev1alpha1.CoreV1alpha1ClusterClient - schedulingV1alpha1 *schedulingv1alpha1.SchedulingV1alpha1ClusterClient tenancyV1alpha1 *tenancyv1alpha1.TenancyV1alpha1ClusterClient topologyV1alpha1 *topologyv1alpha1.TopologyV1alpha1ClusterClient - workloadV1alpha1 *workloadv1alpha1.WorkloadV1alpha1ClusterClient } // Discovery retrieves the DiscoveryClient @@ -90,11 +84,6 @@ func (c *ClusterClientset) CoreV1alpha1() corev1alpha1.CoreV1alpha1ClusterInterf return c.coreV1alpha1 } -// SchedulingV1alpha1 retrieves the SchedulingV1alpha1ClusterClient. -func (c *ClusterClientset) SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1ClusterInterface { - return c.schedulingV1alpha1 -} - // TenancyV1alpha1 retrieves the TenancyV1alpha1ClusterClient. func (c *ClusterClientset) TenancyV1alpha1() tenancyv1alpha1.TenancyV1alpha1ClusterInterface { return c.tenancyV1alpha1 @@ -105,11 +94,6 @@ func (c *ClusterClientset) TopologyV1alpha1() topologyv1alpha1.TopologyV1alpha1C return c.topologyV1alpha1 } -// WorkloadV1alpha1 retrieves the WorkloadV1alpha1ClusterClient. -func (c *ClusterClientset) WorkloadV1alpha1() workloadv1alpha1.WorkloadV1alpha1ClusterInterface { - return c.workloadV1alpha1 -} - // Cluster scopes this clientset to one cluster. 
func (c *ClusterClientset) Cluster(clusterPath logicalcluster.Path) client.Interface { if clusterPath == logicalcluster.Wildcard { @@ -174,10 +158,6 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*ClusterCli if err != nil { return nil, err } - cs.schedulingV1alpha1, err = schedulingv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) - if err != nil { - return nil, err - } cs.tenancyV1alpha1, err = tenancyv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err @@ -186,10 +166,6 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*ClusterCli if err != nil { return nil, err } - cs.workloadV1alpha1, err = workloadv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) - if err != nil { - return nil, err - } cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) if err != nil { diff --git a/sdk/client/clientset/versioned/cluster/fake/clientset.go b/sdk/client/clientset/versioned/cluster/fake/clientset.go index cd4b8c66258..9066dd2c766 100644 --- a/sdk/client/clientset/versioned/cluster/fake/clientset.go +++ b/sdk/client/clientset/versioned/cluster/fake/clientset.go @@ -37,22 +37,16 @@ import ( fakeapisv1alpha1 "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster/typed/apis/v1alpha1/fake" kcpcorev1alpha1 "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster/typed/core/v1alpha1" fakecorev1alpha1 "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster/typed/core/v1alpha1/fake" - kcpschedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster/typed/scheduling/v1alpha1" - fakeschedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster/typed/scheduling/v1alpha1/fake" kcptenancyv1alpha1 "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster/typed/tenancy/v1alpha1" faketenancyv1alpha1 "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster/typed/tenancy/v1alpha1/fake" kcptopologyv1alpha1 "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster/typed/topology/v1alpha1" faketopologyv1alpha1 "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster/typed/topology/v1alpha1/fake" - kcpworkloadv1alpha1 "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster/typed/workload/v1alpha1" - fakeworkloadv1alpha1 "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster/typed/workload/v1alpha1/fake" clientscheme "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/scheme" apiresourcev1alpha1 "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/typed/apiresource/v1alpha1" apisv1alpha1 "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/typed/apis/v1alpha1" corev1alpha1 "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/typed/core/v1alpha1" - schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/typed/scheduling/v1alpha1" tenancyv1alpha1 "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/typed/tenancy/v1alpha1" topologyv1alpha1 "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/typed/topology/v1alpha1" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/typed/workload/v1alpha1" ) // NewSimpleClientset returns a clientset that will respond with the provided objects. @@ -104,11 +98,6 @@ func (c *ClusterClientset) CoreV1alpha1() kcpcorev1alpha1.CoreV1alpha1ClusterInt return &fakecorev1alpha1.CoreV1alpha1ClusterClient{Fake: c.Fake} } -// SchedulingV1alpha1 retrieves the SchedulingV1alpha1ClusterClient. 
-func (c *ClusterClientset) SchedulingV1alpha1() kcpschedulingv1alpha1.SchedulingV1alpha1ClusterInterface { - return &fakeschedulingv1alpha1.SchedulingV1alpha1ClusterClient{Fake: c.Fake} -} - // TenancyV1alpha1 retrieves the TenancyV1alpha1ClusterClient. func (c *ClusterClientset) TenancyV1alpha1() kcptenancyv1alpha1.TenancyV1alpha1ClusterInterface { return &faketenancyv1alpha1.TenancyV1alpha1ClusterClient{Fake: c.Fake} @@ -119,11 +108,6 @@ func (c *ClusterClientset) TopologyV1alpha1() kcptopologyv1alpha1.TopologyV1alph return &faketopologyv1alpha1.TopologyV1alpha1ClusterClient{Fake: c.Fake} } -// WorkloadV1alpha1 retrieves the WorkloadV1alpha1ClusterClient. -func (c *ClusterClientset) WorkloadV1alpha1() kcpworkloadv1alpha1.WorkloadV1alpha1ClusterInterface { - return &fakeworkloadv1alpha1.WorkloadV1alpha1ClusterClient{Fake: c.Fake} -} - // Cluster scopes this clientset to one cluster. func (c *ClusterClientset) Cluster(clusterPath logicalcluster.Path) client.Interface { if clusterPath == logicalcluster.Wildcard { @@ -171,11 +155,6 @@ func (c *Clientset) CoreV1alpha1() corev1alpha1.CoreV1alpha1Interface { return &fakecorev1alpha1.CoreV1alpha1Client{Fake: c.Fake, ClusterPath: c.clusterPath} } -// SchedulingV1alpha1 retrieves the SchedulingV1alpha1Client. -func (c *Clientset) SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1Interface { - return &fakeschedulingv1alpha1.SchedulingV1alpha1Client{Fake: c.Fake, ClusterPath: c.clusterPath} -} - // TenancyV1alpha1 retrieves the TenancyV1alpha1Client. func (c *Clientset) TenancyV1alpha1() tenancyv1alpha1.TenancyV1alpha1Interface { return &faketenancyv1alpha1.TenancyV1alpha1Client{Fake: c.Fake, ClusterPath: c.clusterPath} @@ -185,8 +164,3 @@ func (c *Clientset) TenancyV1alpha1() tenancyv1alpha1.TenancyV1alpha1Interface { func (c *Clientset) TopologyV1alpha1() topologyv1alpha1.TopologyV1alpha1Interface { return &faketopologyv1alpha1.TopologyV1alpha1Client{Fake: c.Fake, ClusterPath: c.clusterPath} } - -// WorkloadV1alpha1 retrieves the WorkloadV1alpha1Client. -func (c *Clientset) WorkloadV1alpha1() workloadv1alpha1.WorkloadV1alpha1Interface { - return &fakeworkloadv1alpha1.WorkloadV1alpha1Client{Fake: c.Fake, ClusterPath: c.clusterPath} -} diff --git a/sdk/client/clientset/versioned/cluster/scheme/register.go b/sdk/client/clientset/versioned/cluster/scheme/register.go index a27494a6615..df030f3dcf4 100644 --- a/sdk/client/clientset/versioned/cluster/scheme/register.go +++ b/sdk/client/clientset/versioned/cluster/scheme/register.go @@ -31,10 +31,8 @@ import ( apiresourcev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apiresource/v1alpha1" apisv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1" corev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1" - schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1" tenancyv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1" topologyv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/topology/v1alpha1" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" ) var Scheme = runtime.NewScheme() @@ -44,10 +42,8 @@ var localSchemeBuilder = runtime.SchemeBuilder{ apiresourcev1alpha1.AddToScheme, apisv1alpha1.AddToScheme, corev1alpha1.AddToScheme, - schedulingv1alpha1.AddToScheme, tenancyv1alpha1.AddToScheme, topologyv1alpha1.AddToScheme, - workloadv1alpha1.AddToScheme, } // AddToScheme adds all types of this clientset into the given scheme. 
This allows composition diff --git a/sdk/client/clientset/versioned/cluster/typed/scheduling/v1alpha1/fake/location.go b/sdk/client/clientset/versioned/cluster/typed/scheduling/v1alpha1/fake/location.go deleted file mode 100644 index 3c53a12b126..00000000000 --- a/sdk/client/clientset/versioned/cluster/typed/scheduling/v1alpha1/fake/location.go +++ /dev/null @@ -1,202 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by kcp code-generator. DO NOT EDIT. - -package fake - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/kcp-dev/logicalcluster/v3" - - kcptesting "github.com/kcp-dev/client-go/third_party/k8s.io/client-go/testing" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/testing" - - schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1" - applyconfigurationsschedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/client/applyconfiguration/scheduling/v1alpha1" - schedulingv1alpha1client "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/typed/scheduling/v1alpha1" -) - -var locationsResource = schema.GroupVersionResource{Group: "scheduling.kcp.io", Version: "v1alpha1", Resource: "locations"} -var locationsKind = schema.GroupVersionKind{Group: "scheduling.kcp.io", Version: "v1alpha1", Kind: "Location"} - -type locationsClusterClient struct { - *kcptesting.Fake -} - -// Cluster scopes the client down to a particular cluster. -func (c *locationsClusterClient) Cluster(clusterPath logicalcluster.Path) schedulingv1alpha1client.LocationInterface { - if clusterPath == logicalcluster.Wildcard { - panic("A specific cluster must be provided when scoping, not the wildcard.") - } - - return &locationsClient{Fake: c.Fake, ClusterPath: clusterPath} -} - -// List takes label and field selectors, and returns the list of Locations that match those selectors across all clusters. -func (c *locationsClusterClient) List(ctx context.Context, opts metav1.ListOptions) (*schedulingv1alpha1.LocationList, error) { - obj, err := c.Fake.Invokes(kcptesting.NewRootListAction(locationsResource, locationsKind, logicalcluster.Wildcard, opts), &schedulingv1alpha1.LocationList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &schedulingv1alpha1.LocationList{ListMeta: obj.(*schedulingv1alpha1.LocationList).ListMeta} - for _, item := range obj.(*schedulingv1alpha1.LocationList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested Locations across all clusters. 
-func (c *locationsClusterClient) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake.InvokesWatch(kcptesting.NewRootWatchAction(locationsResource, logicalcluster.Wildcard, opts)) -} - -type locationsClient struct { - *kcptesting.Fake - ClusterPath logicalcluster.Path -} - -func (c *locationsClient) Create(ctx context.Context, location *schedulingv1alpha1.Location, opts metav1.CreateOptions) (*schedulingv1alpha1.Location, error) { - obj, err := c.Fake.Invokes(kcptesting.NewRootCreateAction(locationsResource, c.ClusterPath, location), &schedulingv1alpha1.Location{}) - if obj == nil { - return nil, err - } - return obj.(*schedulingv1alpha1.Location), err -} - -func (c *locationsClient) Update(ctx context.Context, location *schedulingv1alpha1.Location, opts metav1.UpdateOptions) (*schedulingv1alpha1.Location, error) { - obj, err := c.Fake.Invokes(kcptesting.NewRootUpdateAction(locationsResource, c.ClusterPath, location), &schedulingv1alpha1.Location{}) - if obj == nil { - return nil, err - } - return obj.(*schedulingv1alpha1.Location), err -} - -func (c *locationsClient) UpdateStatus(ctx context.Context, location *schedulingv1alpha1.Location, opts metav1.UpdateOptions) (*schedulingv1alpha1.Location, error) { - obj, err := c.Fake.Invokes(kcptesting.NewRootUpdateSubresourceAction(locationsResource, c.ClusterPath, "status", location), &schedulingv1alpha1.Location{}) - if obj == nil { - return nil, err - } - return obj.(*schedulingv1alpha1.Location), err -} - -func (c *locationsClient) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake.Invokes(kcptesting.NewRootDeleteActionWithOptions(locationsResource, c.ClusterPath, name, opts), &schedulingv1alpha1.Location{}) - return err -} - -func (c *locationsClient) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := kcptesting.NewRootDeleteCollectionAction(locationsResource, c.ClusterPath, listOpts) - - _, err := c.Fake.Invokes(action, &schedulingv1alpha1.LocationList{}) - return err -} - -func (c *locationsClient) Get(ctx context.Context, name string, options metav1.GetOptions) (*schedulingv1alpha1.Location, error) { - obj, err := c.Fake.Invokes(kcptesting.NewRootGetAction(locationsResource, c.ClusterPath, name), &schedulingv1alpha1.Location{}) - if obj == nil { - return nil, err - } - return obj.(*schedulingv1alpha1.Location), err -} - -// List takes label and field selectors, and returns the list of Locations that match those selectors. 
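The fake clients deleted here are thin wrappers over an action recorder: each call is converted into a testing "action", pushed through the fake's Invokes machinery so reactors can answer it, and the typed object comes back for assertions. A toy version of that recording pattern, deliberately independent of the kcptesting package:

package main

import "fmt"

// action captures one fake client call for later test assertions.
type action struct{ verb, resource, name string }

// fake records actions and serves objects seeded by the test.
type fake struct {
	actions []action
	objects map[string]string
}

func (f *fake) invokes(a action) (string, bool) {
	f.actions = append(f.actions, a) // every call is recorded
	obj, ok := f.objects[a.name]
	return obj, ok
}

// locationsClient mimics a typed fake: translate the call into an action,
// invoke it, and hand back the (pre-seeded) object.
type locationsClient struct{ f *fake }

func (c *locationsClient) Get(name string) (string, error) {
	obj, ok := c.f.invokes(action{verb: "get", resource: "locations", name: name})
	if !ok {
		return "", fmt.Errorf("locations %q not found", name)
	}
	return obj, nil
}

func main() {
	f := &fake{objects: map[string]string{"us-east1": "Location(us-east1)"}}
	c := &locationsClient{f: f}
	obj, err := c.Get("us-east1")
	fmt.Println(obj, err)
	fmt.Printf("recorded %d action(s): %+v\n", len(f.actions), f.actions)
}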
-func (c *locationsClient) List(ctx context.Context, opts metav1.ListOptions) (*schedulingv1alpha1.LocationList, error) { - obj, err := c.Fake.Invokes(kcptesting.NewRootListAction(locationsResource, locationsKind, c.ClusterPath, opts), &schedulingv1alpha1.LocationList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &schedulingv1alpha1.LocationList{ListMeta: obj.(*schedulingv1alpha1.LocationList).ListMeta} - for _, item := range obj.(*schedulingv1alpha1.LocationList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -func (c *locationsClient) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake.InvokesWatch(kcptesting.NewRootWatchAction(locationsResource, c.ClusterPath, opts)) -} - -func (c *locationsClient) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (*schedulingv1alpha1.Location, error) { - obj, err := c.Fake.Invokes(kcptesting.NewRootPatchSubresourceAction(locationsResource, c.ClusterPath, name, pt, data, subresources...), &schedulingv1alpha1.Location{}) - if obj == nil { - return nil, err - } - return obj.(*schedulingv1alpha1.Location), err -} - -func (c *locationsClient) Apply(ctx context.Context, applyConfiguration *applyconfigurationsschedulingv1alpha1.LocationApplyConfiguration, opts metav1.ApplyOptions) (*schedulingv1alpha1.Location, error) { - if applyConfiguration == nil { - return nil, fmt.Errorf("applyConfiguration provided to Apply must not be nil") - } - data, err := json.Marshal(applyConfiguration) - if err != nil { - return nil, err - } - name := applyConfiguration.Name - if name == nil { - return nil, fmt.Errorf("applyConfiguration.Name must be provided to Apply") - } - obj, err := c.Fake.Invokes(kcptesting.NewRootPatchSubresourceAction(locationsResource, c.ClusterPath, *name, types.ApplyPatchType, data), &schedulingv1alpha1.Location{}) - if obj == nil { - return nil, err - } - return obj.(*schedulingv1alpha1.Location), err -} - -func (c *locationsClient) ApplyStatus(ctx context.Context, applyConfiguration *applyconfigurationsschedulingv1alpha1.LocationApplyConfiguration, opts metav1.ApplyOptions) (*schedulingv1alpha1.Location, error) { - if applyConfiguration == nil { - return nil, fmt.Errorf("applyConfiguration provided to Apply must not be nil") - } - data, err := json.Marshal(applyConfiguration) - if err != nil { - return nil, err - } - name := applyConfiguration.Name - if name == nil { - return nil, fmt.Errorf("applyConfiguration.Name must be provided to Apply") - } - obj, err := c.Fake.Invokes(kcptesting.NewRootPatchSubresourceAction(locationsResource, c.ClusterPath, *name, types.ApplyPatchType, data, "status"), &schedulingv1alpha1.Location{}) - if obj == nil { - return nil, err - } - return obj.(*schedulingv1alpha1.Location), err -} diff --git a/sdk/client/clientset/versioned/cluster/typed/scheduling/v1alpha1/fake/placement.go b/sdk/client/clientset/versioned/cluster/typed/scheduling/v1alpha1/fake/placement.go deleted file mode 100644 index cf5d56b4b80..00000000000 --- a/sdk/client/clientset/versioned/cluster/typed/scheduling/v1alpha1/fake/placement.go +++ /dev/null @@ -1,202 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The KCP Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by kcp code-generator. DO NOT EDIT. - -package fake - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/kcp-dev/logicalcluster/v3" - - kcptesting "github.com/kcp-dev/client-go/third_party/k8s.io/client-go/testing" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/testing" - - schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1" - applyconfigurationsschedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/client/applyconfiguration/scheduling/v1alpha1" - schedulingv1alpha1client "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/typed/scheduling/v1alpha1" -) - -var placementsResource = schema.GroupVersionResource{Group: "scheduling.kcp.io", Version: "v1alpha1", Resource: "placements"} -var placementsKind = schema.GroupVersionKind{Group: "scheduling.kcp.io", Version: "v1alpha1", Kind: "Placement"} - -type placementsClusterClient struct { - *kcptesting.Fake -} - -// Cluster scopes the client down to a particular cluster. -func (c *placementsClusterClient) Cluster(clusterPath logicalcluster.Path) schedulingv1alpha1client.PlacementInterface { - if clusterPath == logicalcluster.Wildcard { - panic("A specific cluster must be provided when scoping, not the wildcard.") - } - - return &placementsClient{Fake: c.Fake, ClusterPath: clusterPath} -} - -// List takes label and field selectors, and returns the list of Placements that match those selectors across all clusters. -func (c *placementsClusterClient) List(ctx context.Context, opts metav1.ListOptions) (*schedulingv1alpha1.PlacementList, error) { - obj, err := c.Fake.Invokes(kcptesting.NewRootListAction(placementsResource, placementsKind, logicalcluster.Wildcard, opts), &schedulingv1alpha1.PlacementList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &schedulingv1alpha1.PlacementList{ListMeta: obj.(*schedulingv1alpha1.PlacementList).ListMeta} - for _, item := range obj.(*schedulingv1alpha1.PlacementList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested Placements across all clusters. 
-func (c *placementsClusterClient) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake.InvokesWatch(kcptesting.NewRootWatchAction(placementsResource, logicalcluster.Wildcard, opts)) -} - -type placementsClient struct { - *kcptesting.Fake - ClusterPath logicalcluster.Path -} - -func (c *placementsClient) Create(ctx context.Context, placement *schedulingv1alpha1.Placement, opts metav1.CreateOptions) (*schedulingv1alpha1.Placement, error) { - obj, err := c.Fake.Invokes(kcptesting.NewRootCreateAction(placementsResource, c.ClusterPath, placement), &schedulingv1alpha1.Placement{}) - if obj == nil { - return nil, err - } - return obj.(*schedulingv1alpha1.Placement), err -} - -func (c *placementsClient) Update(ctx context.Context, placement *schedulingv1alpha1.Placement, opts metav1.UpdateOptions) (*schedulingv1alpha1.Placement, error) { - obj, err := c.Fake.Invokes(kcptesting.NewRootUpdateAction(placementsResource, c.ClusterPath, placement), &schedulingv1alpha1.Placement{}) - if obj == nil { - return nil, err - } - return obj.(*schedulingv1alpha1.Placement), err -} - -func (c *placementsClient) UpdateStatus(ctx context.Context, placement *schedulingv1alpha1.Placement, opts metav1.UpdateOptions) (*schedulingv1alpha1.Placement, error) { - obj, err := c.Fake.Invokes(kcptesting.NewRootUpdateSubresourceAction(placementsResource, c.ClusterPath, "status", placement), &schedulingv1alpha1.Placement{}) - if obj == nil { - return nil, err - } - return obj.(*schedulingv1alpha1.Placement), err -} - -func (c *placementsClient) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { - _, err := c.Fake.Invokes(kcptesting.NewRootDeleteActionWithOptions(placementsResource, c.ClusterPath, name, opts), &schedulingv1alpha1.Placement{}) - return err -} - -func (c *placementsClient) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { - action := kcptesting.NewRootDeleteCollectionAction(placementsResource, c.ClusterPath, listOpts) - - _, err := c.Fake.Invokes(action, &schedulingv1alpha1.PlacementList{}) - return err -} - -func (c *placementsClient) Get(ctx context.Context, name string, options metav1.GetOptions) (*schedulingv1alpha1.Placement, error) { - obj, err := c.Fake.Invokes(kcptesting.NewRootGetAction(placementsResource, c.ClusterPath, name), &schedulingv1alpha1.Placement{}) - if obj == nil { - return nil, err - } - return obj.(*schedulingv1alpha1.Placement), err -} - -// List takes label and field selectors, and returns the list of Placements that match those selectors. 
-func (c *placementsClient) List(ctx context.Context, opts metav1.ListOptions) (*schedulingv1alpha1.PlacementList, error) { - obj, err := c.Fake.Invokes(kcptesting.NewRootListAction(placementsResource, placementsKind, c.ClusterPath, opts), &schedulingv1alpha1.PlacementList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &schedulingv1alpha1.PlacementList{ListMeta: obj.(*schedulingv1alpha1.PlacementList).ListMeta} - for _, item := range obj.(*schedulingv1alpha1.PlacementList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -func (c *placementsClient) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.Fake.InvokesWatch(kcptesting.NewRootWatchAction(placementsResource, c.ClusterPath, opts)) -} - -func (c *placementsClient) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (*schedulingv1alpha1.Placement, error) { - obj, err := c.Fake.Invokes(kcptesting.NewRootPatchSubresourceAction(placementsResource, c.ClusterPath, name, pt, data, subresources...), &schedulingv1alpha1.Placement{}) - if obj == nil { - return nil, err - } - return obj.(*schedulingv1alpha1.Placement), err -} - -func (c *placementsClient) Apply(ctx context.Context, applyConfiguration *applyconfigurationsschedulingv1alpha1.PlacementApplyConfiguration, opts metav1.ApplyOptions) (*schedulingv1alpha1.Placement, error) { - if applyConfiguration == nil { - return nil, fmt.Errorf("applyConfiguration provided to Apply must not be nil") - } - data, err := json.Marshal(applyConfiguration) - if err != nil { - return nil, err - } - name := applyConfiguration.Name - if name == nil { - return nil, fmt.Errorf("applyConfiguration.Name must be provided to Apply") - } - obj, err := c.Fake.Invokes(kcptesting.NewRootPatchSubresourceAction(placementsResource, c.ClusterPath, *name, types.ApplyPatchType, data), &schedulingv1alpha1.Placement{}) - if obj == nil { - return nil, err - } - return obj.(*schedulingv1alpha1.Placement), err -} - -func (c *placementsClient) ApplyStatus(ctx context.Context, applyConfiguration *applyconfigurationsschedulingv1alpha1.PlacementApplyConfiguration, opts metav1.ApplyOptions) (*schedulingv1alpha1.Placement, error) { - if applyConfiguration == nil { - return nil, fmt.Errorf("applyConfiguration provided to Apply must not be nil") - } - data, err := json.Marshal(applyConfiguration) - if err != nil { - return nil, err - } - name := applyConfiguration.Name - if name == nil { - return nil, fmt.Errorf("applyConfiguration.Name must be provided to Apply") - } - obj, err := c.Fake.Invokes(kcptesting.NewRootPatchSubresourceAction(placementsResource, c.ClusterPath, *name, types.ApplyPatchType, data, "status"), &schedulingv1alpha1.Placement{}) - if obj == nil { - return nil, err - } - return obj.(*schedulingv1alpha1.Placement), err -} diff --git a/sdk/client/clientset/versioned/cluster/typed/scheduling/v1alpha1/fake/scheduling_client.go b/sdk/client/clientset/versioned/cluster/typed/scheduling/v1alpha1/fake/scheduling_client.go deleted file mode 100644 index b11d93d88e3..00000000000 --- a/sdk/client/clientset/versioned/cluster/typed/scheduling/v1alpha1/fake/scheduling_client.go +++ /dev/null @@ -1,73 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The KCP Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by kcp code-generator. DO NOT EDIT. - -package fake - -import ( - "github.com/kcp-dev/logicalcluster/v3" - - kcptesting "github.com/kcp-dev/client-go/third_party/k8s.io/client-go/testing" - "k8s.io/client-go/rest" - - kcpschedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster/typed/scheduling/v1alpha1" - schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/typed/scheduling/v1alpha1" -) - -var _ kcpschedulingv1alpha1.SchedulingV1alpha1ClusterInterface = (*SchedulingV1alpha1ClusterClient)(nil) - -type SchedulingV1alpha1ClusterClient struct { - *kcptesting.Fake -} - -func (c *SchedulingV1alpha1ClusterClient) Cluster(clusterPath logicalcluster.Path) schedulingv1alpha1.SchedulingV1alpha1Interface { - if clusterPath == logicalcluster.Wildcard { - panic("A specific cluster must be provided when scoping, not the wildcard.") - } - return &SchedulingV1alpha1Client{Fake: c.Fake, ClusterPath: clusterPath} -} - -func (c *SchedulingV1alpha1ClusterClient) Locations() kcpschedulingv1alpha1.LocationClusterInterface { - return &locationsClusterClient{Fake: c.Fake} -} - -func (c *SchedulingV1alpha1ClusterClient) Placements() kcpschedulingv1alpha1.PlacementClusterInterface { - return &placementsClusterClient{Fake: c.Fake} -} - -var _ schedulingv1alpha1.SchedulingV1alpha1Interface = (*SchedulingV1alpha1Client)(nil) - -type SchedulingV1alpha1Client struct { - *kcptesting.Fake - ClusterPath logicalcluster.Path -} - -func (c *SchedulingV1alpha1Client) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} - -func (c *SchedulingV1alpha1Client) Locations() schedulingv1alpha1.LocationInterface { - return &locationsClient{Fake: c.Fake, ClusterPath: c.ClusterPath} -} - -func (c *SchedulingV1alpha1Client) Placements() schedulingv1alpha1.PlacementInterface { - return &placementsClient{Fake: c.Fake, ClusterPath: c.ClusterPath} -} diff --git a/sdk/client/clientset/versioned/cluster/typed/scheduling/v1alpha1/location.go b/sdk/client/clientset/versioned/cluster/typed/scheduling/v1alpha1/location.go deleted file mode 100644 index 731ec7e7cfa..00000000000 --- a/sdk/client/clientset/versioned/cluster/typed/scheduling/v1alpha1/location.go +++ /dev/null @@ -1,72 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by kcp code-generator. DO NOT EDIT. 
- -package v1alpha1 - -import ( - "context" - - kcpclient "github.com/kcp-dev/apimachinery/v2/pkg/client" - "github.com/kcp-dev/logicalcluster/v3" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/watch" - - schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1" - schedulingv1alpha1client "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/typed/scheduling/v1alpha1" -) - -// LocationsClusterGetter has a method to return a LocationClusterInterface. -// A group's cluster client should implement this interface. -type LocationsClusterGetter interface { - Locations() LocationClusterInterface -} - -// LocationClusterInterface can operate on Locations across all clusters, -// or scope down to one cluster and return a schedulingv1alpha1client.LocationInterface. -type LocationClusterInterface interface { - Cluster(logicalcluster.Path) schedulingv1alpha1client.LocationInterface - List(ctx context.Context, opts metav1.ListOptions) (*schedulingv1alpha1.LocationList, error) - Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) -} - -type locationsClusterInterface struct { - clientCache kcpclient.Cache[*schedulingv1alpha1client.SchedulingV1alpha1Client] -} - -// Cluster scopes the client down to a particular cluster. -func (c *locationsClusterInterface) Cluster(clusterPath logicalcluster.Path) schedulingv1alpha1client.LocationInterface { - if clusterPath == logicalcluster.Wildcard { - panic("A specific cluster must be provided when scoping, not the wildcard.") - } - - return c.clientCache.ClusterOrDie(clusterPath).Locations() -} - -// List returns the entire collection of all Locations across all clusters. -func (c *locationsClusterInterface) List(ctx context.Context, opts metav1.ListOptions) (*schedulingv1alpha1.LocationList, error) { - return c.clientCache.ClusterOrDie(logicalcluster.Wildcard).Locations().List(ctx, opts) -} - -// Watch begins to watch all Locations across all clusters. -func (c *locationsClusterInterface) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.clientCache.ClusterOrDie(logicalcluster.Wildcard).Locations().Watch(ctx, opts) -} diff --git a/sdk/client/clientset/versioned/cluster/typed/scheduling/v1alpha1/placement.go b/sdk/client/clientset/versioned/cluster/typed/scheduling/v1alpha1/placement.go deleted file mode 100644 index 1013253f2e1..00000000000 --- a/sdk/client/clientset/versioned/cluster/typed/scheduling/v1alpha1/placement.go +++ /dev/null @@ -1,72 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by kcp code-generator. DO NOT EDIT. 
-
-package v1alpha1
-
-import (
-	"context"
-
-	kcpclient "github.com/kcp-dev/apimachinery/v2/pkg/client"
-	"github.com/kcp-dev/logicalcluster/v3"
-
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/watch"
-
-	schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1"
-	schedulingv1alpha1client "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/typed/scheduling/v1alpha1"
-)
-
-// PlacementsClusterGetter has a method to return a PlacementClusterInterface.
-// A group's cluster client should implement this interface.
-type PlacementsClusterGetter interface {
-	Placements() PlacementClusterInterface
-}
-
-// PlacementClusterInterface can operate on Placements across all clusters,
-// or scope down to one cluster and return a schedulingv1alpha1client.PlacementInterface.
-type PlacementClusterInterface interface {
-	Cluster(logicalcluster.Path) schedulingv1alpha1client.PlacementInterface
-	List(ctx context.Context, opts metav1.ListOptions) (*schedulingv1alpha1.PlacementList, error)
-	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
-}
-
-type placementsClusterInterface struct {
-	clientCache kcpclient.Cache[*schedulingv1alpha1client.SchedulingV1alpha1Client]
-}
-
-// Cluster scopes the client down to a particular cluster.
-func (c *placementsClusterInterface) Cluster(clusterPath logicalcluster.Path) schedulingv1alpha1client.PlacementInterface {
-	if clusterPath == logicalcluster.Wildcard {
-		panic("A specific cluster must be provided when scoping, not the wildcard.")
-	}
-
-	return c.clientCache.ClusterOrDie(clusterPath).Placements()
-}
-
-// List returns the entire collection of all Placements across all clusters.
-func (c *placementsClusterInterface) List(ctx context.Context, opts metav1.ListOptions) (*schedulingv1alpha1.PlacementList, error) {
-	return c.clientCache.ClusterOrDie(logicalcluster.Wildcard).Placements().List(ctx, opts)
-}
-
-// Watch begins to watch all Placements across all clusters.
-func (c *placementsClusterInterface) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
-	return c.clientCache.ClusterOrDie(logicalcluster.Wildcard).Placements().Watch(ctx, opts)
-}
diff --git a/sdk/client/clientset/versioned/cluster/typed/scheduling/v1alpha1/scheduling_client.go b/sdk/client/clientset/versioned/cluster/typed/scheduling/v1alpha1/scheduling_client.go
deleted file mode 100644
index eb2b2ff773f..00000000000
--- a/sdk/client/clientset/versioned/cluster/typed/scheduling/v1alpha1/scheduling_client.go
+++ /dev/null
@@ -1,95 +0,0 @@
-//go:build !ignore_autogenerated
-// +build !ignore_autogenerated
-
-/*
-Copyright The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by kcp code-generator. DO NOT EDIT.
-
-package v1alpha1
-
-import (
-	"net/http"
-
-	kcpclient "github.com/kcp-dev/apimachinery/v2/pkg/client"
-	"github.com/kcp-dev/logicalcluster/v3"
-
-	"k8s.io/client-go/rest"
-
-	schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/typed/scheduling/v1alpha1"
-)
-
-type SchedulingV1alpha1ClusterInterface interface {
-	SchedulingV1alpha1ClusterScoper
-	LocationsClusterGetter
-	PlacementsClusterGetter
-}
-
-type SchedulingV1alpha1ClusterScoper interface {
-	Cluster(logicalcluster.Path) schedulingv1alpha1.SchedulingV1alpha1Interface
-}
-
-type SchedulingV1alpha1ClusterClient struct {
-	clientCache kcpclient.Cache[*schedulingv1alpha1.SchedulingV1alpha1Client]
-}
-
-func (c *SchedulingV1alpha1ClusterClient) Cluster(clusterPath logicalcluster.Path) schedulingv1alpha1.SchedulingV1alpha1Interface {
-	if clusterPath == logicalcluster.Wildcard {
-		panic("A specific cluster must be provided when scoping, not the wildcard.")
-	}
-	return c.clientCache.ClusterOrDie(clusterPath)
-}
-
-func (c *SchedulingV1alpha1ClusterClient) Locations() LocationClusterInterface {
-	return &locationsClusterInterface{clientCache: c.clientCache}
-}
-
-func (c *SchedulingV1alpha1ClusterClient) Placements() PlacementClusterInterface {
-	return &placementsClusterInterface{clientCache: c.clientCache}
-}
-
-// NewForConfig creates a new SchedulingV1alpha1ClusterClient for the given config.
-// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
-// where httpClient was generated with rest.HTTPClientFor(c).
-func NewForConfig(c *rest.Config) (*SchedulingV1alpha1ClusterClient, error) {
-	client, err := rest.HTTPClientFor(c)
-	if err != nil {
-		return nil, err
-	}
-	return NewForConfigAndClient(c, client)
-}
-
-// NewForConfigAndClient creates a new SchedulingV1alpha1ClusterClient for the given config and http client.
-// Note the http client provided takes precedence over the configured transport values.
-func NewForConfigAndClient(c *rest.Config, h *http.Client) (*SchedulingV1alpha1ClusterClient, error) {
-	cache := kcpclient.NewCache(c, h, &kcpclient.Constructor[*schedulingv1alpha1.SchedulingV1alpha1Client]{
-		NewForConfigAndClient: schedulingv1alpha1.NewForConfigAndClient,
-	})
-	if _, err := cache.Cluster(logicalcluster.Name("root").Path()); err != nil {
-		return nil, err
-	}
-	return &SchedulingV1alpha1ClusterClient{clientCache: cache}, nil
-}
-
-// NewForConfigOrDie creates a new SchedulingV1alpha1ClusterClient for the given config and
-// panics if there is an error in the config.
-func NewForConfigOrDie(c *rest.Config) *SchedulingV1alpha1ClusterClient {
-	client, err := NewForConfig(c)
-	if err != nil {
-		panic(err)
-	}
-	return client
-}
diff --git a/sdk/client/clientset/versioned/cluster/typed/workload/v1alpha1/fake/synctarget.go b/sdk/client/clientset/versioned/cluster/typed/workload/v1alpha1/fake/synctarget.go
deleted file mode 100644
index 9e5170d066a..00000000000
--- a/sdk/client/clientset/versioned/cluster/typed/workload/v1alpha1/fake/synctarget.go
+++ /dev/null
@@ -1,202 +0,0 @@
-//go:build !ignore_autogenerated
-// +build !ignore_autogenerated
-
-/*
-Copyright The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by kcp code-generator. DO NOT EDIT.
-
-package fake
-
-import (
-	"context"
-	"encoding/json"
-	"fmt"
-
-	"github.com/kcp-dev/logicalcluster/v3"
-
-	kcptesting "github.com/kcp-dev/client-go/third_party/k8s.io/client-go/testing"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/labels"
-	"k8s.io/apimachinery/pkg/runtime/schema"
-	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/apimachinery/pkg/watch"
-	"k8s.io/client-go/testing"
-
-	workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1"
-	applyconfigurationsworkloadv1alpha1 "github.com/kcp-dev/kcp/sdk/client/applyconfiguration/workload/v1alpha1"
-	workloadv1alpha1client "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/typed/workload/v1alpha1"
-)
-
-var syncTargetsResource = schema.GroupVersionResource{Group: "workload.kcp.io", Version: "v1alpha1", Resource: "synctargets"}
-var syncTargetsKind = schema.GroupVersionKind{Group: "workload.kcp.io", Version: "v1alpha1", Kind: "SyncTarget"}
-
-type syncTargetsClusterClient struct {
-	*kcptesting.Fake
-}
-
-// Cluster scopes the client down to a particular cluster.
-func (c *syncTargetsClusterClient) Cluster(clusterPath logicalcluster.Path) workloadv1alpha1client.SyncTargetInterface {
-	if clusterPath == logicalcluster.Wildcard {
-		panic("A specific cluster must be provided when scoping, not the wildcard.")
-	}
-
-	return &syncTargetsClient{Fake: c.Fake, ClusterPath: clusterPath}
-}
-
-// List takes label and field selectors, and returns the list of SyncTargets that match those selectors across all clusters.
-func (c *syncTargetsClusterClient) List(ctx context.Context, opts metav1.ListOptions) (*workloadv1alpha1.SyncTargetList, error) {
-	obj, err := c.Fake.Invokes(kcptesting.NewRootListAction(syncTargetsResource, syncTargetsKind, logicalcluster.Wildcard, opts), &workloadv1alpha1.SyncTargetList{})
-	if obj == nil {
-		return nil, err
-	}
-
-	label, _, _ := testing.ExtractFromListOptions(opts)
-	if label == nil {
-		label = labels.Everything()
-	}
-	list := &workloadv1alpha1.SyncTargetList{ListMeta: obj.(*workloadv1alpha1.SyncTargetList).ListMeta}
-	for _, item := range obj.(*workloadv1alpha1.SyncTargetList).Items {
-		if label.Matches(labels.Set(item.Labels)) {
-			list.Items = append(list.Items, item)
-		}
-	}
-	return list, err
-}
-
-// Watch returns a watch.Interface that watches the requested SyncTargets across all clusters.
-func (c *syncTargetsClusterClient) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
-	return c.Fake.InvokesWatch(kcptesting.NewRootWatchAction(syncTargetsResource, logicalcluster.Wildcard, opts))
-}
-
-type syncTargetsClient struct {
-	*kcptesting.Fake
-	ClusterPath logicalcluster.Path
-}
-
-func (c *syncTargetsClient) Create(ctx context.Context, syncTarget *workloadv1alpha1.SyncTarget, opts metav1.CreateOptions) (*workloadv1alpha1.SyncTarget, error) {
-	obj, err := c.Fake.Invokes(kcptesting.NewRootCreateAction(syncTargetsResource, c.ClusterPath, syncTarget), &workloadv1alpha1.SyncTarget{})
-	if obj == nil {
-		return nil, err
-	}
-	return obj.(*workloadv1alpha1.SyncTarget), err
-}
-
-func (c *syncTargetsClient) Update(ctx context.Context, syncTarget *workloadv1alpha1.SyncTarget, opts metav1.UpdateOptions) (*workloadv1alpha1.SyncTarget, error) {
-	obj, err := c.Fake.Invokes(kcptesting.NewRootUpdateAction(syncTargetsResource, c.ClusterPath, syncTarget), &workloadv1alpha1.SyncTarget{})
-	if obj == nil {
-		return nil, err
-	}
-	return obj.(*workloadv1alpha1.SyncTarget), err
-}
-
-func (c *syncTargetsClient) UpdateStatus(ctx context.Context, syncTarget *workloadv1alpha1.SyncTarget, opts metav1.UpdateOptions) (*workloadv1alpha1.SyncTarget, error) {
-	obj, err := c.Fake.Invokes(kcptesting.NewRootUpdateSubresourceAction(syncTargetsResource, c.ClusterPath, "status", syncTarget), &workloadv1alpha1.SyncTarget{})
-	if obj == nil {
-		return nil, err
-	}
-	return obj.(*workloadv1alpha1.SyncTarget), err
-}
-
-func (c *syncTargetsClient) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
-	_, err := c.Fake.Invokes(kcptesting.NewRootDeleteActionWithOptions(syncTargetsResource, c.ClusterPath, name, opts), &workloadv1alpha1.SyncTarget{})
-	return err
-}
-
-func (c *syncTargetsClient) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
-	action := kcptesting.NewRootDeleteCollectionAction(syncTargetsResource, c.ClusterPath, listOpts)
-
-	_, err := c.Fake.Invokes(action, &workloadv1alpha1.SyncTargetList{})
-	return err
-}
-
-func (c *syncTargetsClient) Get(ctx context.Context, name string, options metav1.GetOptions) (*workloadv1alpha1.SyncTarget, error) {
-	obj, err := c.Fake.Invokes(kcptesting.NewRootGetAction(syncTargetsResource, c.ClusterPath, name), &workloadv1alpha1.SyncTarget{})
-	if obj == nil {
-		return nil, err
-	}
-	return obj.(*workloadv1alpha1.SyncTarget), err
-}
-
-// List takes label and field selectors, and returns the list of SyncTargets that match those selectors.
-func (c *syncTargetsClient) List(ctx context.Context, opts metav1.ListOptions) (*workloadv1alpha1.SyncTargetList, error) {
-	obj, err := c.Fake.Invokes(kcptesting.NewRootListAction(syncTargetsResource, syncTargetsKind, c.ClusterPath, opts), &workloadv1alpha1.SyncTargetList{})
-	if obj == nil {
-		return nil, err
-	}
-
-	label, _, _ := testing.ExtractFromListOptions(opts)
-	if label == nil {
-		label = labels.Everything()
-	}
-	list := &workloadv1alpha1.SyncTargetList{ListMeta: obj.(*workloadv1alpha1.SyncTargetList).ListMeta}
-	for _, item := range obj.(*workloadv1alpha1.SyncTargetList).Items {
-		if label.Matches(labels.Set(item.Labels)) {
-			list.Items = append(list.Items, item)
-		}
-	}
-	return list, err
-}
-
-func (c *syncTargetsClient) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
-	return c.Fake.InvokesWatch(kcptesting.NewRootWatchAction(syncTargetsResource, c.ClusterPath, opts))
-}
-
-func (c *syncTargetsClient) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (*workloadv1alpha1.SyncTarget, error) {
-	obj, err := c.Fake.Invokes(kcptesting.NewRootPatchSubresourceAction(syncTargetsResource, c.ClusterPath, name, pt, data, subresources...), &workloadv1alpha1.SyncTarget{})
-	if obj == nil {
-		return nil, err
-	}
-	return obj.(*workloadv1alpha1.SyncTarget), err
-}
-
-func (c *syncTargetsClient) Apply(ctx context.Context, applyConfiguration *applyconfigurationsworkloadv1alpha1.SyncTargetApplyConfiguration, opts metav1.ApplyOptions) (*workloadv1alpha1.SyncTarget, error) {
-	if applyConfiguration == nil {
-		return nil, fmt.Errorf("applyConfiguration provided to Apply must not be nil")
-	}
-	data, err := json.Marshal(applyConfiguration)
-	if err != nil {
-		return nil, err
-	}
-	name := applyConfiguration.Name
-	if name == nil {
-		return nil, fmt.Errorf("applyConfiguration.Name must be provided to Apply")
-	}
-	obj, err := c.Fake.Invokes(kcptesting.NewRootPatchSubresourceAction(syncTargetsResource, c.ClusterPath, *name, types.ApplyPatchType, data), &workloadv1alpha1.SyncTarget{})
-	if obj == nil {
-		return nil, err
-	}
-	return obj.(*workloadv1alpha1.SyncTarget), err
-}
-
-func (c *syncTargetsClient) ApplyStatus(ctx context.Context, applyConfiguration *applyconfigurationsworkloadv1alpha1.SyncTargetApplyConfiguration, opts metav1.ApplyOptions) (*workloadv1alpha1.SyncTarget, error) {
-	if applyConfiguration == nil {
-		return nil, fmt.Errorf("applyConfiguration provided to Apply must not be nil")
-	}
-	data, err := json.Marshal(applyConfiguration)
-	if err != nil {
-		return nil, err
-	}
-	name := applyConfiguration.Name
-	if name == nil {
-		return nil, fmt.Errorf("applyConfiguration.Name must be provided to Apply")
-	}
-	obj, err := c.Fake.Invokes(kcptesting.NewRootPatchSubresourceAction(syncTargetsResource, c.ClusterPath, *name, types.ApplyPatchType, data, "status"), &workloadv1alpha1.SyncTarget{})
-	if obj == nil {
-		return nil, err
-	}
-	return obj.(*workloadv1alpha1.SyncTarget), err
-}
diff --git a/sdk/client/clientset/versioned/cluster/typed/workload/v1alpha1/fake/workload_client.go b/sdk/client/clientset/versioned/cluster/typed/workload/v1alpha1/fake/workload_client.go
deleted file mode 100644
index ab4be1e189f..00000000000
--- a/sdk/client/clientset/versioned/cluster/typed/workload/v1alpha1/fake/workload_client.go
+++ /dev/null
@@ -1,65 +0,0 @@
-//go:build !ignore_autogenerated
-// +build !ignore_autogenerated
-
-/*
-Copyright The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by kcp code-generator. DO NOT EDIT.
-
-package fake
-
-import (
-	"github.com/kcp-dev/logicalcluster/v3"
-
-	kcptesting "github.com/kcp-dev/client-go/third_party/k8s.io/client-go/testing"
-	"k8s.io/client-go/rest"
-
-	kcpworkloadv1alpha1 "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster/typed/workload/v1alpha1"
-	workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/typed/workload/v1alpha1"
-)
-
-var _ kcpworkloadv1alpha1.WorkloadV1alpha1ClusterInterface = (*WorkloadV1alpha1ClusterClient)(nil)
-
-type WorkloadV1alpha1ClusterClient struct {
-	*kcptesting.Fake
-}
-
-func (c *WorkloadV1alpha1ClusterClient) Cluster(clusterPath logicalcluster.Path) workloadv1alpha1.WorkloadV1alpha1Interface {
-	if clusterPath == logicalcluster.Wildcard {
-		panic("A specific cluster must be provided when scoping, not the wildcard.")
-	}
-	return &WorkloadV1alpha1Client{Fake: c.Fake, ClusterPath: clusterPath}
-}
-
-func (c *WorkloadV1alpha1ClusterClient) SyncTargets() kcpworkloadv1alpha1.SyncTargetClusterInterface {
-	return &syncTargetsClusterClient{Fake: c.Fake}
-}
-
-var _ workloadv1alpha1.WorkloadV1alpha1Interface = (*WorkloadV1alpha1Client)(nil)
-
-type WorkloadV1alpha1Client struct {
-	*kcptesting.Fake
-	ClusterPath logicalcluster.Path
-}
-
-func (c *WorkloadV1alpha1Client) RESTClient() rest.Interface {
-	var ret *rest.RESTClient
-	return ret
-}
-
-func (c *WorkloadV1alpha1Client) SyncTargets() workloadv1alpha1.SyncTargetInterface {
-	return &syncTargetsClient{Fake: c.Fake, ClusterPath: c.ClusterPath}
-}
diff --git a/sdk/client/clientset/versioned/cluster/typed/workload/v1alpha1/synctarget.go b/sdk/client/clientset/versioned/cluster/typed/workload/v1alpha1/synctarget.go
deleted file mode 100644
index db10bded09c..00000000000
--- a/sdk/client/clientset/versioned/cluster/typed/workload/v1alpha1/synctarget.go
+++ /dev/null
@@ -1,72 +0,0 @@
-//go:build !ignore_autogenerated
-// +build !ignore_autogenerated
-
-/*
-Copyright The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by kcp code-generator. DO NOT EDIT.
-
-package v1alpha1
-
-import (
-	"context"
-
-	kcpclient "github.com/kcp-dev/apimachinery/v2/pkg/client"
-	"github.com/kcp-dev/logicalcluster/v3"
-
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/watch"
-
-	workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1"
-	workloadv1alpha1client "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/typed/workload/v1alpha1"
-)
-
-// SyncTargetsClusterGetter has a method to return a SyncTargetClusterInterface.
-// A group's cluster client should implement this interface.
-type SyncTargetsClusterGetter interface {
-	SyncTargets() SyncTargetClusterInterface
-}
-
-// SyncTargetClusterInterface can operate on SyncTargets across all clusters,
-// or scope down to one cluster and return a workloadv1alpha1client.SyncTargetInterface.
-type SyncTargetClusterInterface interface {
-	Cluster(logicalcluster.Path) workloadv1alpha1client.SyncTargetInterface
-	List(ctx context.Context, opts metav1.ListOptions) (*workloadv1alpha1.SyncTargetList, error)
-	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
-}
-
-type syncTargetsClusterInterface struct {
-	clientCache kcpclient.Cache[*workloadv1alpha1client.WorkloadV1alpha1Client]
-}
-
-// Cluster scopes the client down to a particular cluster.
-func (c *syncTargetsClusterInterface) Cluster(clusterPath logicalcluster.Path) workloadv1alpha1client.SyncTargetInterface {
-	if clusterPath == logicalcluster.Wildcard {
-		panic("A specific cluster must be provided when scoping, not the wildcard.")
-	}
-
-	return c.clientCache.ClusterOrDie(clusterPath).SyncTargets()
-}
-
-// List returns the entire collection of all SyncTargets across all clusters.
-func (c *syncTargetsClusterInterface) List(ctx context.Context, opts metav1.ListOptions) (*workloadv1alpha1.SyncTargetList, error) {
-	return c.clientCache.ClusterOrDie(logicalcluster.Wildcard).SyncTargets().List(ctx, opts)
-}
-
-// Watch begins to watch all SyncTargets across all clusters.
-func (c *syncTargetsClusterInterface) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
-	return c.clientCache.ClusterOrDie(logicalcluster.Wildcard).SyncTargets().Watch(ctx, opts)
-}
diff --git a/sdk/client/clientset/versioned/cluster/typed/workload/v1alpha1/workload_client.go b/sdk/client/clientset/versioned/cluster/typed/workload/v1alpha1/workload_client.go
deleted file mode 100644
index 182d876ddf7..00000000000
--- a/sdk/client/clientset/versioned/cluster/typed/workload/v1alpha1/workload_client.go
+++ /dev/null
@@ -1,90 +0,0 @@
-//go:build !ignore_autogenerated
-// +build !ignore_autogenerated
-
-/*
-Copyright The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by kcp code-generator. DO NOT EDIT.
-
-package v1alpha1
-
-import (
-	"net/http"
-
-	kcpclient "github.com/kcp-dev/apimachinery/v2/pkg/client"
-	"github.com/kcp-dev/logicalcluster/v3"
-
-	"k8s.io/client-go/rest"
-
-	workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/typed/workload/v1alpha1"
-)
-
-type WorkloadV1alpha1ClusterInterface interface {
-	WorkloadV1alpha1ClusterScoper
-	SyncTargetsClusterGetter
-}
-
-type WorkloadV1alpha1ClusterScoper interface {
-	Cluster(logicalcluster.Path) workloadv1alpha1.WorkloadV1alpha1Interface
-}
-
-type WorkloadV1alpha1ClusterClient struct {
-	clientCache kcpclient.Cache[*workloadv1alpha1.WorkloadV1alpha1Client]
-}
-
-func (c *WorkloadV1alpha1ClusterClient) Cluster(clusterPath logicalcluster.Path) workloadv1alpha1.WorkloadV1alpha1Interface {
-	if clusterPath == logicalcluster.Wildcard {
-		panic("A specific cluster must be provided when scoping, not the wildcard.")
-	}
-	return c.clientCache.ClusterOrDie(clusterPath)
-}
-
-func (c *WorkloadV1alpha1ClusterClient) SyncTargets() SyncTargetClusterInterface {
-	return &syncTargetsClusterInterface{clientCache: c.clientCache}
-}
-
-// NewForConfig creates a new WorkloadV1alpha1ClusterClient for the given config.
-// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
-// where httpClient was generated with rest.HTTPClientFor(c).
-func NewForConfig(c *rest.Config) (*WorkloadV1alpha1ClusterClient, error) {
-	client, err := rest.HTTPClientFor(c)
-	if err != nil {
-		return nil, err
-	}
-	return NewForConfigAndClient(c, client)
-}
-
-// NewForConfigAndClient creates a new WorkloadV1alpha1ClusterClient for the given config and http client.
-// Note the http client provided takes precedence over the configured transport values.
-func NewForConfigAndClient(c *rest.Config, h *http.Client) (*WorkloadV1alpha1ClusterClient, error) {
-	cache := kcpclient.NewCache(c, h, &kcpclient.Constructor[*workloadv1alpha1.WorkloadV1alpha1Client]{
-		NewForConfigAndClient: workloadv1alpha1.NewForConfigAndClient,
-	})
-	if _, err := cache.Cluster(logicalcluster.Name("root").Path()); err != nil {
-		return nil, err
-	}
-	return &WorkloadV1alpha1ClusterClient{clientCache: cache}, nil
-}
-
-// NewForConfigOrDie creates a new WorkloadV1alpha1ClusterClient for the given config and
-// panics if there is an error in the config.
-func NewForConfigOrDie(c *rest.Config) *WorkloadV1alpha1ClusterClient {
-	client, err := NewForConfig(c)
-	if err != nil {
-		panic(err)
-	}
-	return client
-}
diff --git a/sdk/client/clientset/versioned/fake/clientset_generated.go b/sdk/client/clientset/versioned/fake/clientset_generated.go
index 5d6b1d966ae..02a4c29624b 100644
--- a/sdk/client/clientset/versioned/fake/clientset_generated.go
+++ b/sdk/client/clientset/versioned/fake/clientset_generated.go
@@ -32,14 +32,10 @@ import (
 	fakeapisv1alpha1 "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/typed/apis/v1alpha1/fake"
 	corev1alpha1 "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/typed/core/v1alpha1"
 	fakecorev1alpha1 "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/typed/core/v1alpha1/fake"
-	schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/typed/scheduling/v1alpha1"
-	fakeschedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/typed/scheduling/v1alpha1/fake"
 	tenancyv1alpha1 "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/typed/tenancy/v1alpha1"
 	faketenancyv1alpha1 "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/typed/tenancy/v1alpha1/fake"
 	topologyv1alpha1 "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/typed/topology/v1alpha1"
 	faketopologyv1alpha1 "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/typed/topology/v1alpha1/fake"
-	workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/typed/workload/v1alpha1"
-	fakeworkloadv1alpha1 "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/typed/workload/v1alpha1/fake"
 )
 
 // NewSimpleClientset returns a clientset that will respond with the provided objects.
@@ -107,11 +103,6 @@ func (c *Clientset) CoreV1alpha1() corev1alpha1.CoreV1alpha1Interface {
 	return &fakecorev1alpha1.FakeCoreV1alpha1{Fake: &c.Fake}
 }
 
-// SchedulingV1alpha1 retrieves the SchedulingV1alpha1Client
-func (c *Clientset) SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1Interface {
-	return &fakeschedulingv1alpha1.FakeSchedulingV1alpha1{Fake: &c.Fake}
-}
-
 // TenancyV1alpha1 retrieves the TenancyV1alpha1Client
 func (c *Clientset) TenancyV1alpha1() tenancyv1alpha1.TenancyV1alpha1Interface {
 	return &faketenancyv1alpha1.FakeTenancyV1alpha1{Fake: &c.Fake}
@@ -121,8 +112,3 @@ func (c *Clientset) TenancyV1alpha1() tenancyv1alpha1.TenancyV1alpha1Interface {
 	return &faketopologyv1alpha1.FakeTopologyV1alpha1{Fake: &c.Fake}
 }
-
-// WorkloadV1alpha1 retrieves the WorkloadV1alpha1Client
-func (c *Clientset) WorkloadV1alpha1() workloadv1alpha1.WorkloadV1alpha1Interface {
-	return &fakeworkloadv1alpha1.FakeWorkloadV1alpha1{Fake: &c.Fake}
-}
diff --git a/sdk/client/clientset/versioned/fake/register.go b/sdk/client/clientset/versioned/fake/register.go
index b18e85ecab5..5dea70e303f 100644
--- a/sdk/client/clientset/versioned/fake/register.go
+++ b/sdk/client/clientset/versioned/fake/register.go
@@ -28,10 +28,8 @@ import (
 	apiresourcev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apiresource/v1alpha1"
 	apisv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1"
 	corev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1"
-	schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1"
 	tenancyv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1"
 	topologyv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/topology/v1alpha1"
-	workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1"
 )
 
 var scheme = runtime.NewScheme()
@@ -41,10 +39,8 @@ var localSchemeBuilder = runtime.SchemeBuilder{
 	apiresourcev1alpha1.AddToScheme,
 	apisv1alpha1.AddToScheme,
 	corev1alpha1.AddToScheme,
-	schedulingv1alpha1.AddToScheme,
 	tenancyv1alpha1.AddToScheme,
 	topologyv1alpha1.AddToScheme,
-	workloadv1alpha1.AddToScheme,
 }
 
 // AddToScheme adds all types of this clientset into the given scheme. This allows composition
diff --git a/sdk/client/clientset/versioned/scheme/register.go b/sdk/client/clientset/versioned/scheme/register.go
index 2546941130a..dc9670690f2 100644
--- a/sdk/client/clientset/versioned/scheme/register.go
+++ b/sdk/client/clientset/versioned/scheme/register.go
@@ -28,10 +28,8 @@ import (
 	apiresourcev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apiresource/v1alpha1"
 	apisv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1"
 	corev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1"
-	schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1"
 	tenancyv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1"
 	topologyv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/topology/v1alpha1"
-	workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1"
 )
 
 var Scheme = runtime.NewScheme()
@@ -41,10 +39,8 @@ var localSchemeBuilder = runtime.SchemeBuilder{
 	apiresourcev1alpha1.AddToScheme,
 	apisv1alpha1.AddToScheme,
 	corev1alpha1.AddToScheme,
-	schedulingv1alpha1.AddToScheme,
 	tenancyv1alpha1.AddToScheme,
 	topologyv1alpha1.AddToScheme,
-	workloadv1alpha1.AddToScheme,
 }
 
 // AddToScheme adds all types of this clientset into the given scheme. This allows composition
diff --git a/sdk/client/clientset/versioned/typed/scheduling/v1alpha1/doc.go b/sdk/client/clientset/versioned/typed/scheduling/v1alpha1/doc.go
deleted file mode 100644
index 364b93c7cfd..00000000000
--- a/sdk/client/clientset/versioned/typed/scheduling/v1alpha1/doc.go
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
-Copyright The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by client-gen. DO NOT EDIT.
-
-// This package has the automatically generated typed clients.
-package v1alpha1
diff --git a/sdk/client/clientset/versioned/typed/scheduling/v1alpha1/fake/doc.go b/sdk/client/clientset/versioned/typed/scheduling/v1alpha1/fake/doc.go
deleted file mode 100644
index e388f29189e..00000000000
--- a/sdk/client/clientset/versioned/typed/scheduling/v1alpha1/fake/doc.go
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
-Copyright The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by client-gen. DO NOT EDIT.
-
-// Package fake has the automatically generated clients.
-package fake
diff --git a/sdk/client/clientset/versioned/typed/scheduling/v1alpha1/fake/fake_location.go b/sdk/client/clientset/versioned/typed/scheduling/v1alpha1/fake/fake_location.go
deleted file mode 100644
index 5bb13e2a41d..00000000000
--- a/sdk/client/clientset/versioned/typed/scheduling/v1alpha1/fake/fake_location.go
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
-Copyright The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by client-gen. DO NOT EDIT.
-
-package fake
-
-import (
-	"context"
-	json "encoding/json"
-	"fmt"
-
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	labels "k8s.io/apimachinery/pkg/labels"
-	schema "k8s.io/apimachinery/pkg/runtime/schema"
-	types "k8s.io/apimachinery/pkg/types"
-	watch "k8s.io/apimachinery/pkg/watch"
-	testing "k8s.io/client-go/testing"
-
-	v1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1"
-	schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/client/applyconfiguration/scheduling/v1alpha1"
-)
-
-// FakeLocations implements LocationInterface
-type FakeLocations struct {
-	Fake *FakeSchedulingV1alpha1
-}
-
-var locationsResource = schema.GroupVersionResource{Group: "scheduling.kcp.io", Version: "v1alpha1", Resource: "locations"}
-
-var locationsKind = schema.GroupVersionKind{Group: "scheduling.kcp.io", Version: "v1alpha1", Kind: "Location"}
-
-// Get takes name of the location, and returns the corresponding location object, and an error if there is any.
-func (c *FakeLocations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Location, err error) {
-	obj, err := c.Fake.
-		Invokes(testing.NewRootGetAction(locationsResource, name), &v1alpha1.Location{})
-	if obj == nil {
-		return nil, err
-	}
-	return obj.(*v1alpha1.Location), err
-}
-
-// List takes label and field selectors, and returns the list of Locations that match those selectors.
-func (c *FakeLocations) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.LocationList, err error) {
-	obj, err := c.Fake.
-		Invokes(testing.NewRootListAction(locationsResource, locationsKind, opts), &v1alpha1.LocationList{})
-	if obj == nil {
-		return nil, err
-	}
-
-	label, _, _ := testing.ExtractFromListOptions(opts)
-	if label == nil {
-		label = labels.Everything()
-	}
-	list := &v1alpha1.LocationList{ListMeta: obj.(*v1alpha1.LocationList).ListMeta}
-	for _, item := range obj.(*v1alpha1.LocationList).Items {
-		if label.Matches(labels.Set(item.Labels)) {
-			list.Items = append(list.Items, item)
-		}
-	}
-	return list, err
-}
-
-// Watch returns a watch.Interface that watches the requested locations.
-func (c *FakeLocations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
-	return c.Fake.
-		InvokesWatch(testing.NewRootWatchAction(locationsResource, opts))
-}
-
-// Create takes the representation of a location and creates it. Returns the server's representation of the location, and an error, if there is any.
-func (c *FakeLocations) Create(ctx context.Context, location *v1alpha1.Location, opts v1.CreateOptions) (result *v1alpha1.Location, err error) {
-	obj, err := c.Fake.
-		Invokes(testing.NewRootCreateAction(locationsResource, location), &v1alpha1.Location{})
-	if obj == nil {
-		return nil, err
-	}
-	return obj.(*v1alpha1.Location), err
-}
-
-// Update takes the representation of a location and updates it. Returns the server's representation of the location, and an error, if there is any.
-func (c *FakeLocations) Update(ctx context.Context, location *v1alpha1.Location, opts v1.UpdateOptions) (result *v1alpha1.Location, err error) {
-	obj, err := c.Fake.
-		Invokes(testing.NewRootUpdateAction(locationsResource, location), &v1alpha1.Location{})
-	if obj == nil {
-		return nil, err
-	}
-	return obj.(*v1alpha1.Location), err
-}
-
-// UpdateStatus was generated because the type contains a Status member.
-// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-func (c *FakeLocations) UpdateStatus(ctx context.Context, location *v1alpha1.Location, opts v1.UpdateOptions) (*v1alpha1.Location, error) {
-	obj, err := c.Fake.
-		Invokes(testing.NewRootUpdateSubresourceAction(locationsResource, "status", location), &v1alpha1.Location{})
-	if obj == nil {
-		return nil, err
-	}
-	return obj.(*v1alpha1.Location), err
-}
-
-// Delete takes name of the location and deletes it. Returns an error if one occurs.
-func (c *FakeLocations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
-	_, err := c.Fake.
-		Invokes(testing.NewRootDeleteActionWithOptions(locationsResource, name, opts), &v1alpha1.Location{})
-	return err
-}
-
-// DeleteCollection deletes a collection of objects.
-func (c *FakeLocations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
-	action := testing.NewRootDeleteCollectionAction(locationsResource, listOpts)
-
-	_, err := c.Fake.Invokes(action, &v1alpha1.LocationList{})
-	return err
-}
-
-// Patch applies the patch and returns the patched location.
-func (c *FakeLocations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Location, err error) {
-	obj, err := c.Fake.
-		Invokes(testing.NewRootPatchSubresourceAction(locationsResource, name, pt, data, subresources...), &v1alpha1.Location{})
-	if obj == nil {
-		return nil, err
-	}
-	return obj.(*v1alpha1.Location), err
-}
-
-// Apply takes the given apply declarative configuration, applies it and returns the applied location.
-func (c *FakeLocations) Apply(ctx context.Context, location *schedulingv1alpha1.LocationApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.Location, err error) {
-	if location == nil {
-		return nil, fmt.Errorf("location provided to Apply must not be nil")
-	}
-	data, err := json.Marshal(location)
-	if err != nil {
-		return nil, err
-	}
-	name := location.Name
-	if name == nil {
-		return nil, fmt.Errorf("location.Name must be provided to Apply")
-	}
-	obj, err := c.Fake.
-		Invokes(testing.NewRootPatchSubresourceAction(locationsResource, *name, types.ApplyPatchType, data), &v1alpha1.Location{})
-	if obj == nil {
-		return nil, err
-	}
-	return obj.(*v1alpha1.Location), err
-}
-
-// ApplyStatus was generated because the type contains a Status member.
-// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
-func (c *FakeLocations) ApplyStatus(ctx context.Context, location *schedulingv1alpha1.LocationApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.Location, err error) {
-	if location == nil {
-		return nil, fmt.Errorf("location provided to Apply must not be nil")
-	}
-	data, err := json.Marshal(location)
-	if err != nil {
-		return nil, err
-	}
-	name := location.Name
-	if name == nil {
-		return nil, fmt.Errorf("location.Name must be provided to Apply")
-	}
-	obj, err := c.Fake.
-		Invokes(testing.NewRootPatchSubresourceAction(locationsResource, *name, types.ApplyPatchType, data, "status"), &v1alpha1.Location{})
-	if obj == nil {
-		return nil, err
-	}
-	return obj.(*v1alpha1.Location), err
-}
diff --git a/sdk/client/clientset/versioned/typed/scheduling/v1alpha1/fake/fake_placement.go b/sdk/client/clientset/versioned/typed/scheduling/v1alpha1/fake/fake_placement.go
deleted file mode 100644
index f04e4b5f50b..00000000000
--- a/sdk/client/clientset/versioned/typed/scheduling/v1alpha1/fake/fake_placement.go
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
-Copyright The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by client-gen. DO NOT EDIT.
-
-package fake
-
-import (
-	"context"
-	json "encoding/json"
-	"fmt"
-
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	labels "k8s.io/apimachinery/pkg/labels"
-	schema "k8s.io/apimachinery/pkg/runtime/schema"
-	types "k8s.io/apimachinery/pkg/types"
-	watch "k8s.io/apimachinery/pkg/watch"
-	testing "k8s.io/client-go/testing"
-
-	v1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1"
-	schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/client/applyconfiguration/scheduling/v1alpha1"
-)
-
-// FakePlacements implements PlacementInterface
-type FakePlacements struct {
-	Fake *FakeSchedulingV1alpha1
-}
-
-var placementsResource = schema.GroupVersionResource{Group: "scheduling.kcp.io", Version: "v1alpha1", Resource: "placements"}
-
-var placementsKind = schema.GroupVersionKind{Group: "scheduling.kcp.io", Version: "v1alpha1", Kind: "Placement"}
-
-// Get takes name of the placement, and returns the corresponding placement object, and an error if there is any.
-func (c *FakePlacements) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Placement, err error) {
-	obj, err := c.Fake.
-		Invokes(testing.NewRootGetAction(placementsResource, name), &v1alpha1.Placement{})
-	if obj == nil {
-		return nil, err
-	}
-	return obj.(*v1alpha1.Placement), err
-}
-
-// List takes label and field selectors, and returns the list of Placements that match those selectors.
-func (c *FakePlacements) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PlacementList, err error) {
-	obj, err := c.Fake.
-		Invokes(testing.NewRootListAction(placementsResource, placementsKind, opts), &v1alpha1.PlacementList{})
-	if obj == nil {
-		return nil, err
-	}
-
-	label, _, _ := testing.ExtractFromListOptions(opts)
-	if label == nil {
-		label = labels.Everything()
-	}
-	list := &v1alpha1.PlacementList{ListMeta: obj.(*v1alpha1.PlacementList).ListMeta}
-	for _, item := range obj.(*v1alpha1.PlacementList).Items {
-		if label.Matches(labels.Set(item.Labels)) {
-			list.Items = append(list.Items, item)
-		}
-	}
-	return list, err
-}
-
-// Watch returns a watch.Interface that watches the requested placements.
-func (c *FakePlacements) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
-	return c.Fake.
-		InvokesWatch(testing.NewRootWatchAction(placementsResource, opts))
-}
-
-// Create takes the representation of a placement and creates it. Returns the server's representation of the placement, and an error, if there is any.
-func (c *FakePlacements) Create(ctx context.Context, placement *v1alpha1.Placement, opts v1.CreateOptions) (result *v1alpha1.Placement, err error) {
-	obj, err := c.Fake.
-		Invokes(testing.NewRootCreateAction(placementsResource, placement), &v1alpha1.Placement{})
-	if obj == nil {
-		return nil, err
-	}
-	return obj.(*v1alpha1.Placement), err
-}
-
-// Update takes the representation of a placement and updates it. Returns the server's representation of the placement, and an error, if there is any.
-func (c *FakePlacements) Update(ctx context.Context, placement *v1alpha1.Placement, opts v1.UpdateOptions) (result *v1alpha1.Placement, err error) {
-	obj, err := c.Fake.
-		Invokes(testing.NewRootUpdateAction(placementsResource, placement), &v1alpha1.Placement{})
-	if obj == nil {
-		return nil, err
-	}
-	return obj.(*v1alpha1.Placement), err
-}
-
-// UpdateStatus was generated because the type contains a Status member.
-// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-func (c *FakePlacements) UpdateStatus(ctx context.Context, placement *v1alpha1.Placement, opts v1.UpdateOptions) (*v1alpha1.Placement, error) {
-	obj, err := c.Fake.
-		Invokes(testing.NewRootUpdateSubresourceAction(placementsResource, "status", placement), &v1alpha1.Placement{})
-	if obj == nil {
-		return nil, err
-	}
-	return obj.(*v1alpha1.Placement), err
-}
-
-// Delete takes name of the placement and deletes it. Returns an error if one occurs.
-func (c *FakePlacements) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
-	_, err := c.Fake.
-		Invokes(testing.NewRootDeleteActionWithOptions(placementsResource, name, opts), &v1alpha1.Placement{})
-	return err
-}
-
-// DeleteCollection deletes a collection of objects.
-func (c *FakePlacements) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
-	action := testing.NewRootDeleteCollectionAction(placementsResource, listOpts)
-
-	_, err := c.Fake.Invokes(action, &v1alpha1.PlacementList{})
-	return err
-}
-
-// Patch applies the patch and returns the patched placement.
-func (c *FakePlacements) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Placement, err error) {
-	obj, err := c.Fake.
-		Invokes(testing.NewRootPatchSubresourceAction(placementsResource, name, pt, data, subresources...), &v1alpha1.Placement{})
-	if obj == nil {
-		return nil, err
-	}
-	return obj.(*v1alpha1.Placement), err
-}
-
-// Apply takes the given apply declarative configuration, applies it and returns the applied placement.
-func (c *FakePlacements) Apply(ctx context.Context, placement *schedulingv1alpha1.PlacementApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.Placement, err error) {
-	if placement == nil {
-		return nil, fmt.Errorf("placement provided to Apply must not be nil")
-	}
-	data, err := json.Marshal(placement)
-	if err != nil {
-		return nil, err
-	}
-	name := placement.Name
-	if name == nil {
-		return nil, fmt.Errorf("placement.Name must be provided to Apply")
-	}
-	obj, err := c.Fake.
-		Invokes(testing.NewRootPatchSubresourceAction(placementsResource, *name, types.ApplyPatchType, data), &v1alpha1.Placement{})
-	if obj == nil {
-		return nil, err
-	}
-	return obj.(*v1alpha1.Placement), err
-}
-
-// ApplyStatus was generated because the type contains a Status member.
-// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
-func (c *FakePlacements) ApplyStatus(ctx context.Context, placement *schedulingv1alpha1.PlacementApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.Placement, err error) {
-	if placement == nil {
-		return nil, fmt.Errorf("placement provided to Apply must not be nil")
-	}
-	data, err := json.Marshal(placement)
-	if err != nil {
-		return nil, err
-	}
-	name := placement.Name
-	if name == nil {
-		return nil, fmt.Errorf("placement.Name must be provided to Apply")
-	}
-	obj, err := c.Fake.
-		Invokes(testing.NewRootPatchSubresourceAction(placementsResource, *name, types.ApplyPatchType, data, "status"), &v1alpha1.Placement{})
-	if obj == nil {
-		return nil, err
-	}
-	return obj.(*v1alpha1.Placement), err
-}
diff --git a/sdk/client/clientset/versioned/typed/scheduling/v1alpha1/fake/fake_scheduling_client.go b/sdk/client/clientset/versioned/typed/scheduling/v1alpha1/fake/fake_scheduling_client.go
deleted file mode 100644
index 674a42d2719..00000000000
--- a/sdk/client/clientset/versioned/typed/scheduling/v1alpha1/fake/fake_scheduling_client.go
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
-Copyright The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by client-gen. DO NOT EDIT.
-
-package fake
-
-import (
-	rest "k8s.io/client-go/rest"
-	testing "k8s.io/client-go/testing"
-
-	v1alpha1 "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/typed/scheduling/v1alpha1"
-)
-
-type FakeSchedulingV1alpha1 struct {
-	*testing.Fake
-}
-
-func (c *FakeSchedulingV1alpha1) Locations() v1alpha1.LocationInterface {
-	return &FakeLocations{c}
-}
-
-func (c *FakeSchedulingV1alpha1) Placements() v1alpha1.PlacementInterface {
-	return &FakePlacements{c}
-}
-
-// RESTClient returns a RESTClient that is used to communicate
-// with API server by this client implementation.
-func (c *FakeSchedulingV1alpha1) RESTClient() rest.Interface {
-	var ret *rest.RESTClient
-	return ret
-}
diff --git a/sdk/client/clientset/versioned/typed/scheduling/v1alpha1/generated_expansion.go b/sdk/client/clientset/versioned/typed/scheduling/v1alpha1/generated_expansion.go
deleted file mode 100644
index 4401413cf19..00000000000
--- a/sdk/client/clientset/versioned/typed/scheduling/v1alpha1/generated_expansion.go
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
-Copyright The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by client-gen. DO NOT EDIT.
-
-package v1alpha1
-
-type LocationExpansion interface{}
-
-type PlacementExpansion interface{}
diff --git a/sdk/client/clientset/versioned/typed/scheduling/v1alpha1/location.go b/sdk/client/clientset/versioned/typed/scheduling/v1alpha1/location.go
deleted file mode 100644
index 88d9562533c..00000000000
--- a/sdk/client/clientset/versioned/typed/scheduling/v1alpha1/location.go
+++ /dev/null
@@ -1,244 +0,0 @@
-/*
-Copyright The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by client-gen. DO NOT EDIT.
-
-package v1alpha1
-
-import (
-	"context"
-	json "encoding/json"
-	"fmt"
-	"time"
-
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	types "k8s.io/apimachinery/pkg/types"
-	watch "k8s.io/apimachinery/pkg/watch"
-	rest "k8s.io/client-go/rest"
-
-	v1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1"
-	schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/client/applyconfiguration/scheduling/v1alpha1"
-	scheme "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/scheme"
-)
-
-// LocationsGetter has a method to return a LocationInterface.
-// A group's client should implement this interface.
-type LocationsGetter interface {
-	Locations() LocationInterface
-}
-
-// LocationInterface has methods to work with Location resources.
-type LocationInterface interface {
-	Create(ctx context.Context, location *v1alpha1.Location, opts v1.CreateOptions) (*v1alpha1.Location, error)
-	Update(ctx context.Context, location *v1alpha1.Location, opts v1.UpdateOptions) (*v1alpha1.Location, error)
-	UpdateStatus(ctx context.Context, location *v1alpha1.Location, opts v1.UpdateOptions) (*v1alpha1.Location, error)
-	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
-	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
-	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.Location, error)
-	List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.LocationList, error)
-	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
-	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Location, err error)
-	Apply(ctx context.Context, location *schedulingv1alpha1.LocationApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.Location, err error)
-	ApplyStatus(ctx context.Context, location *schedulingv1alpha1.LocationApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.Location, err error)
-	LocationExpansion
-}
-
-// locations implements LocationInterface
-type locations struct {
-	client rest.Interface
-}
-
-// newLocations returns a Locations
-func newLocations(c *SchedulingV1alpha1Client) *locations {
-	return &locations{
-		client: c.RESTClient(),
-	}
-}
-
-// Get takes name of the location, and returns the corresponding location object, and an error if there is any.
-func (c *locations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Location, err error) {
-	result = &v1alpha1.Location{}
-	err = c.client.Get().
-		Resource("locations").
-		Name(name).
-		VersionedParams(&options, scheme.ParameterCodec).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// List takes label and field selectors, and returns the list of Locations that match those selectors.
-func (c *locations) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.LocationList, err error) {
-	var timeout time.Duration
-	if opts.TimeoutSeconds != nil {
-		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
-	}
-	result = &v1alpha1.LocationList{}
-	err = c.client.Get().
-		Resource("locations").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Timeout(timeout).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// Watch returns a watch.Interface that watches the requested locations.
-func (c *locations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
-	var timeout time.Duration
-	if opts.TimeoutSeconds != nil {
-		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
-	}
-	opts.Watch = true
-	return c.client.Get().
-		Resource("locations").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Timeout(timeout).
-		Watch(ctx)
-}
-
-// Create takes the representation of a location and creates it. Returns the server's representation of the location, and an error, if there is any.
-func (c *locations) Create(ctx context.Context, location *v1alpha1.Location, opts v1.CreateOptions) (result *v1alpha1.Location, err error) {
-	result = &v1alpha1.Location{}
-	err = c.client.Post().
-		Resource("locations").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Body(location).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// Update takes the representation of a location and updates it. Returns the server's representation of the location, and an error, if there is any.
-func (c *locations) Update(ctx context.Context, location *v1alpha1.Location, opts v1.UpdateOptions) (result *v1alpha1.Location, err error) {
-	result = &v1alpha1.Location{}
-	err = c.client.Put().
-		Resource("locations").
-		Name(location.Name).
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Body(location).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// UpdateStatus was generated because the type contains a Status member.
-// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-func (c *locations) UpdateStatus(ctx context.Context, location *v1alpha1.Location, opts v1.UpdateOptions) (result *v1alpha1.Location, err error) {
-	result = &v1alpha1.Location{}
-	err = c.client.Put().
-		Resource("locations").
-		Name(location.Name).
-		SubResource("status").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Body(location).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// Delete takes name of the location and deletes it. Returns an error if one occurs.
-func (c *locations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
-	return c.client.Delete().
-		Resource("locations").
-		Name(name).
-		Body(&opts).
-		Do(ctx).
-		Error()
-}
-
-// DeleteCollection deletes a collection of objects.
-func (c *locations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
-	var timeout time.Duration
-	if listOpts.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
-	}
-	return c.client.Delete().
-		Resource("locations").
-		VersionedParams(&listOpts, scheme.ParameterCodec).
-		Timeout(timeout).
-		Body(&opts).
-		Do(ctx).
-		Error()
-}
-
-// Patch applies the patch and returns the patched location.
-func (c *locations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Location, err error) {
-	result = &v1alpha1.Location{}
-	err = c.client.Patch(pt).
-		Resource("locations").
-		Name(name).
-		SubResource(subresources...).
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Body(data).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// Apply takes the given apply declarative configuration, applies it and returns the applied location.
-func (c *locations) Apply(ctx context.Context, location *schedulingv1alpha1.LocationApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.Location, err error) {
-	if location == nil {
-		return nil, fmt.Errorf("location provided to Apply must not be nil")
-	}
-	patchOpts := opts.ToPatchOptions()
-	data, err := json.Marshal(location)
-	if err != nil {
-		return nil, err
-	}
-	name := location.Name
-	if name == nil {
-		return nil, fmt.Errorf("location.Name must be provided to Apply")
-	}
-	result = &v1alpha1.Location{}
-	err = c.client.Patch(types.ApplyPatchType).
-		Resource("locations").
-		Name(*name).
-		VersionedParams(&patchOpts, scheme.ParameterCodec).
-		Body(data).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// ApplyStatus was generated because the type contains a Status member.
-// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
-func (c *locations) ApplyStatus(ctx context.Context, location *schedulingv1alpha1.LocationApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.Location, err error) { - if location == nil { - return nil, fmt.Errorf("location provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(location) - if err != nil { - return nil, err - } - - name := location.Name - if name == nil { - return nil, fmt.Errorf("location.Name must be provided to Apply") - } - - result = &v1alpha1.Location{} - err = c.client.Patch(types.ApplyPatchType). - Resource("locations"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/sdk/client/clientset/versioned/typed/scheduling/v1alpha1/placement.go b/sdk/client/clientset/versioned/typed/scheduling/v1alpha1/placement.go deleted file mode 100644 index 2f9871b3dc6..00000000000 --- a/sdk/client/clientset/versioned/typed/scheduling/v1alpha1/placement.go +++ /dev/null @@ -1,244 +0,0 @@ -/* -Copyright The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "context" - json "encoding/json" - "fmt" - "time" - - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - - v1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1" - schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/client/applyconfiguration/scheduling/v1alpha1" - scheme "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/scheme" -) - -// PlacementsGetter has a method to return a PlacementInterface. -// A group's client should implement this interface. -type PlacementsGetter interface { - Placements() PlacementInterface -} - -// PlacementInterface has methods to work with Placement resources. 
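Every verb in the locations client deleted above (and in the placements client that follows) reduces to the same rest.Interface builder chain: verb, resource path, optional name, encoded options, execute, decode. A self-contained sketch of that chain, using the import paths as they existed before this patch:

    package example

    import (
        "context"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        rest "k8s.io/client-go/rest"

        v1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1"
        scheme "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/scheme"
    )

    // getLocation mirrors the generated Get: an HTTP GET on the
    // cluster-scoped path /apis/scheduling.kcp.io/v1alpha1/locations/<name>.
    func getLocation(ctx context.Context, c rest.Interface, name string) (*v1alpha1.Location, error) {
        result := &v1alpha1.Location{}
        err := c.Get().                  // HTTP verb
            Resource("locations").       // resource path segment
            Name(name).                  // object name
            VersionedParams(&metav1.GetOptions{}, scheme.ParameterCodec). // encode options as query params
            Do(ctx).                     // send the request
            Into(result)                 // decode the response body
        return result, err
    }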
-type PlacementInterface interface { - Create(ctx context.Context, placement *v1alpha1.Placement, opts v1.CreateOptions) (*v1alpha1.Placement, error) - Update(ctx context.Context, placement *v1alpha1.Placement, opts v1.UpdateOptions) (*v1alpha1.Placement, error) - UpdateStatus(ctx context.Context, placement *v1alpha1.Placement, opts v1.UpdateOptions) (*v1alpha1.Placement, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.Placement, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.PlacementList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Placement, err error) - Apply(ctx context.Context, placement *schedulingv1alpha1.PlacementApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.Placement, err error) - ApplyStatus(ctx context.Context, placement *schedulingv1alpha1.PlacementApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.Placement, err error) - PlacementExpansion -} - -// placements implements PlacementInterface -type placements struct { - client rest.Interface -} - -// newPlacements returns a Placements -func newPlacements(c *SchedulingV1alpha1Client) *placements { - return &placements{ - client: c.RESTClient(), - } -} - -// Get takes name of the placement, and returns the corresponding placement object, and an error if there is any. -func (c *placements) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Placement, err error) { - result = &v1alpha1.Placement{} - err = c.client.Get(). - Resource("placements"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Placements that match those selectors. -func (c *placements) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PlacementList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.PlacementList{} - err = c.client.Get(). - Resource("placements"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested placements. -func (c *placements) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("placements"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a placement and creates it. Returns the server's representation of the placement, and an error, if there is any. -func (c *placements) Create(ctx context.Context, placement *v1alpha1.Placement, opts v1.CreateOptions) (result *v1alpha1.Placement, err error) { - result = &v1alpha1.Placement{} - err = c.client.Post(). - Resource("placements"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(placement). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a placement and updates it. 
Returns the server's representation of the placement, and an error, if there is any. -func (c *placements) Update(ctx context.Context, placement *v1alpha1.Placement, opts v1.UpdateOptions) (result *v1alpha1.Placement, err error) { - result = &v1alpha1.Placement{} - err = c.client.Put(). - Resource("placements"). - Name(placement.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(placement). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *placements) UpdateStatus(ctx context.Context, placement *v1alpha1.Placement, opts v1.UpdateOptions) (result *v1alpha1.Placement, err error) { - result = &v1alpha1.Placement{} - err = c.client.Put(). - Resource("placements"). - Name(placement.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(placement). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the placement and deletes it. Returns an error if one occurs. -func (c *placements) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("placements"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *placements) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("placements"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched placement. -func (c *placements) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Placement, err error) { - result = &v1alpha1.Placement{} - err = c.client.Patch(pt). - Resource("placements"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied placement. -func (c *placements) Apply(ctx context.Context, placement *schedulingv1alpha1.PlacementApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.Placement, err error) { - if placement == nil { - return nil, fmt.Errorf("placement provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(placement) - if err != nil { - return nil, err - } - name := placement.Name - if name == nil { - return nil, fmt.Errorf("placement.Name must be provided to Apply") - } - result = &v1alpha1.Placement{} - err = c.client.Patch(types.ApplyPatchType). - Resource("placements"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
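Apply and ApplyStatus marshal the apply configuration to JSON and submit it as an ApplyPatchType patch, i.e. server-side apply: the field manager named in the options owns exactly the fields it sends. A hedged usage sketch; the Placement(name) constructor is the conventional applyconfiguration-gen output and is assumed here, not shown in this hunk:

    ac := applyconf.Placement("my-placement") // assumed constructor from the removed applyconfiguration package
    obj, err := client.Placements().Apply(ctx, ac, metav1.ApplyOptions{
        FieldManager: "my-controller", // required: names the owner of the applied fields
        Force:        true,            // resolve conflicts by taking ownership
    })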
-func (c *placements) ApplyStatus(ctx context.Context, placement *schedulingv1alpha1.PlacementApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.Placement, err error) { - if placement == nil { - return nil, fmt.Errorf("placement provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(placement) - if err != nil { - return nil, err - } - - name := placement.Name - if name == nil { - return nil, fmt.Errorf("placement.Name must be provided to Apply") - } - - result = &v1alpha1.Placement{} - err = c.client.Patch(types.ApplyPatchType). - Resource("placements"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/sdk/client/clientset/versioned/typed/scheduling/v1alpha1/scheduling_client.go b/sdk/client/clientset/versioned/typed/scheduling/v1alpha1/scheduling_client.go deleted file mode 100644 index 3f99b675e4a..00000000000 --- a/sdk/client/clientset/versioned/typed/scheduling/v1alpha1/scheduling_client.go +++ /dev/null @@ -1,113 +0,0 @@ -/* -Copyright The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "net/http" - - rest "k8s.io/client-go/rest" - - v1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1" - "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/scheme" -) - -type SchedulingV1alpha1Interface interface { - RESTClient() rest.Interface - LocationsGetter - PlacementsGetter -} - -// SchedulingV1alpha1Client is used to interact with features provided by the scheduling.kcp.io group. -type SchedulingV1alpha1Client struct { - restClient rest.Interface -} - -func (c *SchedulingV1alpha1Client) Locations() LocationInterface { - return newLocations(c) -} - -func (c *SchedulingV1alpha1Client) Placements() PlacementInterface { - return newPlacements(c) -} - -// NewForConfig creates a new SchedulingV1alpha1Client for the given config. -// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), -// where httpClient was generated with rest.HTTPClientFor(c). -func NewForConfig(c *rest.Config) (*SchedulingV1alpha1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - httpClient, err := rest.HTTPClientFor(&config) - if err != nil { - return nil, err - } - return NewForConfigAndClient(&config, httpClient) -} - -// NewForConfigAndClient creates a new SchedulingV1alpha1Client for the given config and http client. -// Note the http client provided takes precedence over the configured transport values. 
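The constructor set above follows the standard client-gen shape: copy the caller's rest.Config, default it via setConfigDefaults, then build the REST client (NewForConfigAndClient when an *http.Client is supplied, NewForConfigOrDie for must-succeed paths). A sketch of how a consumer obtained this client before the removal; kubeconfigPath is a placeholder variable:

    cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath) // k8s.io/client-go/tools/clientcmd
    if err != nil {
        return err
    }
    schedClient, err := NewForConfig(cfg) // defaults GroupVersion, APIPath, codecs, then builds the client
    if err != nil {
        return err
    }
    locations, err := schedClient.Locations().List(ctx, metav1.ListOptions{})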
-func NewForConfigAndClient(c *rest.Config, h *http.Client) (*SchedulingV1alpha1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientForConfigAndClient(&config, h) - if err != nil { - return nil, err - } - return &SchedulingV1alpha1Client{client}, nil -} - -// NewForConfigOrDie creates a new SchedulingV1alpha1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *SchedulingV1alpha1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new SchedulingV1alpha1Client for the given RESTClient. -func New(c rest.Interface) *SchedulingV1alpha1Client { - return &SchedulingV1alpha1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1alpha1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *SchedulingV1alpha1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/sdk/client/clientset/versioned/typed/workload/v1alpha1/doc.go b/sdk/client/clientset/versioned/typed/workload/v1alpha1/doc.go deleted file mode 100644 index 364b93c7cfd..00000000000 --- a/sdk/client/clientset/versioned/typed/workload/v1alpha1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package v1alpha1 diff --git a/sdk/client/clientset/versioned/typed/workload/v1alpha1/fake/doc.go b/sdk/client/clientset/versioned/typed/workload/v1alpha1/fake/doc.go deleted file mode 100644 index e388f29189e..00000000000 --- a/sdk/client/clientset/versioned/typed/workload/v1alpha1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. 
-package fake diff --git a/sdk/client/clientset/versioned/typed/workload/v1alpha1/fake/fake_synctarget.go b/sdk/client/clientset/versioned/typed/workload/v1alpha1/fake/fake_synctarget.go deleted file mode 100644 index 5688df73a42..00000000000 --- a/sdk/client/clientset/versioned/typed/workload/v1alpha1/fake/fake_synctarget.go +++ /dev/null @@ -1,180 +0,0 @@ -/* -Copyright The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - json "encoding/json" - "fmt" - - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" - - v1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/client/applyconfiguration/workload/v1alpha1" -) - -// FakeSyncTargets implements SyncTargetInterface -type FakeSyncTargets struct { - Fake *FakeWorkloadV1alpha1 -} - -var synctargetsResource = schema.GroupVersionResource{Group: "workload.kcp.io", Version: "v1alpha1", Resource: "synctargets"} - -var synctargetsKind = schema.GroupVersionKind{Group: "workload.kcp.io", Version: "v1alpha1", Kind: "SyncTarget"} - -// Get takes name of the syncTarget, and returns the corresponding syncTarget object, and an error if there is any. -func (c *FakeSyncTargets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.SyncTarget, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(synctargetsResource, name), &v1alpha1.SyncTarget{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.SyncTarget), err -} - -// List takes label and field selectors, and returns the list of SyncTargets that match those selectors. -func (c *FakeSyncTargets) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.SyncTargetList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(synctargetsResource, synctargetsKind, opts), &v1alpha1.SyncTargetList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.SyncTargetList{ListMeta: obj.(*v1alpha1.SyncTargetList).ListMeta} - for _, item := range obj.(*v1alpha1.SyncTargetList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested syncTargets. -func (c *FakeSyncTargets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(synctargetsResource, opts)) -} - -// Create takes the representation of a syncTarget and creates it. Returns the server's representation of the syncTarget, and an error, if there is any. 
-func (c *FakeSyncTargets) Create(ctx context.Context, syncTarget *v1alpha1.SyncTarget, opts v1.CreateOptions) (result *v1alpha1.SyncTarget, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(synctargetsResource, syncTarget), &v1alpha1.SyncTarget{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.SyncTarget), err -} - -// Update takes the representation of a syncTarget and updates it. Returns the server's representation of the syncTarget, and an error, if there is any. -func (c *FakeSyncTargets) Update(ctx context.Context, syncTarget *v1alpha1.SyncTarget, opts v1.UpdateOptions) (result *v1alpha1.SyncTarget, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(synctargetsResource, syncTarget), &v1alpha1.SyncTarget{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.SyncTarget), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeSyncTargets) UpdateStatus(ctx context.Context, syncTarget *v1alpha1.SyncTarget, opts v1.UpdateOptions) (*v1alpha1.SyncTarget, error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(synctargetsResource, "status", syncTarget), &v1alpha1.SyncTarget{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.SyncTarget), err -} - -// Delete takes name of the syncTarget and deletes it. Returns an error if one occurs. -func (c *FakeSyncTargets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(synctargetsResource, name, opts), &v1alpha1.SyncTarget{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeSyncTargets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(synctargetsResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.SyncTargetList{}) - return err -} - -// Patch applies the patch and returns the patched syncTarget. -func (c *FakeSyncTargets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.SyncTarget, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(synctargetsResource, name, pt, data, subresources...), &v1alpha1.SyncTarget{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.SyncTarget), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied syncTarget. -func (c *FakeSyncTargets) Apply(ctx context.Context, syncTarget *workloadv1alpha1.SyncTargetApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.SyncTarget, err error) { - if syncTarget == nil { - return nil, fmt.Errorf("syncTarget provided to Apply must not be nil") - } - data, err := json.Marshal(syncTarget) - if err != nil { - return nil, err - } - name := syncTarget.Name - if name == nil { - return nil, fmt.Errorf("syncTarget.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(synctargetsResource, *name, types.ApplyPatchType, data), &v1alpha1.SyncTarget{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.SyncTarget), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
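The fake client never talks to a server: every verb routes through testing.Fake.Invokes, which records the action and lets registered reactors supply the response. A minimal test sketch built only from the types visible in this hunk plus k8s.io/client-go/testing (the generated fake package normally also ships a NewSimpleClientset wired to an object tracker, not shown here):

    f := &clientgotesting.Fake{} // k8s.io/client-go/testing
    f.AddReactor("get", "synctargets", func(action clientgotesting.Action) (bool, runtime.Object, error) {
        return true, &v1alpha1.SyncTarget{ObjectMeta: metav1.ObjectMeta{Name: "st-1"}}, nil
    })
    syncTargets := &FakeSyncTargets{Fake: &FakeWorkloadV1alpha1{Fake: f}}
    st, err := syncTargets.Get(context.TODO(), "st-1", metav1.GetOptions{})
    // err == nil, st.Name == "st-1"; f.Actions() now records the root get action.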
-func (c *FakeSyncTargets) ApplyStatus(ctx context.Context, syncTarget *workloadv1alpha1.SyncTargetApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.SyncTarget, err error) { - if syncTarget == nil { - return nil, fmt.Errorf("syncTarget provided to Apply must not be nil") - } - data, err := json.Marshal(syncTarget) - if err != nil { - return nil, err - } - name := syncTarget.Name - if name == nil { - return nil, fmt.Errorf("syncTarget.Name must be provided to Apply") - } - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(synctargetsResource, *name, types.ApplyPatchType, data, "status"), &v1alpha1.SyncTarget{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.SyncTarget), err -} diff --git a/sdk/client/clientset/versioned/typed/workload/v1alpha1/fake/fake_workload_client.go b/sdk/client/clientset/versioned/typed/workload/v1alpha1/fake/fake_workload_client.go deleted file mode 100644 index c089c629007..00000000000 --- a/sdk/client/clientset/versioned/typed/workload/v1alpha1/fake/fake_workload_client.go +++ /dev/null @@ -1,41 +0,0 @@ -/* -Copyright The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" - - v1alpha1 "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/typed/workload/v1alpha1" -) - -type FakeWorkloadV1alpha1 struct { - *testing.Fake -} - -func (c *FakeWorkloadV1alpha1) SyncTargets() v1alpha1.SyncTargetInterface { - return &FakeSyncTargets{c} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *FakeWorkloadV1alpha1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/sdk/client/clientset/versioned/typed/workload/v1alpha1/generated_expansion.go b/sdk/client/clientset/versioned/typed/workload/v1alpha1/generated_expansion.go deleted file mode 100644 index 0841871b53d..00000000000 --- a/sdk/client/clientset/versioned/typed/workload/v1alpha1/generated_expansion.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package v1alpha1 - -type SyncTargetExpansion interface{} diff --git a/sdk/client/clientset/versioned/typed/workload/v1alpha1/synctarget.go b/sdk/client/clientset/versioned/typed/workload/v1alpha1/synctarget.go deleted file mode 100644 index 5c3a5df66d4..00000000000 --- a/sdk/client/clientset/versioned/typed/workload/v1alpha1/synctarget.go +++ /dev/null @@ -1,244 +0,0 @@ -/* -Copyright The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "context" - json "encoding/json" - "fmt" - "time" - - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" - - v1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/client/applyconfiguration/workload/v1alpha1" - scheme "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/scheme" -) - -// SyncTargetsGetter has a method to return a SyncTargetInterface. -// A group's client should implement this interface. -type SyncTargetsGetter interface { - SyncTargets() SyncTargetInterface -} - -// SyncTargetInterface has methods to work with SyncTarget resources. -type SyncTargetInterface interface { - Create(ctx context.Context, syncTarget *v1alpha1.SyncTarget, opts v1.CreateOptions) (*v1alpha1.SyncTarget, error) - Update(ctx context.Context, syncTarget *v1alpha1.SyncTarget, opts v1.UpdateOptions) (*v1alpha1.SyncTarget, error) - UpdateStatus(ctx context.Context, syncTarget *v1alpha1.SyncTarget, opts v1.UpdateOptions) (*v1alpha1.SyncTarget, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.SyncTarget, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.SyncTargetList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.SyncTarget, err error) - Apply(ctx context.Context, syncTarget *workloadv1alpha1.SyncTargetApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.SyncTarget, err error) - ApplyStatus(ctx context.Context, syncTarget *workloadv1alpha1.SyncTargetApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.SyncTarget, err error) - SyncTargetExpansion -} - -// syncTargets implements SyncTargetInterface -type syncTargets struct { - client rest.Interface -} - -// newSyncTargets returns a SyncTargets -func newSyncTargets(c *WorkloadV1alpha1Client) *syncTargets { - return &syncTargets{ - client: c.RESTClient(), - } -} - -// Get takes name of the syncTarget, and returns the corresponding syncTarget object, and an error if there is any. 
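The List and Watch implementations that follow both translate opts.TimeoutSeconds into a request timeout, and Watch additionally forces opts.Watch = true so the server streams events instead of returning a list. Caller-side, the watch loop looked like this; workloadClient stands for the WorkloadV1alpha1Interface being deleted:

    timeout := int64(300) // the server closes the stream after roughly this many seconds
    w, err := workloadClient.SyncTargets().Watch(ctx, metav1.ListOptions{TimeoutSeconds: &timeout})
    if err != nil {
        return err
    }
    defer w.Stop()
    for ev := range w.ResultChan() {
        st, ok := ev.Object.(*v1alpha1.SyncTarget)
        if !ok {
            continue // e.g. a *metav1.Status carrying a watch error
        }
        fmt.Println(ev.Type, st.Name) // ADDED / MODIFIED / DELETED
    }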
-func (c *syncTargets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.SyncTarget, err error) { - result = &v1alpha1.SyncTarget{} - err = c.client.Get(). - Resource("synctargets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of SyncTargets that match those selectors. -func (c *syncTargets) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.SyncTargetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.SyncTargetList{} - err = c.client.Get(). - Resource("synctargets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested syncTargets. -func (c *syncTargets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("synctargets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a syncTarget and creates it. Returns the server's representation of the syncTarget, and an error, if there is any. -func (c *syncTargets) Create(ctx context.Context, syncTarget *v1alpha1.SyncTarget, opts v1.CreateOptions) (result *v1alpha1.SyncTarget, err error) { - result = &v1alpha1.SyncTarget{} - err = c.client.Post(). - Resource("synctargets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(syncTarget). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a syncTarget and updates it. Returns the server's representation of the syncTarget, and an error, if there is any. -func (c *syncTargets) Update(ctx context.Context, syncTarget *v1alpha1.SyncTarget, opts v1.UpdateOptions) (result *v1alpha1.SyncTarget, err error) { - result = &v1alpha1.SyncTarget{} - err = c.client.Put(). - Resource("synctargets"). - Name(syncTarget.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(syncTarget). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *syncTargets) UpdateStatus(ctx context.Context, syncTarget *v1alpha1.SyncTarget, opts v1.UpdateOptions) (result *v1alpha1.SyncTarget, err error) { - result = &v1alpha1.SyncTarget{} - err = c.client.Put(). - Resource("synctargets"). - Name(syncTarget.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(syncTarget). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the syncTarget and deletes it. Returns an error if one occurs. -func (c *syncTargets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("synctargets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *syncTargets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). 
- Resource("synctargets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched syncTarget. -func (c *syncTargets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.SyncTarget, err error) { - result = &v1alpha1.SyncTarget{} - err = c.client.Patch(pt). - Resource("synctargets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied syncTarget. -func (c *syncTargets) Apply(ctx context.Context, syncTarget *workloadv1alpha1.SyncTargetApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.SyncTarget, err error) { - if syncTarget == nil { - return nil, fmt.Errorf("syncTarget provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(syncTarget) - if err != nil { - return nil, err - } - name := syncTarget.Name - if name == nil { - return nil, fmt.Errorf("syncTarget.Name must be provided to Apply") - } - result = &v1alpha1.SyncTarget{} - err = c.client.Patch(types.ApplyPatchType). - Resource("synctargets"). - Name(*name). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *syncTargets) ApplyStatus(ctx context.Context, syncTarget *workloadv1alpha1.SyncTargetApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.SyncTarget, err error) { - if syncTarget == nil { - return nil, fmt.Errorf("syncTarget provided to Apply must not be nil") - } - patchOpts := opts.ToPatchOptions() - data, err := json.Marshal(syncTarget) - if err != nil { - return nil, err - } - - name := syncTarget.Name - if name == nil { - return nil, fmt.Errorf("syncTarget.Name must be provided to Apply") - } - - result = &v1alpha1.SyncTarget{} - err = c.client.Patch(types.ApplyPatchType). - Resource("synctargets"). - Name(*name). - SubResource("status"). - VersionedParams(&patchOpts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/sdk/client/clientset/versioned/typed/workload/v1alpha1/workload_client.go b/sdk/client/clientset/versioned/typed/workload/v1alpha1/workload_client.go deleted file mode 100644 index e3869099879..00000000000 --- a/sdk/client/clientset/versioned/typed/workload/v1alpha1/workload_client.go +++ /dev/null @@ -1,108 +0,0 @@ -/* -Copyright The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package v1alpha1 - -import ( - "net/http" - - rest "k8s.io/client-go/rest" - - v1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" - "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/scheme" -) - -type WorkloadV1alpha1Interface interface { - RESTClient() rest.Interface - SyncTargetsGetter -} - -// WorkloadV1alpha1Client is used to interact with features provided by the workload.kcp.io group. -type WorkloadV1alpha1Client struct { - restClient rest.Interface -} - -func (c *WorkloadV1alpha1Client) SyncTargets() SyncTargetInterface { - return newSyncTargets(c) -} - -// NewForConfig creates a new WorkloadV1alpha1Client for the given config. -// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), -// where httpClient was generated with rest.HTTPClientFor(c). -func NewForConfig(c *rest.Config) (*WorkloadV1alpha1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - httpClient, err := rest.HTTPClientFor(&config) - if err != nil { - return nil, err - } - return NewForConfigAndClient(&config, httpClient) -} - -// NewForConfigAndClient creates a new WorkloadV1alpha1Client for the given config and http client. -// Note the http client provided takes precedence over the configured transport values. -func NewForConfigAndClient(c *rest.Config, h *http.Client) (*WorkloadV1alpha1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientForConfigAndClient(&config, h) - if err != nil { - return nil, err - } - return &WorkloadV1alpha1Client{client}, nil -} - -// NewForConfigOrDie creates a new WorkloadV1alpha1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *WorkloadV1alpha1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new WorkloadV1alpha1Client for the given RESTClient. -func New(c rest.Interface) *WorkloadV1alpha1Client { - return &WorkloadV1alpha1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1alpha1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. 
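setConfigDefaults above is where the group wiring happens: it pins the config to workload.kcp.io/v1alpha1, roots requests under /apis, and installs the clientset's codecs. Restated as a standalone sketch of the effective configuration (rest.CopyConfig is the idiomatic way to avoid mutating the caller's config):

    cfg := rest.CopyConfig(base)
    gv := v1alpha1.SchemeGroupVersion // workload.kcp.io/v1alpha1
    cfg.GroupVersion = &gv
    cfg.APIPath = "/apis" // non-core API groups are served under /apis
    cfg.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
    // Resulting request base: <host>/apis/workload.kcp.io/v1alpha1/synctargets/...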
-func (c *WorkloadV1alpha1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/sdk/client/informers/externalversions/factory.go b/sdk/client/informers/externalversions/factory.go index fa6b4e4a8cb..7a0bb20ddbf 100644 --- a/sdk/client/informers/externalversions/factory.go +++ b/sdk/client/informers/externalversions/factory.go @@ -40,10 +40,8 @@ import ( apisinformers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/apis" coreinformers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/core" "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/internalinterfaces" - schedulinginformers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/scheduling" tenancyinformers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/tenancy" topologyinformers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/topology" - workloadinformers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/workload" ) // SharedInformerOption defines the functional option type for SharedInformerFactory. @@ -267,10 +265,8 @@ type SharedInformerFactory interface { Apiresource() apiresourceinformers.ClusterInterface Apis() apisinformers.ClusterInterface Core() coreinformers.ClusterInterface - Scheduling() schedulinginformers.ClusterInterface Tenancy() tenancyinformers.ClusterInterface Topology() topologyinformers.ClusterInterface - Workload() workloadinformers.ClusterInterface } func (f *sharedInformerFactory) Apiresource() apiresourceinformers.ClusterInterface { @@ -285,10 +281,6 @@ func (f *sharedInformerFactory) Core() coreinformers.ClusterInterface { return coreinformers.New(f, f.tweakListOptions) } -func (f *sharedInformerFactory) Scheduling() schedulinginformers.ClusterInterface { - return schedulinginformers.New(f, f.tweakListOptions) -} - func (f *sharedInformerFactory) Tenancy() tenancyinformers.ClusterInterface { return tenancyinformers.New(f, f.tweakListOptions) } @@ -297,10 +289,6 @@ func (f *sharedInformerFactory) Topology() topologyinformers.ClusterInterface { return topologyinformers.New(f, f.tweakListOptions) } -func (f *sharedInformerFactory) Workload() workloadinformers.ClusterInterface { - return workloadinformers.New(f, f.tweakListOptions) -} - func (f *sharedInformerFactory) Cluster(clusterName logicalcluster.Name) ScopedDynamicSharedInformerFactory { return &scopedDynamicSharedInformerFactory{ sharedInformerFactory: f, @@ -446,10 +434,8 @@ type SharedScopedInformerFactory interface { Apiresource() apiresourceinformers.Interface Apis() apisinformers.Interface Core() coreinformers.Interface - Scheduling() schedulinginformers.Interface Tenancy() tenancyinformers.Interface Topology() topologyinformers.Interface - Workload() workloadinformers.Interface } func (f *sharedScopedInformerFactory) Apiresource() apiresourceinformers.Interface { @@ -464,10 +450,6 @@ func (f *sharedScopedInformerFactory) Core() coreinformers.Interface { return coreinformers.NewScoped(f, f.namespace, f.tweakListOptions) } -func (f *sharedScopedInformerFactory) Scheduling() schedulinginformers.Interface { - return schedulinginformers.NewScoped(f, f.namespace, f.tweakListOptions) -} - func (f *sharedScopedInformerFactory) Tenancy() tenancyinformers.Interface { return tenancyinformers.NewScoped(f, f.namespace, f.tweakListOptions) } @@ -475,7 +457,3 @@ func (f *sharedScopedInformerFactory) Tenancy() tenancyinformers.Interface { func (f *sharedScopedInformerFactory) Topology() topologyinformers.Interface { return 
topologyinformers.NewScoped(f, f.namespace, f.tweakListOptions) } - -func (f *sharedScopedInformerFactory) Workload() workloadinformers.Interface { - return workloadinformers.NewScoped(f, f.namespace, f.tweakListOptions) -} diff --git a/sdk/client/informers/externalversions/generic.go b/sdk/client/informers/externalversions/generic.go index 1296bc89a6b..c741e16a0b0 100644 --- a/sdk/client/informers/externalversions/generic.go +++ b/sdk/client/informers/externalversions/generic.go @@ -33,10 +33,8 @@ import ( apiresourcev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apiresource/v1alpha1" apisv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1" corev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1" - schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1" tenancyv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1" topologyv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/topology/v1alpha1" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" ) type GenericClusterInformer interface { @@ -113,11 +111,6 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericClusterInformer{resource: resource.GroupResource(), informer: f.Core().V1alpha1().LogicalClusters().Informer()}, nil case corev1alpha1.SchemeGroupVersion.WithResource("shards"): return &genericClusterInformer{resource: resource.GroupResource(), informer: f.Core().V1alpha1().Shards().Informer()}, nil - // Group=scheduling.kcp.io, Version=V1alpha1 - case schedulingv1alpha1.SchemeGroupVersion.WithResource("locations"): - return &genericClusterInformer{resource: resource.GroupResource(), informer: f.Scheduling().V1alpha1().Locations().Informer()}, nil - case schedulingv1alpha1.SchemeGroupVersion.WithResource("placements"): - return &genericClusterInformer{resource: resource.GroupResource(), informer: f.Scheduling().V1alpha1().Placements().Informer()}, nil // Group=tenancy.kcp.io, Version=V1alpha1 case tenancyv1alpha1.SchemeGroupVersion.WithResource("workspaces"): return &genericClusterInformer{resource: resource.GroupResource(), informer: f.Tenancy().V1alpha1().Workspaces().Informer()}, nil @@ -128,9 +121,6 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericClusterInformer{resource: resource.GroupResource(), informer: f.Topology().V1alpha1().Partitions().Informer()}, nil case topologyv1alpha1.SchemeGroupVersion.WithResource("partitionsets"): return &genericClusterInformer{resource: resource.GroupResource(), informer: f.Topology().V1alpha1().PartitionSets().Informer()}, nil - // Group=workload.kcp.io, Version=V1alpha1 - case workloadv1alpha1.SchemeGroupVersion.WithResource("synctargets"): - return &genericClusterInformer{resource: resource.GroupResource(), informer: f.Workload().V1alpha1().SyncTargets().Informer()}, nil } return nil, fmt.Errorf("no informer found for %v", resource) @@ -170,13 +160,6 @@ func (f *sharedScopedInformerFactory) ForResource(resource schema.GroupVersionRe case corev1alpha1.SchemeGroupVersion.WithResource("shards"): informer := f.Core().V1alpha1().Shards().Informer() return &genericInformer{lister: cache.NewGenericLister(informer.GetIndexer(), resource.GroupResource()), informer: informer}, nil - // Group=scheduling.kcp.io, Version=V1alpha1 - case schedulingv1alpha1.SchemeGroupVersion.WithResource("locations"): - informer := f.Scheduling().V1alpha1().Locations().Informer() - return &genericInformer{lister: cache.NewGenericLister(informer.GetIndexer(), resource.GroupResource()), informer: 
informer}, nil - case schedulingv1alpha1.SchemeGroupVersion.WithResource("placements"): - informer := f.Scheduling().V1alpha1().Placements().Informer() - return &genericInformer{lister: cache.NewGenericLister(informer.GetIndexer(), resource.GroupResource()), informer: informer}, nil // Group=tenancy.kcp.io, Version=V1alpha1 case tenancyv1alpha1.SchemeGroupVersion.WithResource("workspaces"): informer := f.Tenancy().V1alpha1().Workspaces().Informer() @@ -191,10 +174,6 @@ func (f *sharedScopedInformerFactory) ForResource(resource schema.GroupVersionRe case topologyv1alpha1.SchemeGroupVersion.WithResource("partitionsets"): informer := f.Topology().V1alpha1().PartitionSets().Informer() return &genericInformer{lister: cache.NewGenericLister(informer.GetIndexer(), resource.GroupResource()), informer: informer}, nil - // Group=workload.kcp.io, Version=V1alpha1 - case workloadv1alpha1.SchemeGroupVersion.WithResource("synctargets"): - informer := f.Workload().V1alpha1().SyncTargets().Informer() - return &genericInformer{lister: cache.NewGenericLister(informer.GetIndexer(), resource.GroupResource()), informer: informer}, nil } return nil, fmt.Errorf("no informer found for %v", resource) diff --git a/sdk/client/informers/externalversions/scheduling/interface.go b/sdk/client/informers/externalversions/scheduling/interface.go deleted file mode 100644 index 31aa4848029..00000000000 --- a/sdk/client/informers/externalversions/scheduling/interface.go +++ /dev/null @@ -1,68 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by kcp code-generator. DO NOT EDIT. - -package scheduling - -import ( - "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/internalinterfaces" - "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/scheduling/v1alpha1" -) - -type ClusterInterface interface { - // V1alpha1 provides access to the shared informers in V1alpha1. - V1alpha1() v1alpha1.ClusterInterface -} - -type group struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// New returns a new ClusterInterface. -func New(f internalinterfaces.SharedInformerFactory, tweakListOptions internalinterfaces.TweakListOptionsFunc) ClusterInterface { - return &group{factory: f, tweakListOptions: tweakListOptions} -} - -// V1alpha1 returns a new v1alpha1.ClusterInterface. -func (g *group) V1alpha1() v1alpha1.ClusterInterface { - return v1alpha1.New(g.factory, g.tweakListOptions) -} - -type Interface interface { - // V1alpha1 provides access to the shared informers in V1alpha1. - V1alpha1() v1alpha1.Interface -} - -type scopedGroup struct { - factory internalinterfaces.SharedScopedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// New returns a new Interface. 
-func NewScoped(f internalinterfaces.SharedScopedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { - return &scopedGroup{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} -} - -// V1alpha1 returns a new v1alpha1.ClusterInterface. -func (g *scopedGroup) V1alpha1() v1alpha1.Interface { - return v1alpha1.NewScoped(g.factory, g.namespace, g.tweakListOptions) -} diff --git a/sdk/client/informers/externalversions/scheduling/v1alpha1/interface.go b/sdk/client/informers/externalversions/scheduling/v1alpha1/interface.go deleted file mode 100644 index bb2fed3a0ad..00000000000 --- a/sdk/client/informers/externalversions/scheduling/v1alpha1/interface.go +++ /dev/null @@ -1,81 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by kcp code-generator. DO NOT EDIT. - -package v1alpha1 - -import ( - "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/internalinterfaces" -) - -type ClusterInterface interface { - // Locations returns a LocationClusterInformer - Locations() LocationClusterInformer - // Placements returns a PlacementClusterInformer - Placements() PlacementClusterInformer -} - -type version struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// New returns a new ClusterInterface. -func New(f internalinterfaces.SharedInformerFactory, tweakListOptions internalinterfaces.TweakListOptionsFunc) ClusterInterface { - return &version{factory: f, tweakListOptions: tweakListOptions} -} - -// Locations returns a LocationClusterInformer -func (v *version) Locations() LocationClusterInformer { - return &locationClusterInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} -} - -// Placements returns a PlacementClusterInformer -func (v *version) Placements() PlacementClusterInformer { - return &placementClusterInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} -} - -type Interface interface { - // Locations returns a LocationInformer - Locations() LocationInformer - // Placements returns a PlacementInformer - Placements() PlacementInformer -} - -type scopedVersion struct { - factory internalinterfaces.SharedScopedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// New returns a new ClusterInterface. 
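The informer machinery deleted from here on follows client-gen's two-level accessor chain: factory -> group (Scheduling()) -> version (V1alpha1()) -> per-type informer, with each informer shared through the factory so duplicate requests reuse one watch. A hedged sketch of how it was consumed; NewSharedInformerFactory is the factory constructor generated alongside these files and is not part of this hunk:

    factory := externalversions.NewSharedInformerFactory(clusterClient, 10*time.Minute)
    locations := factory.Scheduling().V1alpha1().Locations() // the accessor chain removed by this patch
    lister := locations.Lister()                             // cluster-aware lister over the shared cache
    factory.Start(ctx.Done())
    factory.WaitForCacheSync(ctx.Done())
    _ = lister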
-func NewScoped(f internalinterfaces.SharedScopedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { - return &scopedVersion{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} -} - -// Locations returns a LocationInformer -func (v *scopedVersion) Locations() LocationInformer { - return &locationScopedInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} -} - -// Placements returns a PlacementInformer -func (v *scopedVersion) Placements() PlacementInformer { - return &placementScopedInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} -} diff --git a/sdk/client/informers/externalversions/scheduling/v1alpha1/location.go b/sdk/client/informers/externalversions/scheduling/v1alpha1/location.go deleted file mode 100644 index 5be251b4c31..00000000000 --- a/sdk/client/informers/externalversions/scheduling/v1alpha1/location.go +++ /dev/null @@ -1,179 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by kcp code-generator. DO NOT EDIT. - -package v1alpha1 - -import ( - "context" - "time" - - kcpcache "github.com/kcp-dev/apimachinery/v2/pkg/cache" - kcpinformers "github.com/kcp-dev/apimachinery/v2/third_party/informers" - "github.com/kcp-dev/logicalcluster/v3" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/tools/cache" - - schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1" - scopedclientset "github.com/kcp-dev/kcp/sdk/client/clientset/versioned" - clientset "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster" - "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/internalinterfaces" - schedulingv1alpha1listers "github.com/kcp-dev/kcp/sdk/client/listers/scheduling/v1alpha1" -) - -// LocationClusterInformer provides access to a shared informer and lister for -// Locations. -type LocationClusterInformer interface { - Cluster(logicalcluster.Name) LocationInformer - Informer() kcpcache.ScopeableSharedIndexInformer - Lister() schedulingv1alpha1listers.LocationClusterLister -} - -type locationClusterInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// NewLocationClusterInformer constructs a new informer for Location type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewLocationClusterInformer(client clientset.ClusterInterface, resyncPeriod time.Duration, indexers cache.Indexers) kcpcache.ScopeableSharedIndexInformer { - return NewFilteredLocationClusterInformer(client, resyncPeriod, indexers, nil) -} - -// NewFilteredLocationClusterInformer constructs a new informer for Location type. 
-// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewFilteredLocationClusterInformer(client clientset.ClusterInterface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) kcpcache.ScopeableSharedIndexInformer { - return kcpinformers.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.SchedulingV1alpha1().Locations().List(context.TODO(), options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.SchedulingV1alpha1().Locations().Watch(context.TODO(), options) - }, - }, - &schedulingv1alpha1.Location{}, - resyncPeriod, - indexers, - ) -} - -func (f *locationClusterInformer) defaultInformer(client clientset.ClusterInterface, resyncPeriod time.Duration) kcpcache.ScopeableSharedIndexInformer { - return NewFilteredLocationClusterInformer(client, resyncPeriod, cache.Indexers{ - kcpcache.ClusterIndexName: kcpcache.ClusterIndexFunc, - }, - f.tweakListOptions, - ) -} - -func (f *locationClusterInformer) Informer() kcpcache.ScopeableSharedIndexInformer { - return f.factory.InformerFor(&schedulingv1alpha1.Location{}, f.defaultInformer) -} - -func (f *locationClusterInformer) Lister() schedulingv1alpha1listers.LocationClusterLister { - return schedulingv1alpha1listers.NewLocationClusterLister(f.Informer().GetIndexer()) -} - -// LocationInformer provides access to a shared informer and lister for -// Locations. -type LocationInformer interface { - Informer() cache.SharedIndexInformer - Lister() schedulingv1alpha1listers.LocationLister -} - -func (f *locationClusterInformer) Cluster(clusterName logicalcluster.Name) LocationInformer { - return &locationInformer{ - informer: f.Informer().Cluster(clusterName), - lister: f.Lister().Cluster(clusterName), - } -} - -type locationInformer struct { - informer cache.SharedIndexInformer - lister schedulingv1alpha1listers.LocationLister -} - -func (f *locationInformer) Informer() cache.SharedIndexInformer { - return f.informer -} - -func (f *locationInformer) Lister() schedulingv1alpha1listers.LocationLister { - return f.lister -} - -type locationScopedInformer struct { - factory internalinterfaces.SharedScopedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -func (f *locationScopedInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&schedulingv1alpha1.Location{}, f.defaultInformer) -} - -func (f *locationScopedInformer) Lister() schedulingv1alpha1listers.LocationLister { - return schedulingv1alpha1listers.NewLocationLister(f.Informer().GetIndexer()) -} - -// NewLocationInformer constructs a new informer for Location type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewLocationInformer(client scopedclientset.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredLocationInformer(client, resyncPeriod, indexers, nil) -} - -// NewFilteredLocationInformer constructs a new informer for Location type. 
-// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewFilteredLocationInformer(client scopedclientset.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.SchedulingV1alpha1().Locations().List(context.TODO(), options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.SchedulingV1alpha1().Locations().Watch(context.TODO(), options) - }, - }, - &schedulingv1alpha1.Location{}, - resyncPeriod, - indexers, - ) -} - -func (f *locationScopedInformer) defaultInformer(client scopedclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredLocationInformer(client, resyncPeriod, cache.Indexers{}, f.tweakListOptions) -} diff --git a/sdk/client/informers/externalversions/scheduling/v1alpha1/placement.go b/sdk/client/informers/externalversions/scheduling/v1alpha1/placement.go deleted file mode 100644 index 87558e76855..00000000000 --- a/sdk/client/informers/externalversions/scheduling/v1alpha1/placement.go +++ /dev/null @@ -1,179 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by kcp code-generator. DO NOT EDIT. - -package v1alpha1 - -import ( - "context" - "time" - - kcpcache "github.com/kcp-dev/apimachinery/v2/pkg/cache" - kcpinformers "github.com/kcp-dev/apimachinery/v2/third_party/informers" - "github.com/kcp-dev/logicalcluster/v3" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/tools/cache" - - schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1" - scopedclientset "github.com/kcp-dev/kcp/sdk/client/clientset/versioned" - clientset "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster" - "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/internalinterfaces" - schedulingv1alpha1listers "github.com/kcp-dev/kcp/sdk/client/listers/scheduling/v1alpha1" -) - -// PlacementClusterInformer provides access to a shared informer and lister for -// Placements. -type PlacementClusterInformer interface { - Cluster(logicalcluster.Name) PlacementInformer - Informer() kcpcache.ScopeableSharedIndexInformer - Lister() schedulingv1alpha1listers.PlacementClusterLister -} - -type placementClusterInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// NewPlacementClusterInformer constructs a new informer for Placement type. 
-// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewPlacementClusterInformer(client clientset.ClusterInterface, resyncPeriod time.Duration, indexers cache.Indexers) kcpcache.ScopeableSharedIndexInformer { - return NewFilteredPlacementClusterInformer(client, resyncPeriod, indexers, nil) -} - -// NewFilteredPlacementClusterInformer constructs a new informer for Placement type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewFilteredPlacementClusterInformer(client clientset.ClusterInterface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) kcpcache.ScopeableSharedIndexInformer { - return kcpinformers.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.SchedulingV1alpha1().Placements().List(context.TODO(), options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.SchedulingV1alpha1().Placements().Watch(context.TODO(), options) - }, - }, - &schedulingv1alpha1.Placement{}, - resyncPeriod, - indexers, - ) -} - -func (f *placementClusterInformer) defaultInformer(client clientset.ClusterInterface, resyncPeriod time.Duration) kcpcache.ScopeableSharedIndexInformer { - return NewFilteredPlacementClusterInformer(client, resyncPeriod, cache.Indexers{ - kcpcache.ClusterIndexName: kcpcache.ClusterIndexFunc, - }, - f.tweakListOptions, - ) -} - -func (f *placementClusterInformer) Informer() kcpcache.ScopeableSharedIndexInformer { - return f.factory.InformerFor(&schedulingv1alpha1.Placement{}, f.defaultInformer) -} - -func (f *placementClusterInformer) Lister() schedulingv1alpha1listers.PlacementClusterLister { - return schedulingv1alpha1listers.NewPlacementClusterLister(f.Informer().GetIndexer()) -} - -// PlacementInformer provides access to a shared informer and lister for -// Placements. -type PlacementInformer interface { - Informer() cache.SharedIndexInformer - Lister() schedulingv1alpha1listers.PlacementLister -} - -func (f *placementClusterInformer) Cluster(clusterName logicalcluster.Name) PlacementInformer { - return &placementInformer{ - informer: f.Informer().Cluster(clusterName), - lister: f.Lister().Cluster(clusterName), - } -} - -type placementInformer struct { - informer cache.SharedIndexInformer - lister schedulingv1alpha1listers.PlacementLister -} - -func (f *placementInformer) Informer() cache.SharedIndexInformer { - return f.informer -} - -func (f *placementInformer) Lister() schedulingv1alpha1listers.PlacementLister { - return f.lister -} - -type placementScopedInformer struct { - factory internalinterfaces.SharedScopedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -func (f *placementScopedInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&schedulingv1alpha1.Placement{}, f.defaultInformer) -} - -func (f *placementScopedInformer) Lister() schedulingv1alpha1listers.PlacementLister { - return schedulingv1alpha1listers.NewPlacementLister(f.Informer().GetIndexer()) -} - -// NewPlacementInformer constructs a new informer for Placement type. 
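The tweakListOptions parameter threaded through these constructors is the seam for narrowing what gets LISTed and WATCHed. A sketch using the filtered constructor defined above, with a hypothetical label selector:

package example

import (
	"time"

	kcpcache "github.com/kcp-dev/apimachinery/v2/pkg/cache"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"

	clientset "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster"
	schedulinginformers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/scheduling/v1alpha1"
)

// newProdPlacementInformer watches only Placements labeled env=prod by
// mutating the ListOptions before every LIST/WATCH call.
func newProdPlacementInformer(client clientset.ClusterInterface) kcpcache.ScopeableSharedIndexInformer {
	return schedulinginformers.NewFilteredPlacementClusterInformer(
		client,
		10*time.Minute,
		cache.Indexers{kcpcache.ClusterIndexName: kcpcache.ClusterIndexFunc},
		func(options *metav1.ListOptions) {
			options.LabelSelector = "env=prod" // placeholder selector
		},
	)
}
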
-// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewPlacementInformer(client scopedclientset.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredPlacementInformer(client, resyncPeriod, indexers, nil) -} - -// NewFilteredPlacementInformer constructs a new informer for Placement type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewFilteredPlacementInformer(client scopedclientset.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.SchedulingV1alpha1().Placements().List(context.TODO(), options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.SchedulingV1alpha1().Placements().Watch(context.TODO(), options) - }, - }, - &schedulingv1alpha1.Placement{}, - resyncPeriod, - indexers, - ) -} - -func (f *placementScopedInformer) defaultInformer(client scopedclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredPlacementInformer(client, resyncPeriod, cache.Indexers{}, f.tweakListOptions) -} diff --git a/sdk/client/informers/externalversions/workload/interface.go b/sdk/client/informers/externalversions/workload/interface.go deleted file mode 100644 index 5d22c8f2e49..00000000000 --- a/sdk/client/informers/externalversions/workload/interface.go +++ /dev/null @@ -1,68 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by kcp code-generator. DO NOT EDIT. - -package workload - -import ( - "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/internalinterfaces" - "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/workload/v1alpha1" -) - -type ClusterInterface interface { - // V1alpha1 provides access to the shared informers in V1alpha1. - V1alpha1() v1alpha1.ClusterInterface -} - -type group struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// New returns a new ClusterInterface. -func New(f internalinterfaces.SharedInformerFactory, tweakListOptions internalinterfaces.TweakListOptionsFunc) ClusterInterface { - return &group{factory: f, tweakListOptions: tweakListOptions} -} - -// V1alpha1 returns a new v1alpha1.ClusterInterface. 
-func (g *group) V1alpha1() v1alpha1.ClusterInterface {
-	return v1alpha1.New(g.factory, g.tweakListOptions)
-}
-
-type Interface interface {
-	// V1alpha1 provides access to the shared informers in V1alpha1.
-	V1alpha1() v1alpha1.Interface
-}
-
-type scopedGroup struct {
-	factory          internalinterfaces.SharedScopedInformerFactory
-	tweakListOptions internalinterfaces.TweakListOptionsFunc
-	namespace        string
-}
-
-// NewScoped returns a new Interface.
-func NewScoped(f internalinterfaces.SharedScopedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
-	return &scopedGroup{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
-}
-
-// V1alpha1 returns a new v1alpha1.Interface.
-func (g *scopedGroup) V1alpha1() v1alpha1.Interface {
-	return v1alpha1.NewScoped(g.factory, g.namespace, g.tweakListOptions)
-}
diff --git a/sdk/client/informers/externalversions/workload/v1alpha1/interface.go b/sdk/client/informers/externalversions/workload/v1alpha1/interface.go
deleted file mode 100644
index 6c5cea6df18..00000000000
--- a/sdk/client/informers/externalversions/workload/v1alpha1/interface.go
+++ /dev/null
@@ -1,67 +0,0 @@
-//go:build !ignore_autogenerated
-// +build !ignore_autogenerated
-
-/*
-Copyright The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by kcp code-generator. DO NOT EDIT.
-
-package v1alpha1
-
-import (
-	"github.com/kcp-dev/kcp/sdk/client/informers/externalversions/internalinterfaces"
-)
-
-type ClusterInterface interface {
-	// SyncTargets returns a SyncTargetClusterInformer
-	SyncTargets() SyncTargetClusterInformer
-}
-
-type version struct {
-	factory          internalinterfaces.SharedInformerFactory
-	tweakListOptions internalinterfaces.TweakListOptionsFunc
-}
-
-// New returns a new ClusterInterface.
-func New(f internalinterfaces.SharedInformerFactory, tweakListOptions internalinterfaces.TweakListOptionsFunc) ClusterInterface {
-	return &version{factory: f, tweakListOptions: tweakListOptions}
-}
-
-// SyncTargets returns a SyncTargetClusterInformer
-func (v *version) SyncTargets() SyncTargetClusterInformer {
-	return &syncTargetClusterInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
-}
-
-type Interface interface {
-	// SyncTargets returns a SyncTargetInformer
-	SyncTargets() SyncTargetInformer
-}
-
-type scopedVersion struct {
-	factory          internalinterfaces.SharedScopedInformerFactory
-	tweakListOptions internalinterfaces.TweakListOptionsFunc
-	namespace        string
-}
-
-// NewScoped returns a new Interface.
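These thin group and version shims exist so the factory root can fan out to each API group. A sketch of the traversal, assuming the factory root exposes a Workload() accessor analogous to the other generated groups:

package example

import (
	kcpinformers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions"
)

// resolveSyncTargetInformer walks factory → group → version → informer.
func resolveSyncTargetInformer(factory kcpinformers.SharedInformerFactory) {
	syncTargets := factory.Workload().V1alpha1().SyncTargets()
	_ = syncTargets.Informer() // cluster-wide shared index informer
	_ = syncTargets.Lister()   // SyncTargetClusterLister
}
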
-func NewScoped(f internalinterfaces.SharedScopedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { - return &scopedVersion{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} -} - -// SyncTargets returns a SyncTargetInformer -func (v *scopedVersion) SyncTargets() SyncTargetInformer { - return &syncTargetScopedInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} -} diff --git a/sdk/client/informers/externalversions/workload/v1alpha1/synctarget.go b/sdk/client/informers/externalversions/workload/v1alpha1/synctarget.go deleted file mode 100644 index d2660313967..00000000000 --- a/sdk/client/informers/externalversions/workload/v1alpha1/synctarget.go +++ /dev/null @@ -1,179 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by kcp code-generator. DO NOT EDIT. - -package v1alpha1 - -import ( - "context" - "time" - - kcpcache "github.com/kcp-dev/apimachinery/v2/pkg/cache" - kcpinformers "github.com/kcp-dev/apimachinery/v2/third_party/informers" - "github.com/kcp-dev/logicalcluster/v3" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/tools/cache" - - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" - scopedclientset "github.com/kcp-dev/kcp/sdk/client/clientset/versioned" - clientset "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster" - "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/internalinterfaces" - workloadv1alpha1listers "github.com/kcp-dev/kcp/sdk/client/listers/workload/v1alpha1" -) - -// SyncTargetClusterInformer provides access to a shared informer and lister for -// SyncTargets. -type SyncTargetClusterInformer interface { - Cluster(logicalcluster.Name) SyncTargetInformer - Informer() kcpcache.ScopeableSharedIndexInformer - Lister() workloadv1alpha1listers.SyncTargetClusterLister -} - -type syncTargetClusterInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// NewSyncTargetClusterInformer constructs a new informer for SyncTarget type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewSyncTargetClusterInformer(client clientset.ClusterInterface, resyncPeriod time.Duration, indexers cache.Indexers) kcpcache.ScopeableSharedIndexInformer { - return NewFilteredSyncTargetClusterInformer(client, resyncPeriod, indexers, nil) -} - -// NewFilteredSyncTargetClusterInformer constructs a new informer for SyncTarget type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. 
-func NewFilteredSyncTargetClusterInformer(client clientset.ClusterInterface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) kcpcache.ScopeableSharedIndexInformer { - return kcpinformers.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.WorkloadV1alpha1().SyncTargets().List(context.TODO(), options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.WorkloadV1alpha1().SyncTargets().Watch(context.TODO(), options) - }, - }, - &workloadv1alpha1.SyncTarget{}, - resyncPeriod, - indexers, - ) -} - -func (f *syncTargetClusterInformer) defaultInformer(client clientset.ClusterInterface, resyncPeriod time.Duration) kcpcache.ScopeableSharedIndexInformer { - return NewFilteredSyncTargetClusterInformer(client, resyncPeriod, cache.Indexers{ - kcpcache.ClusterIndexName: kcpcache.ClusterIndexFunc, - }, - f.tweakListOptions, - ) -} - -func (f *syncTargetClusterInformer) Informer() kcpcache.ScopeableSharedIndexInformer { - return f.factory.InformerFor(&workloadv1alpha1.SyncTarget{}, f.defaultInformer) -} - -func (f *syncTargetClusterInformer) Lister() workloadv1alpha1listers.SyncTargetClusterLister { - return workloadv1alpha1listers.NewSyncTargetClusterLister(f.Informer().GetIndexer()) -} - -// SyncTargetInformer provides access to a shared informer and lister for -// SyncTargets. -type SyncTargetInformer interface { - Informer() cache.SharedIndexInformer - Lister() workloadv1alpha1listers.SyncTargetLister -} - -func (f *syncTargetClusterInformer) Cluster(clusterName logicalcluster.Name) SyncTargetInformer { - return &syncTargetInformer{ - informer: f.Informer().Cluster(clusterName), - lister: f.Lister().Cluster(clusterName), - } -} - -type syncTargetInformer struct { - informer cache.SharedIndexInformer - lister workloadv1alpha1listers.SyncTargetLister -} - -func (f *syncTargetInformer) Informer() cache.SharedIndexInformer { - return f.informer -} - -func (f *syncTargetInformer) Lister() workloadv1alpha1listers.SyncTargetLister { - return f.lister -} - -type syncTargetScopedInformer struct { - factory internalinterfaces.SharedScopedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -func (f *syncTargetScopedInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&workloadv1alpha1.SyncTarget{}, f.defaultInformer) -} - -func (f *syncTargetScopedInformer) Lister() workloadv1alpha1listers.SyncTargetLister { - return workloadv1alpha1listers.NewSyncTargetLister(f.Informer().GetIndexer()) -} - -// NewSyncTargetInformer constructs a new informer for SyncTarget type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewSyncTargetInformer(client scopedclientset.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredSyncTargetInformer(client, resyncPeriod, indexers, nil) -} - -// NewFilteredSyncTargetInformer constructs a new informer for SyncTarget type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. 
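Where no shared factory is in play, the workspace-scoped constructor defined above can be driven directly with a plain clientset. A minimal sketch with an illustrative event handler:

package example

import (
	"time"

	"k8s.io/client-go/tools/cache"
	"k8s.io/klog/v2"

	workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1"
	scopedclientset "github.com/kcp-dev/kcp/sdk/client/clientset/versioned"
	workloadinformers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/workload/v1alpha1"
)

// logSyncTargets runs a workspace-scoped SyncTarget informer and logs additions.
func logSyncTargets(client scopedclientset.Interface, stopCh <-chan struct{}) {
	informer := workloadinformers.NewSyncTargetInformer(client, 10*time.Minute, cache.Indexers{})
	informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			st := obj.(*workloadv1alpha1.SyncTarget)
			klog.InfoS("observed SyncTarget", "name", st.Name)
		},
	})
	go informer.Run(stopCh)
	cache.WaitForCacheSync(stopCh, informer.HasSynced)
}
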
-func NewFilteredSyncTargetInformer(client scopedclientset.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.WorkloadV1alpha1().SyncTargets().List(context.TODO(), options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.WorkloadV1alpha1().SyncTargets().Watch(context.TODO(), options) - }, - }, - &workloadv1alpha1.SyncTarget{}, - resyncPeriod, - indexers, - ) -} - -func (f *syncTargetScopedInformer) defaultInformer(client scopedclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredSyncTargetInformer(client, resyncPeriod, cache.Indexers{}, f.tweakListOptions) -} diff --git a/sdk/client/listers/scheduling/v1alpha1/location.go b/sdk/client/listers/scheduling/v1alpha1/location.go deleted file mode 100644 index ee469132aa3..00000000000 --- a/sdk/client/listers/scheduling/v1alpha1/location.go +++ /dev/null @@ -1,143 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by kcp code-generator. DO NOT EDIT. - -package v1alpha1 - -import ( - kcpcache "github.com/kcp-dev/apimachinery/v2/pkg/cache" - "github.com/kcp-dev/logicalcluster/v3" - - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" - - schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1" -) - -// LocationClusterLister can list Locations across all workspaces, or scope down to a LocationLister for one workspace. -// All objects returned here must be treated as read-only. -type LocationClusterLister interface { - // List lists all Locations in the indexer. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*schedulingv1alpha1.Location, err error) - // Cluster returns a lister that can list and get Locations in one workspace. - Cluster(clusterName logicalcluster.Name) LocationLister - LocationClusterListerExpansion -} - -type locationClusterLister struct { - indexer cache.Indexer -} - -// NewLocationClusterLister returns a new LocationClusterLister. -// We assume that the indexer: -// - is fed by a cross-workspace LIST+WATCH -// - uses kcpcache.MetaClusterNamespaceKeyFunc as the key function -// - has the kcpcache.ClusterIndex as an index -func NewLocationClusterLister(indexer cache.Indexer) *locationClusterLister { - return &locationClusterLister{indexer: indexer} -} - -// List lists all Locations in the indexer across all workspaces. 
-func (s *locationClusterLister) List(selector labels.Selector) (ret []*schedulingv1alpha1.Location, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*schedulingv1alpha1.Location)) - }) - return ret, err -} - -// Cluster scopes the lister to one workspace, allowing users to list and get Locations. -func (s *locationClusterLister) Cluster(clusterName logicalcluster.Name) LocationLister { - return &locationLister{indexer: s.indexer, clusterName: clusterName} -} - -// LocationLister can list all Locations, or get one in particular. -// All objects returned here must be treated as read-only. -type LocationLister interface { - // List lists all Locations in the workspace. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*schedulingv1alpha1.Location, err error) - // Get retrieves the Location from the indexer for a given workspace and name. - // Objects returned here must be treated as read-only. - Get(name string) (*schedulingv1alpha1.Location, error) - LocationListerExpansion -} - -// locationLister can list all Locations inside a workspace. -type locationLister struct { - indexer cache.Indexer - clusterName logicalcluster.Name -} - -// List lists all Locations in the indexer for a workspace. -func (s *locationLister) List(selector labels.Selector) (ret []*schedulingv1alpha1.Location, err error) { - err = kcpcache.ListAllByCluster(s.indexer, s.clusterName, selector, func(i interface{}) { - ret = append(ret, i.(*schedulingv1alpha1.Location)) - }) - return ret, err -} - -// Get retrieves the Location from the indexer for a given workspace and name. -func (s *locationLister) Get(name string) (*schedulingv1alpha1.Location, error) { - key := kcpcache.ToClusterAwareKey(s.clusterName.String(), "", name) - obj, exists, err := s.indexer.GetByKey(key) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(schedulingv1alpha1.Resource("locations"), name) - } - return obj.(*schedulingv1alpha1.Location), nil -} - -// NewLocationLister returns a new LocationLister. -// We assume that the indexer: -// - is fed by a workspace-scoped LIST+WATCH -// - uses cache.MetaNamespaceKeyFunc as the key function -func NewLocationLister(indexer cache.Indexer) *locationScopedLister { - return &locationScopedLister{indexer: indexer} -} - -// locationScopedLister can list all Locations inside a workspace. -type locationScopedLister struct { - indexer cache.Indexer -} - -// List lists all Locations in the indexer for a workspace. -func (s *locationScopedLister) List(selector labels.Selector) (ret []*schedulingv1alpha1.Location, err error) { - err = cache.ListAll(s.indexer, selector, func(i interface{}) { - ret = append(ret, i.(*schedulingv1alpha1.Location)) - }) - return ret, err -} - -// Get retrieves the Location from the indexer for a given workspace and name. 
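The cluster-scoped Get above builds its cache key with kcpcache.ToClusterAwareKey, which is what lets one indexer serve every workspace. The equivalent direct indexer access, with placeholder cluster and object names:

package example

import (
	kcpcache "github.com/kcp-dev/apimachinery/v2/pkg/cache"
	"k8s.io/client-go/tools/cache"

	schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1"
)

// getLocationByKey shows the cache key shape used by the cluster-scoped Get:
// cluster name, an empty namespace (Locations are cluster-wide), object name.
func getLocationByKey(indexer cache.Indexer) (*schedulingv1alpha1.Location, bool, error) {
	key := kcpcache.ToClusterAwareKey("example-cluster", "", "us-east1")
	obj, exists, err := indexer.GetByKey(key)
	if err != nil || !exists {
		return nil, exists, err
	}
	return obj.(*schedulingv1alpha1.Location), true, nil
}
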
-func (s *locationScopedLister) Get(name string) (*schedulingv1alpha1.Location, error) { - key := name - obj, exists, err := s.indexer.GetByKey(key) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(schedulingv1alpha1.Resource("locations"), name) - } - return obj.(*schedulingv1alpha1.Location), nil -} diff --git a/sdk/client/listers/scheduling/v1alpha1/location_expansion.go b/sdk/client/listers/scheduling/v1alpha1/location_expansion.go deleted file mode 100644 index c0b0a3db57b..00000000000 --- a/sdk/client/listers/scheduling/v1alpha1/location_expansion.go +++ /dev/null @@ -1,28 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by kcp code-generator. DO NOT EDIT. - -package v1alpha1 - -// LocationClusterListerExpansion allows custom methods to be added to LocationClusterLister. -type LocationClusterListerExpansion interface{} - -// LocationListerExpansion allows custom methods to be added to LocationLister. -type LocationListerExpansion interface{} diff --git a/sdk/client/listers/scheduling/v1alpha1/placement.go b/sdk/client/listers/scheduling/v1alpha1/placement.go deleted file mode 100644 index f9f1788a26f..00000000000 --- a/sdk/client/listers/scheduling/v1alpha1/placement.go +++ /dev/null @@ -1,143 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by kcp code-generator. DO NOT EDIT. - -package v1alpha1 - -import ( - kcpcache "github.com/kcp-dev/apimachinery/v2/pkg/cache" - "github.com/kcp-dev/logicalcluster/v3" - - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" - - schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1" -) - -// PlacementClusterLister can list Placements across all workspaces, or scope down to a PlacementLister for one workspace. -// All objects returned here must be treated as read-only. -type PlacementClusterLister interface { - // List lists all Placements in the indexer. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*schedulingv1alpha1.Placement, err error) - // Cluster returns a lister that can list and get Placements in one workspace. 
- Cluster(clusterName logicalcluster.Name) PlacementLister - PlacementClusterListerExpansion -} - -type placementClusterLister struct { - indexer cache.Indexer -} - -// NewPlacementClusterLister returns a new PlacementClusterLister. -// We assume that the indexer: -// - is fed by a cross-workspace LIST+WATCH -// - uses kcpcache.MetaClusterNamespaceKeyFunc as the key function -// - has the kcpcache.ClusterIndex as an index -func NewPlacementClusterLister(indexer cache.Indexer) *placementClusterLister { - return &placementClusterLister{indexer: indexer} -} - -// List lists all Placements in the indexer across all workspaces. -func (s *placementClusterLister) List(selector labels.Selector) (ret []*schedulingv1alpha1.Placement, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*schedulingv1alpha1.Placement)) - }) - return ret, err -} - -// Cluster scopes the lister to one workspace, allowing users to list and get Placements. -func (s *placementClusterLister) Cluster(clusterName logicalcluster.Name) PlacementLister { - return &placementLister{indexer: s.indexer, clusterName: clusterName} -} - -// PlacementLister can list all Placements, or get one in particular. -// All objects returned here must be treated as read-only. -type PlacementLister interface { - // List lists all Placements in the workspace. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*schedulingv1alpha1.Placement, err error) - // Get retrieves the Placement from the indexer for a given workspace and name. - // Objects returned here must be treated as read-only. - Get(name string) (*schedulingv1alpha1.Placement, error) - PlacementListerExpansion -} - -// placementLister can list all Placements inside a workspace. -type placementLister struct { - indexer cache.Indexer - clusterName logicalcluster.Name -} - -// List lists all Placements in the indexer for a workspace. -func (s *placementLister) List(selector labels.Selector) (ret []*schedulingv1alpha1.Placement, err error) { - err = kcpcache.ListAllByCluster(s.indexer, s.clusterName, selector, func(i interface{}) { - ret = append(ret, i.(*schedulingv1alpha1.Placement)) - }) - return ret, err -} - -// Get retrieves the Placement from the indexer for a given workspace and name. -func (s *placementLister) Get(name string) (*schedulingv1alpha1.Placement, error) { - key := kcpcache.ToClusterAwareKey(s.clusterName.String(), "", name) - obj, exists, err := s.indexer.GetByKey(key) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(schedulingv1alpha1.Resource("placements"), name) - } - return obj.(*schedulingv1alpha1.Placement), nil -} - -// NewPlacementLister returns a new PlacementLister. -// We assume that the indexer: -// - is fed by a workspace-scoped LIST+WATCH -// - uses cache.MetaNamespaceKeyFunc as the key function -func NewPlacementLister(indexer cache.Indexer) *placementScopedLister { - return &placementScopedLister{indexer: indexer} -} - -// placementScopedLister can list all Placements inside a workspace. -type placementScopedLister struct { - indexer cache.Indexer -} - -// List lists all Placements in the indexer for a workspace. 
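The usual pattern with this cluster lister is to scope down to one workspace before listing. A sketch, with placeholder workspace name and label selector:

package example

import (
	"k8s.io/apimachinery/pkg/labels"

	"github.com/kcp-dev/logicalcluster/v3"

	schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1"
	schedulingv1alpha1listers "github.com/kcp-dev/kcp/sdk/client/listers/scheduling/v1alpha1"
)

// placementsInWorkspace narrows the cluster lister to one workspace, then
// lists only the Placements matching the selector.
func placementsInWorkspace(l schedulingv1alpha1listers.PlacementClusterLister) ([]*schedulingv1alpha1.Placement, error) {
	selector, err := labels.Parse("env=prod")
	if err != nil {
		return nil, err
	}
	return l.Cluster(logicalcluster.Name("example-cluster")).List(selector)
}
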
-func (s *placementScopedLister) List(selector labels.Selector) (ret []*schedulingv1alpha1.Placement, err error) { - err = cache.ListAll(s.indexer, selector, func(i interface{}) { - ret = append(ret, i.(*schedulingv1alpha1.Placement)) - }) - return ret, err -} - -// Get retrieves the Placement from the indexer for a given workspace and name. -func (s *placementScopedLister) Get(name string) (*schedulingv1alpha1.Placement, error) { - key := name - obj, exists, err := s.indexer.GetByKey(key) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(schedulingv1alpha1.Resource("placements"), name) - } - return obj.(*schedulingv1alpha1.Placement), nil -} diff --git a/sdk/client/listers/scheduling/v1alpha1/placement_expansion.go b/sdk/client/listers/scheduling/v1alpha1/placement_expansion.go deleted file mode 100644 index 2c813b0cb5f..00000000000 --- a/sdk/client/listers/scheduling/v1alpha1/placement_expansion.go +++ /dev/null @@ -1,28 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by kcp code-generator. DO NOT EDIT. - -package v1alpha1 - -// PlacementClusterListerExpansion allows custom methods to be added to PlacementClusterLister. -type PlacementClusterListerExpansion interface{} - -// PlacementListerExpansion allows custom methods to be added to PlacementLister. -type PlacementListerExpansion interface{} diff --git a/sdk/client/listers/workload/v1alpha1/synctarget.go b/sdk/client/listers/workload/v1alpha1/synctarget.go deleted file mode 100644 index b14d63ff467..00000000000 --- a/sdk/client/listers/workload/v1alpha1/synctarget.go +++ /dev/null @@ -1,143 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by kcp code-generator. DO NOT EDIT. - -package v1alpha1 - -import ( - kcpcache "github.com/kcp-dev/apimachinery/v2/pkg/cache" - "github.com/kcp-dev/logicalcluster/v3" - - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" - - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" -) - -// SyncTargetClusterLister can list SyncTargets across all workspaces, or scope down to a SyncTargetLister for one workspace. -// All objects returned here must be treated as read-only. -type SyncTargetClusterLister interface { - // List lists all SyncTargets in the indexer. 
- // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*workloadv1alpha1.SyncTarget, err error) - // Cluster returns a lister that can list and get SyncTargets in one workspace. - Cluster(clusterName logicalcluster.Name) SyncTargetLister - SyncTargetClusterListerExpansion -} - -type syncTargetClusterLister struct { - indexer cache.Indexer -} - -// NewSyncTargetClusterLister returns a new SyncTargetClusterLister. -// We assume that the indexer: -// - is fed by a cross-workspace LIST+WATCH -// - uses kcpcache.MetaClusterNamespaceKeyFunc as the key function -// - has the kcpcache.ClusterIndex as an index -func NewSyncTargetClusterLister(indexer cache.Indexer) *syncTargetClusterLister { - return &syncTargetClusterLister{indexer: indexer} -} - -// List lists all SyncTargets in the indexer across all workspaces. -func (s *syncTargetClusterLister) List(selector labels.Selector) (ret []*workloadv1alpha1.SyncTarget, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*workloadv1alpha1.SyncTarget)) - }) - return ret, err -} - -// Cluster scopes the lister to one workspace, allowing users to list and get SyncTargets. -func (s *syncTargetClusterLister) Cluster(clusterName logicalcluster.Name) SyncTargetLister { - return &syncTargetLister{indexer: s.indexer, clusterName: clusterName} -} - -// SyncTargetLister can list all SyncTargets, or get one in particular. -// All objects returned here must be treated as read-only. -type SyncTargetLister interface { - // List lists all SyncTargets in the workspace. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*workloadv1alpha1.SyncTarget, err error) - // Get retrieves the SyncTarget from the indexer for a given workspace and name. - // Objects returned here must be treated as read-only. - Get(name string) (*workloadv1alpha1.SyncTarget, error) - SyncTargetListerExpansion -} - -// syncTargetLister can list all SyncTargets inside a workspace. -type syncTargetLister struct { - indexer cache.Indexer - clusterName logicalcluster.Name -} - -// List lists all SyncTargets in the indexer for a workspace. -func (s *syncTargetLister) List(selector labels.Selector) (ret []*workloadv1alpha1.SyncTarget, err error) { - err = kcpcache.ListAllByCluster(s.indexer, s.clusterName, selector, func(i interface{}) { - ret = append(ret, i.(*workloadv1alpha1.SyncTarget)) - }) - return ret, err -} - -// Get retrieves the SyncTarget from the indexer for a given workspace and name. -func (s *syncTargetLister) Get(name string) (*workloadv1alpha1.SyncTarget, error) { - key := kcpcache.ToClusterAwareKey(s.clusterName.String(), "", name) - obj, exists, err := s.indexer.GetByKey(key) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(workloadv1alpha1.Resource("synctargets"), name) - } - return obj.(*workloadv1alpha1.SyncTarget), nil -} - -// NewSyncTargetLister returns a new SyncTargetLister. -// We assume that the indexer: -// - is fed by a workspace-scoped LIST+WATCH -// - uses cache.MetaNamespaceKeyFunc as the key function -func NewSyncTargetLister(indexer cache.Indexer) *syncTargetScopedLister { - return &syncTargetScopedLister{indexer: indexer} -} - -// syncTargetScopedLister can list all SyncTargets inside a workspace. -type syncTargetScopedLister struct { - indexer cache.Indexer -} - -// List lists all SyncTargets in the indexer for a workspace. 
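Matching the indexer assumptions documented above, the scoped lister pairs naturally with a workspace-scoped informer whose cache is keyed by cache.MetaNamespaceKeyFunc. A sketch:

package example

import (
	"time"

	"k8s.io/client-go/tools/cache"

	scopedclientset "github.com/kcp-dev/kcp/sdk/client/clientset/versioned"
	workloadinformers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/workload/v1alpha1"
	workloadlisters "github.com/kcp-dev/kcp/sdk/client/listers/workload/v1alpha1"
)

// newScopedSyncTargetLister pairs a workspace-scoped informer with the scoped
// lister; the informer's indexer feeds the lister.
func newScopedSyncTargetLister(client scopedclientset.Interface) (cache.SharedIndexInformer, workloadlisters.SyncTargetLister) {
	informer := workloadinformers.NewSyncTargetInformer(client, 10*time.Minute, cache.Indexers{})
	return informer, workloadlisters.NewSyncTargetLister(informer.GetIndexer())
}
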
-func (s *syncTargetScopedLister) List(selector labels.Selector) (ret []*workloadv1alpha1.SyncTarget, err error) { - err = cache.ListAll(s.indexer, selector, func(i interface{}) { - ret = append(ret, i.(*workloadv1alpha1.SyncTarget)) - }) - return ret, err -} - -// Get retrieves the SyncTarget from the indexer for a given workspace and name. -func (s *syncTargetScopedLister) Get(name string) (*workloadv1alpha1.SyncTarget, error) { - key := name - obj, exists, err := s.indexer.GetByKey(key) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(workloadv1alpha1.Resource("synctargets"), name) - } - return obj.(*workloadv1alpha1.SyncTarget), nil -} diff --git a/sdk/client/listers/workload/v1alpha1/synctarget_expansion.go b/sdk/client/listers/workload/v1alpha1/synctarget_expansion.go deleted file mode 100644 index 68ddf9a3efc..00000000000 --- a/sdk/client/listers/workload/v1alpha1/synctarget_expansion.go +++ /dev/null @@ -1,28 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by kcp code-generator. DO NOT EDIT. - -package v1alpha1 - -// SyncTargetClusterListerExpansion allows custom methods to be added to SyncTargetClusterLister. -type SyncTargetClusterListerExpansion interface{} - -// SyncTargetListerExpansion allows custom methods to be added to SyncTargetLister. -type SyncTargetListerExpansion interface{} diff --git a/test/e2e/framework/kcp.go b/test/e2e/framework/kcp.go index 34f600b09ef..f1935b6142b 100644 --- a/test/e2e/framework/kcp.go +++ b/test/e2e/framework/kcp.go @@ -586,9 +586,6 @@ func DirectOrGoRunCommand(executableName string) []string { return []string{cmdPath} } cmdPath := filepath.Join(RepositoryDir(), "cmd", executableName) - if executableName == "deployment-coordinator" { - cmdPath = filepath.Join(RepositoryDir(), "tmc", "cmd", executableName) - } return []string{"go", "run", cmdPath} } diff --git a/test/e2e/framework/syncer.go b/test/e2e/framework/syncer.go deleted file mode 100644 index f40b6c585d6..00000000000 --- a/test/e2e/framework/syncer.go +++ /dev/null @@ -1,842 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/
-
-package framework
-
-import (
-	"context"
-	"fmt"
-	"os"
-	"path/filepath"
-	"strings"
-	"testing"
-	"time"
-
-	kcpdynamic "github.com/kcp-dev/client-go/dynamic"
-	"github.com/kcp-dev/logicalcluster/v3"
-	"github.com/stretchr/testify/require"
-
-	corev1 "k8s.io/api/core/v1"
-	rbacv1 "k8s.io/api/rbac/v1"
-	apierrors "k8s.io/apimachinery/pkg/api/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/fields"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/runtime/schema"
-	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/apimachinery/pkg/util/sets"
-	"k8s.io/apimachinery/pkg/util/wait"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
-	"k8s.io/client-go/dynamic"
-	"k8s.io/client-go/dynamic/dynamicinformer"
-	kubernetesinformers "k8s.io/client-go/informers"
-	kubernetesclient "k8s.io/client-go/kubernetes"
-	"k8s.io/client-go/rest"
-	"k8s.io/client-go/tools/cache"
-	"k8s.io/client-go/tools/clientcmd"
-	"k8s.io/client-go/util/retry"
-	"k8s.io/klog/v2"
-	"sigs.k8s.io/yaml"
-
-	workloadcliplugin "github.com/kcp-dev/kcp/pkg/cliplugins/workload/plugin"
-	"github.com/kcp-dev/kcp/pkg/indexers"
-	"github.com/kcp-dev/kcp/pkg/syncer"
-	"github.com/kcp-dev/kcp/pkg/syncer/shared"
-	apiresourcev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apiresource/v1alpha1"
-	scheduling1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1"
-	tenancyv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1"
-	conditionsv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/apis/conditions/v1alpha1"
-	"github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/util/conditions"
-	workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1"
-	kcpclientset "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster"
-	kcpinformers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions"
-)
-
-type SyncerOption func(t *testing.T, fs *syncerFixture)
-
-func NewSyncerFixture(t *testing.T, server RunningServer, path logicalcluster.Path, opts ...SyncerOption) *syncerFixture {
-	t.Helper()
-
-	if !sets.New[string](TestConfig.Suites()...).HasAny("transparent-multi-cluster", "transparent-multi-cluster:requires-kind") {
-		t.Fatalf("invalid to use a syncer fixture when only the following suites were requested: %v", TestConfig.Suites())
-	}
-	sf := &syncerFixture{
-		upstreamServer: server,
-		syncTargetPath: path,
-		syncTargetName: "psyncer-01",
-	}
-	for _, opt := range opts {
-		opt(t, sf)
-	}
-	return sf
-}
-
-// syncerFixture configures a syncer fixture. Its `CreateSyncTargetAndApplyToDownstream` and `StartSyncer` methods do the work of setting up and starting a syncer.
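A typical test composes NewSyncerFixture with the SyncerOption helpers defined below, then creates the SyncTarget and starts the syncer. A sketch, with placeholder names:

package framework_test

import (
	"testing"

	"github.com/kcp-dev/logicalcluster/v3"

	"github.com/kcp-dev/kcp/test/e2e/framework"
)

// startExampleSyncer shows the intended composition: configure via functional
// options, create and apply the SyncTarget, then start the syncer.
func startExampleSyncer(t *testing.T, server framework.RunningServer, wsPath logicalcluster.Path) {
	framework.NewSyncerFixture(t, server, wsPath,
		framework.WithSyncTargetName("psyncer-example"),
		framework.WithExtraResources("services"),
	).CreateSyncTargetAndApplyToDownstream(t).StartSyncer(t)
}
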
-type syncerFixture struct {
-	upstreamServer RunningServer
-
-	syncedUserClusterNames []logicalcluster.Name
-
-	syncTargetPath   logicalcluster.Path
-	syncTargetName   string
-	syncTargetLabels map[string]string
-
-	extraResourcesToSync []string
-	apiExports           []string
-	prepareDownstream    func(config *rest.Config, isFakePCluster bool)
-}
-
-func WithSyncTargetName(name string) SyncerOption {
-	return func(t *testing.T, sf *syncerFixture) {
-		t.Helper()
-		sf.syncTargetName = name
-	}
-}
-
-func WithSyncTargetLabels(labels map[string]string) SyncerOption {
-	return func(t *testing.T, sf *syncerFixture) {
-		t.Helper()
-		sf.syncTargetLabels = labels
-	}
-}
-
-func WithSyncedUserWorkspaces(syncedUserWorkspaces ...*tenancyv1alpha1.Workspace) SyncerOption {
-	return func(t *testing.T, sf *syncerFixture) {
-		t.Helper()
-		for _, ws := range syncedUserWorkspaces {
-			sf.syncedUserClusterNames = append(sf.syncedUserClusterNames, logicalcluster.Name(ws.Spec.Cluster))
-		}
-	}
-}
-
-func WithExtraResources(resources ...string) SyncerOption {
-	return func(t *testing.T, sf *syncerFixture) {
-		t.Helper()
-		sf.extraResourcesToSync = append(sf.extraResourcesToSync, resources...)
-	}
-}
-
-func WithAPIExports(exports ...string) SyncerOption {
-	return func(t *testing.T, sf *syncerFixture) {
-		t.Helper()
-		sf.apiExports = append(sf.apiExports, exports...)
-	}
-}
-
-func WithDownstreamPreparation(prepare func(config *rest.Config, isFakePCluster bool)) SyncerOption {
-	return func(t *testing.T, sf *syncerFixture) {
-		t.Helper()
-		sf.prepareDownstream = prepare
-	}
-}
-
-// CreateSyncTargetAndApplyToDownstream creates a SyncTarget resource through the `workload sync` CLI command
-// and applies the syncer-related resources to the physical cluster.
-// No resources are synced yet after calling this method; syncing starts only once the syncer itself is started.
-func (sf *syncerFixture) CreateSyncTargetAndApplyToDownstream(t *testing.T) *appliedSyncerFixture {
-	t.Helper()
-
-	artifactDir, _, err := ScratchDirs(t)
-	if err != nil {
-		t.Errorf("failed to create temp dir for syncer artifacts: %v", err)
-	}
-
-	useDeployedSyncer := len(TestConfig.PClusterKubeconfig()) > 0
-
-	// Write the upstream logical cluster config to disk for the workspace plugin
-	upstreamRawConfig, err := sf.upstreamServer.RawConfig()
-	require.NoError(t, err)
-	_, kubeconfigPath := WriteLogicalClusterConfig(t, upstreamRawConfig, "base", sf.syncTargetPath)
-
-	syncerImage := TestConfig.SyncerImage()
-	if useDeployedSyncer {
-		require.NotZero(t, len(syncerImage), "--syncer-image must be specified if testing with a deployed syncer")
-	} else {
-		// The image needs to be a non-empty string for the plugin command but the value doesn't matter if not deploying a syncer.
-		syncerImage = "not-a-valid-image"
-	}
-
-	// Run the plugin command to enable the syncer and collect the resulting yaml
-	t.Logf("Configuring workspace %s for syncing", sf.syncTargetPath)
-	pluginArgs := []string{
-		"workload",
-		"sync",
-		sf.syncTargetName,
-		"--syncer-image=" + syncerImage,
-		"--output-file=-",
-		"--qps=-1",
-		"--feature-gates=" + fmt.Sprintf("%s", utilfeature.DefaultFeatureGate),
-		"--api-import-poll-interval=5s",
-		"--downstream-namespace-clean-delay=2s",
-	}
-	for _, resource := range sf.extraResourcesToSync {
-		pluginArgs = append(pluginArgs, "--resources="+resource)
-	}
-	for _, export := range sf.apiExports {
-		pluginArgs = append(pluginArgs, "--apiexports="+export)
-	}
-	for k, v := range sf.syncTargetLabels {
-		pluginArgs = append(pluginArgs, fmt.Sprintf("--labels=%s=%s", k, v))
-	}
-
-	syncerYAML := RunKcpCliPlugin(t, kubeconfigPath, pluginArgs)
-
-	var downstreamConfig *rest.Config
-	var downstreamKubeconfigPath string
-	if useDeployedSyncer {
-		// The syncer will target the pcluster identified by `--pcluster-kubeconfig`.
-		downstreamKubeconfigPath = TestConfig.PClusterKubeconfig()
-		fs, err := os.Stat(downstreamKubeconfigPath)
-		require.NoError(t, err)
-		require.NotZero(t, fs.Size(), "%s points to an empty file", downstreamKubeconfigPath)
-		rawConfig, err := clientcmd.LoadFromFile(downstreamKubeconfigPath)
-		require.NoError(t, err, "failed to load pcluster kubeconfig")
-		config := clientcmd.NewNonInteractiveClientConfig(*rawConfig, rawConfig.CurrentContext, nil, nil)
-		downstreamConfig, err = config.ClientConfig()
-		require.NoError(t, err)
-	} else {
-		// The syncer will target a logical cluster that is a child of the current workspace. A
-		// logical server provides a lightweight approximation of a pcluster for tests that
-		// don't need to validate running workloads or interaction with kube controllers.
-		downstreamServer := NewFakeWorkloadServer(t, sf.upstreamServer, sf.syncTargetPath, sf.syncTargetName)
-		downstreamConfig = downstreamServer.BaseConfig(t)
-		downstreamKubeconfigPath = downstreamServer.KubeconfigPath()
-	}
-
-	if sf.prepareDownstream != nil {
-		// Attempt CRD installation to ensure the downstream server has an API surface
-		// compatible with the test.
- sf.prepareDownstream(downstreamConfig, !useDeployedSyncer) - } - - // Apply the yaml output from the plugin to the downstream server - KubectlApply(t, downstreamKubeconfigPath, syncerYAML) - - // collect both in deployed and in-process mode - t.Cleanup(func() { - ctx, cancelFn := context.WithDeadline(context.Background(), time.Now().Add(wait.ForeverTestTimeout)) - defer cancelFn() - - t.Logf("Collecting imported resource info: %s", artifactDir) - upstreamCfg := sf.upstreamServer.BaseConfig(t) - - gather := func(client dynamic.Interface, gvr schema.GroupVersionResource) { - resourceClient := client.Resource(gvr) - - list, err := resourceClient.List(ctx, metav1.ListOptions{}) - if err != nil { - // Don't fail the test - t.Logf("Error gathering %s: %v", gvr, err) - return - } - - for i := range list.Items { - item := list.Items[i] - sf.upstreamServer.Artifact(t, func() (runtime.Object, error) { - return &item, nil - }) - } - } - - upstreamClusterDynamic, err := kcpdynamic.NewForConfig(upstreamCfg) - require.NoError(t, err, "error creating upstream dynamic client") - - downstreamDynamic, err := dynamic.NewForConfig(downstreamConfig) - require.NoError(t, err, "error creating downstream dynamic client") - - kcpClusterClient, err := kcpclientset.NewForConfig(upstreamCfg) - require.NoError(t, err, "error creating upstream kcp client") - - gather(upstreamClusterDynamic.Cluster(sf.syncTargetPath), workloadv1alpha1.SchemeGroupVersion.WithResource("synctargets")) - gather(upstreamClusterDynamic.Cluster(sf.syncTargetPath), scheduling1alpha1.SchemeGroupVersion.WithResource("locations")) - gather(upstreamClusterDynamic.Cluster(sf.syncTargetPath), apiresourcev1alpha1.SchemeGroupVersion.WithResource("apiresourceimports")) - gather(upstreamClusterDynamic.Cluster(sf.syncTargetPath), apiresourcev1alpha1.SchemeGroupVersion.WithResource("negotiatedapiresources")) - gather(upstreamClusterDynamic.Cluster(sf.syncTargetPath), corev1.SchemeGroupVersion.WithResource("namespaces")) - gather(downstreamDynamic, corev1.SchemeGroupVersion.WithResource("namespaces")) - - syncTarget, err := kcpClusterClient.Cluster(sf.syncTargetPath).WorkloadV1alpha1().SyncTargets().Get(ctx, sf.syncTargetName, metav1.GetOptions{}) - if err != nil { - t.Logf("Error gathering sync target: %v", err) - return - } - - for _, resource := range syncTarget.Status.SyncedResources { - for _, version := range resource.Versions { - gvr := schema.GroupVersionResource{ - Group: resource.Group, - Resource: resource.Resource, - Version: version, - } - for _, syncedUserClusterName := range sf.syncedUserClusterNames { - gather(upstreamClusterDynamic.Cluster(syncedUserClusterName.Path()), gvr) - } - gather(downstreamDynamic, gvr) - } - } - }) - - // Extract the configuration for an in-process syncer from the resources that were - // applied to the downstream server. This maximizes the parity between the - // configuration of a deployed and in-process syncer. 
-	var syncerID string
-	for _, doc := range strings.Split(string(syncerYAML), "\n---\n") {
-		var manifest struct {
-			metav1.ObjectMeta `json:"metadata"`
-		}
-		err := yaml.Unmarshal([]byte(doc), &manifest)
-		require.NoError(t, err)
-		if manifest.Namespace != "" {
-			syncerID = manifest.Namespace
-			break
-		}
-	}
-	require.NotEmpty(t, syncerID, "failed to extract syncer namespace from yaml produced by plugin:\n%s", string(syncerYAML))
-
-	syncerConfig := syncerConfigFromCluster(t, downstreamConfig, syncerID, syncerID)
-
-	downstreamKubeClient, err := kubernetesclient.NewForConfig(downstreamConfig)
-	require.NoError(t, err)
-
-	ctx, cancelFunc := context.WithCancel(context.Background())
-	t.Cleanup(cancelFunc)
-
-	kcpClusterClient, err := kcpclientset.NewForConfig(syncerConfig.UpstreamConfig)
-	require.NoError(t, err)
-	var syncTargetClusterName logicalcluster.Name
-	syncTarget, err := kcpClusterClient.Cluster(syncerConfig.SyncTargetPath).WorkloadV1alpha1().SyncTargets().Get(ctx, syncerConfig.SyncTargetName, metav1.GetOptions{})
-	require.NoError(t, err)
-
-	syncTargetClusterName = logicalcluster.From(syncTarget)
-
-	getVWURLs := func(toURL func(workloadv1alpha1.VirtualWorkspace) string) func() []string {
-		return func() []string {
-			syncTarget, err := kcpClusterClient.Cluster(syncerConfig.SyncTargetPath).WorkloadV1alpha1().SyncTargets().Get(ctx, syncerConfig.SyncTargetName, metav1.GetOptions{})
-			require.NoError(t, err)
-
-			var urls []string
-			for _, vw := range syncTarget.Status.VirtualWorkspaces {
-				urls = append(urls, toURL(vw))
-			}
-			return urls
-		}
-	}
-
-	return &appliedSyncerFixture{
-		syncerFixture: *sf,
-
-		SyncerConfig:             syncerConfig,
-		SyncerID:                 syncerID,
-		SyncTargetClusterName:    syncTargetClusterName,
-		DownstreamConfig:         downstreamConfig,
-		DownstreamKubeClient:     downstreamKubeClient,
-		DownstreamKubeconfigPath: downstreamKubeconfigPath,
-
-		GetSyncerVirtualWorkspaceURLs:   getVWURLs(func(vw workloadv1alpha1.VirtualWorkspace) string { return vw.SyncerURL }),
-		GetUpsyncerVirtualWorkspaceURLs: getVWURLs(func(vw workloadv1alpha1.VirtualWorkspace) string { return vw.UpsyncerURL }),
-	}
-}
-
-// StartSyncer starts a new Syncer against the upstream kcp workspaces.
-// Whether the syncer runs in-process or is deployed on a pcluster depends on
-// whether --pcluster-kubeconfig and --syncer-image are supplied to the test invocation.
-func (sf *appliedSyncerFixture) StartSyncer(t *testing.T) *StartedSyncerFixture {
-	t.Helper()
-
-	useDeployedSyncer := len(TestConfig.PClusterKubeconfig()) > 0
-	artifactDir, _, err := ScratchDirs(t)
-	if err != nil {
-		t.Errorf("failed to create temp dir for syncer artifacts: %v", err)
-	}
-
-	ctx, cancelFunc := context.WithCancel(context.Background())
-	t.Cleanup(cancelFunc)
-
-	if useDeployedSyncer {
-		t.Cleanup(func() {
-			ctx, cancelFn := context.WithDeadline(context.Background(), time.Now().Add(wait.ForeverTestTimeout))
-			defer cancelFn()
-
-			// collect syncer logs
-			t.Logf("Collecting syncer pod logs")
-			func() {
-				t.Logf("Listing downstream pods in namespace %s", sf.SyncerID)
-				pods, err := sf.DownstreamKubeClient.CoreV1().Pods(sf.SyncerID).List(ctx, metav1.ListOptions{})
-				if err != nil {
-					t.Logf("failed to list pods in %s: %v", sf.SyncerID, err)
-					return
-				}
-
-				for _, pod := range pods.Items {
-					// Check if the pod is running before trying to get the logs; skip it if not, to avoid failing the test.
-					if pod.Status.Phase != corev1.PodRunning {
-						t.Logf("Pod %s is not running", pod.Name)
-						continue
-					}
-					artifactPath := filepath.Join(artifactDir, fmt.Sprintf("syncer-%s-%s.log", sf.SyncerID, pod.Name))
-
-					// If the pod has finished or crashed, try to get the --previous logs.
-					extraArg := ""
-					if pod.Status.Phase == corev1.PodSucceeded || pod.Status.Phase == corev1.PodFailed {
-						extraArg = "--previous"
-					}
-
-					t.Logf("Collecting downstream logs for pod %s/%s: %s", sf.SyncerID, pod.Name, artifactPath)
-					logs := Kubectl(t, sf.DownstreamKubeconfigPath, "-n", sf.SyncerID, "logs", pod.Name, extraArg)
-
-					err = os.WriteFile(artifactPath, logs, 0644)
-					if err != nil {
-						t.Logf("failed to write logs for pod %s in %s to %s: %v", pod.Name, sf.SyncerID, artifactPath, err)
-						continue // not fatal
-					}
-				}
-			}()
-
-			if preserveTestResources() {
-				return
-			}
-
-			t.Logf("Deleting syncer resources for sync target %s|%s", sf.SyncerConfig.SyncTargetPath, sf.SyncerConfig.SyncTargetName)
-			err := sf.DownstreamKubeClient.CoreV1().Namespaces().Delete(ctx, sf.SyncerID, metav1.DeleteOptions{})
-			if err != nil {
-				t.Errorf("failed to delete Namespace %q: %v", sf.SyncerID, err)
-			}
-			err = sf.DownstreamKubeClient.RbacV1().ClusterRoleBindings().Delete(ctx, sf.SyncerID, metav1.DeleteOptions{})
-			if err != nil {
-				t.Errorf("failed to delete ClusterRoleBinding %q: %v", sf.SyncerID, err)
-			}
-			err = sf.DownstreamKubeClient.RbacV1().ClusterRoles().Delete(ctx, sf.SyncerID, metav1.DeleteOptions{})
-			if err != nil {
-				t.Errorf("failed to delete ClusterRole %q: %v", sf.SyncerID, err)
-			}
-
-			t.Logf("Deleting synced resources for sync target %s|%s", sf.SyncerConfig.SyncTargetPath, sf.SyncerConfig.SyncTargetName)
-			namespaces, err := sf.DownstreamKubeClient.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
-			if err != nil {
-				t.Errorf("failed to list namespaces: %v", err)
-			}
-			for _, ns := range namespaces.Items {
-				locator, exists, err := shared.LocatorFromAnnotations(ns.Annotations)
-				require.NoError(t, err, "failed to extract locator from namespace %s", ns.Name)
-				if !exists {
-					continue // Not a kcp-synced namespace
-				}
-				found := false
-				for _, syncedUserWorkspace := range sf.syncedUserClusterNames {
-					if locator.ClusterName == syncedUserWorkspace {
-						found = true
-						break
-					}
-				}
-				if !found {
-					continue // Not a namespace for one of this syncer's synced user workspaces
-				}
-				if locator.SyncTarget.ClusterName != sf.SyncerConfig.SyncTargetPath.String() ||
-					locator.SyncTarget.Name != sf.SyncerConfig.SyncTargetName {
-					continue // Not a namespace synced by this syncer
-				}
-				if err = sf.DownstreamKubeClient.CoreV1().Namespaces().Delete(ctx, ns.Name, metav1.DeleteOptions{}); err != nil {
-					t.Logf("failed to delete Namespace %q: %v", ns.Name, err)
-				}
-			}
-		})
-	} else {
-		// Start an in-process syncer
-		sf.SyncerConfig.DNSImage = "TODO"
-		err := syncer.StartSyncer(ctx, sf.SyncerConfig, 2, 5*time.Second, sf.SyncerID)
-		require.NoError(t, err, "syncer failed to start")
-
-		_, err = sf.DownstreamKubeClient.RbacV1().ClusterRoles().Create(ctx, &rbacv1.ClusterRole{
-			ObjectMeta: metav1.ObjectMeta{
-				Name: "syncer-rbac-fix",
-			},
-			Rules: []rbacv1.PolicyRule{
-				{
-					Verbs:     []string{"*"},
-					APIGroups: []string{rbacv1.SchemeGroupVersion.Group},
-					Resources: []string{"roles", "rolebindings"},
-				},
-			},
-		}, metav1.CreateOptions{})
-		if !apierrors.IsAlreadyExists(err) {
-			require.NoError(t, err)
-		} else {
-			t.Log("Fix ClusterRole already added")
-		}
-
-		_, err = sf.DownstreamKubeClient.RbacV1().ClusterRoleBindings().Create(ctx,
-			&rbacv1.ClusterRoleBinding{
-				ObjectMeta: metav1.ObjectMeta{
-					Name: "syncer-rbac-fix-" + sf.SyncerID,
-				},
-				RoleRef: rbacv1.RoleRef{
-					APIGroup: rbacv1.SchemeGroupVersion.Group,
-					Kind:     "ClusterRole",
-					Name:     "syncer-rbac-fix",
-				},
-				Subjects: []rbacv1.Subject{
-					{
-						Kind:      "ServiceAccount",
-						Name:      sf.SyncerID,
-						Namespace: sf.SyncerID,
-					},
-				},
-			}, metav1.CreateOptions{})
-		require.NoError(t, err)
-
-		for _, syncedUserWorkspace := range sf.syncedUserClusterNames {
-			dnsID := shared.GetDNSID(syncedUserWorkspace, types.UID(sf.SyncerConfig.SyncTargetUID), sf.SyncerConfig.SyncTargetName)
-			_, err := sf.DownstreamKubeClient.CoreV1().Endpoints(sf.SyncerID).Create(ctx, endpoints(dnsID, sf.SyncerID), metav1.CreateOptions{})
-			if apierrors.IsAlreadyExists(err) {
-				t.Logf("Failed creating the fake Syncer Endpoint since it already exists - ignoring: %v", err)
-			} else {
-				require.NoError(t, err)
-			}
-
-			// The DNS service may or may not have been created by the spec controller. In any case, we want to make sure
-			// the service ClusterIP is set.
-			err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
-				svc, err := sf.DownstreamKubeClient.CoreV1().Services(sf.SyncerID).Get(ctx, dnsID, metav1.GetOptions{})
-				if err != nil && !apierrors.IsNotFound(err) {
-					return err
-				}
-				if apierrors.IsNotFound(err) {
-					_, err = sf.DownstreamKubeClient.CoreV1().Services(sf.SyncerID).Create(ctx, service(dnsID, sf.SyncerID), metav1.CreateOptions{})
-					if err == nil {
-						return nil
-					}
-					if !apierrors.IsAlreadyExists(err) {
-						return err
-					}
-					svc, err = sf.DownstreamKubeClient.CoreV1().Services(sf.SyncerID).Get(ctx, dnsID, metav1.GetOptions{})
-					if err != nil {
-						return err
-					}
-				}
-
-				svc.Spec.ClusterIP = "8.8.8.8"
-				_, err = sf.DownstreamKubeClient.CoreV1().Services(sf.SyncerID).Update(ctx, svc, metav1.UpdateOptions{})
-				return err
-			})
-			require.NoError(t, err)
-		}
-	}
-
-	startedSyncer := &StartedSyncerFixture{
-		sf,
-	}
-
-	// The sync target becoming ready indicates the syncer is healthy and has
-	// successfully sent a heartbeat to kcp.
-	startedSyncer.WaitForSyncTargetReady(ctx, t)
-
-	return startedSyncer
-}
-
-// appliedSyncerFixture contains the configuration required to start a syncer and interact with its
-// downstream cluster.
-type appliedSyncerFixture struct {
-	syncerFixture
-
-	SyncerConfig          *syncer.SyncerConfig
-	SyncerID              string
-	SyncTargetClusterName logicalcluster.Name
-
-	// Provide cluster-admin config and client for test purposes. The downstream config in
-	// SyncerConfig will be less privileged.
-	DownstreamConfig         *rest.Config
-	DownstreamKubeClient     kubernetesclient.Interface
-	DownstreamKubeconfigPath string
-
-	GetSyncerVirtualWorkspaceURLs   func() []string
-	GetUpsyncerVirtualWorkspaceURLs func() []string
-
-	stopHeartBeat    context.CancelFunc
-	stopSyncerTunnel context.CancelFunc
-}
-
-func (sf *appliedSyncerFixture) StartSyncerTunnel(t *testing.T) *StartedSyncerFixture {
-	t.Helper()
-	ctx, cancelFunc := context.WithCancel(context.Background())
-	t.Cleanup(cancelFunc)
-	sf.stopSyncerTunnel = cancelFunc
-
-	downstreamClient, err := dynamic.NewForConfig(sf.SyncerConfig.DownstreamConfig)
-	require.NoError(t, err)
-
-	downstreamInformer := dynamicinformer.NewDynamicSharedInformerFactory(downstreamClient, 10*time.Hour)
-	downstreamInformer.Start(ctx.Done())
-
-	podGvr := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}
-	namespaceGvr := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"}
-	informers := make(map[schema.GroupVersionResource]kubernetesinformers.GenericInformer)
-
-	// Bootstrap the pod and namespace informers so they are ready to use during tests.
-	informers[podGvr] = downstreamInformer.ForResource(podGvr)
-	indexers.AddIfNotPresentOrDie(informers[podGvr].Informer().GetIndexer(), cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
-	informers[namespaceGvr] = downstreamInformer.ForResource(namespaceGvr)
-
-	syncer.StartSyncerTunnel(ctx, sf.SyncerConfig.UpstreamConfig, sf.SyncerConfig.DownstreamConfig, sf.SyncTargetClusterName, sf.SyncerConfig.SyncTargetName, sf.SyncerConfig.SyncTargetUID, func(gvr schema.GroupVersionResource) (cache.GenericLister, error) {
-		if _, ok := informers[gvr]; !ok {
-			return nil, fmt.Errorf("no informer for %v", gvr)
-		}
-		return informers[gvr].Lister(), nil
-	})
-	startedSyncer := &StartedSyncerFixture{
-		sf,
-	}
-
-	return startedSyncer
-}
-
-// StopSyncerTunnel stops the syncer tunnel; the syncer will close the reverse connection and
-// pod subresources will no longer be available.
-func (sf *StartedSyncerFixture) StopSyncerTunnel(t *testing.T) {
-	t.Helper()
-
-	sf.stopSyncerTunnel()
-}
-
-// StartHeartBeat starts the Heartbeat keeper to maintain
-// the SyncTarget in the Ready state.
-// No resources will be effectively synced after calling this method.
-func (sf *appliedSyncerFixture) StartHeartBeat(t *testing.T) *StartedSyncerFixture {
-	t.Helper()
-
-	ctx, cancelFunc := context.WithCancel(context.Background())
-	t.Cleanup(cancelFunc)
-	sf.stopHeartBeat = cancelFunc
-
-	kcpBootstrapClusterClient, err := kcpclientset.NewForConfig(sf.SyncerConfig.UpstreamConfig)
-	require.NoError(t, err)
-	kcpSyncTargetClient := kcpBootstrapClusterClient.Cluster(sf.SyncerConfig.SyncTargetPath)
-
-	// Start the heartbeat keeper to keep the SyncTarget ready for the duration of the e2e test.
-	syncer.StartHeartbeat(ctx, kcpSyncTargetClient, sf.SyncerConfig.SyncTargetName, sf.SyncerConfig.SyncTargetUID)
-
-	startedSyncer := &StartedSyncerFixture{
-		sf,
-	}
-
-	// The sync target becoming ready indicates the syncer is healthy and has
-	// successfully sent a heartbeat to kcp.
-	startedSyncer.WaitForSyncTargetReady(ctx, t)
-
-	return startedSyncer
-}
-
-// StartAPIImporter starts the APIImporter the same way the Syncer would have done if started.
-// This allows KCP to do the API compatibility checks and update the SyncTarget accordingly.
-// The real syncer is not started, and no resources will be effectively synced after calling this method.
-func (sf *appliedSyncerFixture) StartAPIImporter(t *testing.T) *appliedSyncerFixture {
-	t.Helper()
-
-	ctx, cancelFunc := context.WithCancel(context.Background())
-	t.Cleanup(cancelFunc)
-
-	kcpBootstrapClusterClient, err := kcpclientset.NewForConfig(sf.SyncerConfig.UpstreamConfig)
-	require.NoError(t, err)
-	kcpSyncTargetClient := kcpBootstrapClusterClient.Cluster(sf.SyncerConfig.SyncTargetPath)
-
-	// Import the resource schemas of the resources to sync from the physical cluster, to enable the compatibility checks in KCP.
-	resources := sets.List[string](sf.SyncerConfig.ResourcesToSync)
-	kcpSyncTargetInformerFactory := kcpinformers.NewSharedScopedInformerFactoryWithOptions(kcpSyncTargetClient, 10*time.Hour, kcpinformers.WithTweakListOptions(
-		func(listOptions *metav1.ListOptions) {
-			listOptions.FieldSelector = fields.OneTermEqualSelector("metadata.name", sf.SyncerConfig.SyncTargetName).String()
-		},
-	))
-	kcpImporterInformerFactory := kcpinformers.NewSharedScopedInformerFactoryWithOptions(kcpSyncTargetClient, 10*time.Hour)
-	apiImporter, err := syncer.NewAPIImporter(
-		sf.SyncerConfig.UpstreamConfig, sf.SyncerConfig.DownstreamConfig,
-		kcpSyncTargetInformerFactory.Workload().V1alpha1().SyncTargets(),
-		kcpImporterInformerFactory.Apiresource().V1alpha1().APIResourceImports(),
-		resources,
-		sf.SyncerConfig.SyncTargetPath, sf.SyncerConfig.SyncTargetName, types.UID(sf.SyncerConfig.SyncTargetUID))
-	require.NoError(t, err)
-
-	kcpImporterInformerFactory.Start(ctx.Done())
-	kcpSyncTargetInformerFactory.Start(ctx.Done())
-	kcpSyncTargetInformerFactory.WaitForCacheSync(ctx.Done())
-
-	go apiImporter.Start(klog.NewContext(ctx, klog.FromContext(ctx).WithValues("resources", resources)), 5*time.Second)
-
-	return sf
-}
-
-// StartedSyncerFixture contains the configuration used to start a syncer and interact with its
-// downstream cluster.
-type StartedSyncerFixture struct {
-	*appliedSyncerFixture
-}
-
-// StopHeartBeat stops maintaining the heartbeat for this Syncer's SyncTarget.
-func (sf *StartedSyncerFixture) StopHeartBeat(t *testing.T) {
-	t.Helper()
-
-	sf.stopHeartBeat()
-}
-
-// WaitForSyncTargetReady waits for the SyncTarget to be ready.
-// The SyncTarget becoming ready indicates that the syncer on the related
-// physical cluster is healthy and has successfully sent a heartbeat to kcp.
-func (sf *StartedSyncerFixture) WaitForSyncTargetReady(ctx context.Context, t *testing.T) {
-	t.Helper()
-
-	cfg := sf.SyncerConfig
-
-	kcpClusterClient, err := kcpclientset.NewForConfig(cfg.UpstreamConfig)
-	require.NoError(t, err)
-	EventuallyReady(t, func() (conditions.Getter, error) {
-		return kcpClusterClient.Cluster(cfg.SyncTargetPath).WorkloadV1alpha1().SyncTargets().Get(ctx, cfg.SyncTargetName, metav1.GetOptions{})
-	}, "Waiting for cluster %q condition %q", cfg.SyncTargetName, conditionsv1alpha1.ReadyCondition)
-	t.Logf("Cluster %q is %s", cfg.SyncTargetName, conditionsv1alpha1.ReadyCondition)
-}
-
-func (sf *StartedSyncerFixture) DownstreamNamespaceFor(t *testing.T, upstreamWorkspace logicalcluster.Name, upstreamNamespace string) string {
-	t.Helper()
-
-	desiredNSLocator := shared.NewNamespaceLocator(upstreamWorkspace, sf.SyncTargetClusterName,
-		types.UID(sf.SyncerConfig.SyncTargetUID), sf.SyncerConfig.SyncTargetName, upstreamNamespace)
-	downstreamNamespaceName, err := shared.PhysicalClusterNamespaceName(desiredNSLocator)
-	require.NoError(t, err)
-	return downstreamNamespaceName
-}
-
-func (sf *StartedSyncerFixture) ToSyncTargetKey() string {
-	return workloadv1alpha1.ToSyncTargetKey(sf.SyncTargetClusterName, sf.SyncerConfig.SyncTargetName)
-}
-
-// syncerConfigFromCluster reads the configuration needed to start an in-process
-// syncer from the resources applied to a cluster for a deployed syncer.
-func syncerConfigFromCluster(t *testing.T, downstreamConfig *rest.Config, namespace, syncerID string) *syncer.SyncerConfig {
-	t.Helper()
-
-	ctx, cancelFunc := context.WithCancel(context.Background())
-	t.Cleanup(cancelFunc)
-
-	downstreamKubeClient, err := kubernetesclient.NewForConfig(downstreamConfig)
-	require.NoError(t, err)
-
-	// Read the upstream kubeconfig from the syncer secret
-	secret, err := downstreamKubeClient.CoreV1().Secrets(namespace).Get(ctx, syncerID, metav1.GetOptions{})
-	require.NoError(t, err)
-	upstreamConfigBytes := secret.Data[workloadcliplugin.SyncerSecretConfigKey]
-	require.NotEmpty(t, upstreamConfigBytes, "upstream config is required")
-	upstreamConfig, err := clientcmd.RESTConfigFromKubeConfig(upstreamConfigBytes)
-	require.NoError(t, err, "failed to load upstream config")
-
-	// Read the arguments from the syncer deployment
-	deployment, err := downstreamKubeClient.AppsV1().Deployments(namespace).Get(ctx, syncerID, metav1.GetOptions{})
-	require.NoError(t, err)
-	containers := deployment.Spec.Template.Spec.Containers
-	require.NotEmpty(t, containers, "expected at least one container in syncer deployment")
-	argMap, err := syncerArgsToMap(containers[0].Args)
-	require.NoError(t, err)
-
-	require.NotEmpty(t, argMap["--sync-target-name"], "--sync-target-name is required")
-	syncTargetName := argMap["--sync-target-name"][0]
-	require.NotEmpty(t, syncTargetName, "a value for --sync-target-name is required")
-
-	require.NotEmpty(t, argMap["--from-cluster"], "--from-cluster is required")
-	fromCluster := argMap["--from-cluster"][0]
-	require.NotEmpty(t, fromCluster, "a value for --from-cluster is required")
-	syncTargetPath := logicalcluster.NewPath(fromCluster)
-
-	resourcesToSync := argMap["--resources"]
-	require.NotEmpty(t, resourcesToSync, "--resources is required")
-
-	require.NotEmpty(t, argMap["--dns-image"], "--dns-image is required")
-	dnsImage := argMap["--dns-image"][0]
-
-	require.NotEmpty(t, argMap["--sync-target-uid"], "--sync-target-uid is required")
-	syncTargetUID := argMap["--sync-target-uid"][0]
-
-	// Read the downstream token from the deployment's service account secret
-	var tokenSecret corev1.Secret
-	Eventually(t, func() (bool, string) {
-		secrets, err := downstreamKubeClient.CoreV1().Secrets(namespace).List(ctx, metav1.ListOptions{})
-		if err != nil {
-			t.Errorf("failed to list secrets: %v", err)
-			return false, fmt.Sprintf("failed to list secrets downstream: %v", err)
-		}
-		for _, secret := range secrets.Items {
-			t.Logf("checking secret %s/%s for annotation %s=%s", secret.Namespace, secret.Name, corev1.ServiceAccountNameKey, syncerID)
-			if secret.Annotations[corev1.ServiceAccountNameKey] == syncerID {
-				tokenSecret = secret
-				return len(secret.Data["token"]) > 0, fmt.Sprintf("token secret %s/%s for service account %s found", namespace, secret.Name, syncerID)
-			}
-		}
-		return false, fmt.Sprintf("token secret for service account %s/%s not found", namespace, syncerID)
-	}, wait.ForeverTestTimeout, time.Millisecond*100, "token secret in namespace %q for syncer service account %q not found", namespace, syncerID)
-	token := tokenSecret.Data["token"]
-	require.NotEmpty(t, token, "token is required")
-
-	// Compose a new downstream config that uses the token
-	downstreamConfigWithToken := ConfigWithToken(string(token), rest.CopyConfig(downstreamConfig))
-	return &syncer.SyncerConfig{
-		UpstreamConfig:                upstreamConfig,
-		DownstreamConfig:              downstreamConfigWithToken,
-		ResourcesToSync:               sets.New[string](resourcesToSync...),
-		SyncTargetPath:                syncTargetPath,
-		SyncTargetName:                syncTargetName,
-		SyncTargetUID:                 syncTargetUID,
-		DNSImage:                      dnsImage,
-		DownstreamNamespaceCleanDelay: 2 * time.Second,
-	}
-}
-
-// syncerArgsToMap converts the cli argument list from a syncer deployment into a map
-// keyed by flags.
-func syncerArgsToMap(args []string) (map[string][]string, error) {
-	argMap := map[string][]string{}
-	for _, arg := range args {
-		argParts := strings.SplitN(arg, "=", 2)
-		if len(argParts) != 2 {
-			return nil, fmt.Errorf("arg %q isn't of the expected form `key=value`", arg)
-		}
-		key, value := argParts[0], argParts[1]
-		if _, ok := argMap[key]; !ok {
-			argMap[key] = []string{value}
-		} else {
-			argMap[key] = append(argMap[key], value)
-		}
-	}
-	return argMap, nil
-}
-
-func endpoints(name, namespace string) *corev1.Endpoints {
-	return &corev1.Endpoints{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      name,
-			Namespace: namespace,
-		},
-		Subsets: []corev1.EndpointSubset{
-			{Addresses: []corev1.EndpointAddress{
-				{
-					IP: "8.8.8.8",
-				}}},
-		},
-	}
-}
-
-func service(name, namespace string) *corev1.Service {
-	return &corev1.Service{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      name,
-			Namespace: namespace,
-		},
-		Spec: corev1.ServiceSpec{
-			ClusterIP: "8.8.8.8",
-		},
-	}
-}
diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go
index 5abf8d9f097..582ef578b5a 100644
--- a/test/e2e/framework/util.go
+++ b/test/e2e/framework/util.go
@@ -62,7 +62,6 @@ import (
 	"github.com/kcp-dev/kcp/config/helpers"
 	conditionsv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/apis/conditions/v1alpha1"
 	"github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/util/conditions"
-	workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1"
 	kcpscheme "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/scheme"
 )
 
@@ -292,8 +291,6 @@ func GetFreePort(t *testing.T) (string, error) {
 
 type ArtifactFunc func(*testing.T, func() (runtime.Object, error))
 
-type SyncTargetOption func(cluster *workloadv1alpha1.SyncTarget)
-
 // LogicalClusterRawConfig returns the raw cluster config of the given config.
func LogicalClusterRawConfig(rawConfig clientcmdapi.Config, logicalClusterName logicalcluster.Path, contextName string) clientcmdapi.Config { var ( diff --git a/test/e2e/reconciler/cluster/controller_test.go b/test/e2e/reconciler/cluster/controller_test.go deleted file mode 100644 index 2b43a79482c..00000000000 --- a/test/e2e/reconciler/cluster/controller_test.go +++ /dev/null @@ -1,227 +0,0 @@ -/* -Copyright 2021 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cluster - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - kcpkubernetesclientset "github.com/kcp-dev/client-go/kubernetes" - "github.com/kcp-dev/logicalcluster/v3" - "github.com/stretchr/testify/require" - - corev1 "k8s.io/api/core/v1" - apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - corev1client "k8s.io/client-go/kubernetes/typed/core/v1" - "k8s.io/client-go/rest" - - "github.com/kcp-dev/kcp/pkg/syncer/shared" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" - kcpclientset "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster" - fixturewildwest "github.com/kcp-dev/kcp/test/e2e/fixtures/wildwest" - "github.com/kcp-dev/kcp/test/e2e/fixtures/wildwest/apis/wildwest" - wildwestv1alpha1 "github.com/kcp-dev/kcp/test/e2e/fixtures/wildwest/apis/wildwest/v1alpha1" - wildwestclientset "github.com/kcp-dev/kcp/test/e2e/fixtures/wildwest/client/clientset/versioned" - wildwestclusterclientset "github.com/kcp-dev/kcp/test/e2e/fixtures/wildwest/client/clientset/versioned/cluster" - wildwestv1alpha1client "github.com/kcp-dev/kcp/test/e2e/fixtures/wildwest/client/clientset/versioned/typed/wildwest/v1alpha1" - "github.com/kcp-dev/kcp/test/e2e/framework" -) - -const testNamespace = "cluster-controller-test" -const sourceClusterName, sinkClusterName = "source", "sink" - -func TestClusterController(t *testing.T) { - t.Parallel() - framework.Suite(t, "transparent-multi-cluster") - - type runningServer struct { - client wildwestv1alpha1client.WildwestV1alpha1Interface - coreClient corev1client.CoreV1Interface - } - var testCases = []struct { - name string - work func(ctx context.Context, t *testing.T, servers map[string]runningServer, syncerFixture *framework.StartedSyncerFixture) - }{ - { - name: "create an object, expect spec and status to sync to sink, then delete", - work: func(ctx context.Context, t *testing.T, servers map[string]runningServer, syncerFixture *framework.StartedSyncerFixture) { - t.Helper() - kcpClient, err := kcpclientset.NewForConfig(syncerFixture.SyncerConfig.UpstreamConfig) - require.NoError(t, err) - - syncTarget, err := kcpClient.Cluster(syncerFixture.SyncerConfig.SyncTargetPath).WorkloadV1alpha1().SyncTargets().Get(ctx, - syncerFixture.SyncerConfig.SyncTargetName, - metav1.GetOptions{}, - ) - require.NoError(t, err) - syncTargetKey := 
workloadv1alpha1.ToSyncTargetKey(logicalcluster.From(syncTarget), syncTarget.GetName()) - - t.Logf("Creating cowboy timothy") - var cowboy *wildwestv1alpha1.Cowboy - require.Eventually(t, func() bool { - cowboy, err = servers[sourceClusterName].client.Cowboys(testNamespace).Create(ctx, &wildwestv1alpha1.Cowboy{ - ObjectMeta: metav1.ObjectMeta{ - Name: "timothy", - Namespace: testNamespace, - Labels: map[string]string{ - "state.workload.kcp.io/" + syncTargetKey: string(workloadv1alpha1.ResourceStateSync), - }, - }, - Spec: wildwestv1alpha1.CowboySpec{Intent: "yeehaw"}, - }, metav1.CreateOptions{}) - if err != nil { - t.Logf("err: %v", err) - } - - return err == nil - }, wait.ForeverTestTimeout, time.Millisecond*100, "expected cowboy resource to be created") - - nsLocator := shared.NewNamespaceLocator(logicalcluster.From(cowboy), logicalcluster.From(syncTarget), syncTarget.GetUID(), syncTarget.GetName(), cowboy.Namespace) - targetNamespace, err := shared.PhysicalClusterNamespaceName(nsLocator) - require.NoError(t, err, "Error determining namespace mapping for %v", nsLocator) - - t.Logf("Expecting namespace %s to show up in sink", targetNamespace) - require.Eventually(t, func() bool { - if _, err = servers[sinkClusterName].coreClient.Namespaces().Get(ctx, targetNamespace, metav1.GetOptions{}); err != nil { - if apierrors.IsNotFound(err) { - return false - } - require.NoError(t, err, "Error getting namespace %q", targetNamespace) - return false - } - return true - }, wait.ForeverTestTimeout, time.Millisecond*100, "expected namespace to be created in sink") - - t.Logf("Expecting same spec to show up in sink") - framework.Eventually(t, func() (bool, string) { - if got, err := servers[sinkClusterName].client.Cowboys(targetNamespace).Get(ctx, cowboy.Name, metav1.GetOptions{}); err != nil { - if apierrors.IsNotFound(err) { - cowboy, err := servers[sourceClusterName].client.Cowboys(testNamespace).Get(ctx, cowboy.Name, metav1.GetOptions{}) - if err != nil { - return false, fmt.Sprintf("error getting cowboy %q: %v", cowboy.Name, err) - } - return false, "Downstream cowboy couldn't be found." 
- } - return false, fmt.Sprintf("error getting cowboy %q in sink: %v", cowboy.Name, err) - } else if diff := cmp.Diff(cowboy.Spec, got.Spec); diff != "" { - return false, fmt.Sprintf("spec mismatch (-want +got):\n%s", diff) - } - return true, "" - }, wait.ForeverTestTimeout, time.Millisecond*100, "expected cowboy to be synced to sink with right spec") - - t.Logf("Patching status in sink") - updated, err := servers[sinkClusterName].client.Cowboys(targetNamespace).Patch(ctx, cowboy.Name, types.MergePatchType, []byte(`{"status":{"result":"giddyup"}}`), metav1.PatchOptions{}, "status") - require.NoError(t, err, "failed to patch cowboy status in sink") - - t.Logf("Expecting status update to show up in source") - require.Eventually(t, func() bool { - if got, err := servers[sourceClusterName].client.Cowboys(testNamespace).Get(ctx, cowboy.Name, metav1.GetOptions{}); err != nil { - if apierrors.IsNotFound(err) { - return false - } - t.Logf("Error getting cowboy %q in source: %v", cowboy.Name, err) - return false - } else if diff := cmp.Diff(updated.Status, got.Status); diff != "" { - return false - } - return true - }, wait.ForeverTestTimeout, time.Millisecond*100, "expected source to show status change") - - t.Logf("Deleting object in the source") - err = servers[sourceClusterName].client.Cowboys(testNamespace).Delete(ctx, cowboy.Name, metav1.DeleteOptions{}) - require.NoError(t, err, "error deleting source cowboy") - - // TODO(ncdc): the expect code for cowboys currently expects the cowboy to exist. See if we can adjust it - // so we can reuse that here instead of polling. - t.Logf("Expecting the object in the sink to be deleted") - require.Eventually(t, func() bool { - _, err := servers[sinkClusterName].client.Cowboys(targetNamespace).Get(ctx, cowboy.Name, metav1.GetOptions{}) - return apierrors.IsNotFound(err) - }, wait.ForeverTestTimeout, 100*time.Millisecond, "expected sink cowboy to be deleted") - }, - }, - } - - source := framework.SharedKcpServer(t) - orgPath, _ := framework.NewOrganizationFixture(t, source, framework.TODO_WithoutMultiShardSupport()) - - for _, testCase := range testCases { - testCase := testCase - t.Run(testCase.name, func(t *testing.T) { - t.Parallel() - - ctx, cancelFunc := context.WithCancel(context.Background()) - t.Cleanup(cancelFunc) - - t.Log("Creating a workspace") - wsPath, _ := framework.NewWorkspaceFixture(t, source, orgPath, framework.WithName("source"), framework.TODO_WithoutMultiShardSupport()) - - // clients - sourceConfig := source.BaseConfig(t) - - sourceKubeClient, err := kcpkubernetesclientset.NewForConfig(sourceConfig) - require.NoError(t, err) - - sourceWildwestClusterClient, err := wildwestclusterclientset.NewForConfig(sourceConfig) - require.NoError(t, err) - - syncerFixture := framework.NewSyncerFixture(t, source, wsPath, - framework.WithExtraResources("cowboys.wildwest.dev"), - framework.WithDownstreamPreparation(func(config *rest.Config, isFakePCluster bool) { - // Always install the crd regardless of whether the target is - // logical or not since cowboys is not a native type. 
- sinkCrdClient, err := apiextensionsclientset.NewForConfig(config) - require.NoError(t, err) - t.Log("Installing test CRDs into sink cluster...") - fixturewildwest.FakePClusterCreate(t, sinkCrdClient.ApiextensionsV1().CustomResourceDefinitions(), metav1.GroupResource{Group: wildwest.GroupName, Resource: "cowboys"}) - })).CreateSyncTargetAndApplyToDownstream(t).StartSyncer(t) - - t.Logf("Bind second user workspace to location workspace") - framework.NewBindCompute(t, wsPath, source, framework.WithAPIExportsWorkloadBindOption(workloadv1alpha1.ImportedAPISExportName)).Bind(t) - - sinkWildwestClient, err := wildwestclientset.NewForConfig(syncerFixture.DownstreamConfig) - require.NoError(t, err) - - t.Log("Creating namespace in source cluster...") - _, err = sourceKubeClient.Cluster(wsPath).CoreV1().Namespaces().Create(ctx, &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{Name: testNamespace}, - }, metav1.CreateOptions{}) - require.NoError(t, err) - - runningServers := map[string]runningServer{ - sourceClusterName: { - client: sourceWildwestClusterClient.Cluster(wsPath).WildwestV1alpha1(), - coreClient: sourceKubeClient.Cluster(wsPath).CoreV1(), - }, - sinkClusterName: { - client: sinkWildwestClient.WildwestV1alpha1(), - coreClient: syncerFixture.DownstreamKubeClient.CoreV1(), - }, - } - - t.Log("Starting test...") - testCase.work(ctx, t, runningServers, syncerFixture) - }) - } -} diff --git a/test/e2e/reconciler/deployment/deployment_coordinator_test.go b/test/e2e/reconciler/deployment/deployment_coordinator_test.go deleted file mode 100644 index fe72a3172bd..00000000000 --- a/test/e2e/reconciler/deployment/deployment_coordinator_test.go +++ /dev/null @@ -1,336 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/
-
-package deployment
-
-import (
-	"context"
-	"fmt"
-	"sort"
-	"strings"
-	"testing"
-	"time"
-
-	kcpkubernetesclientset "github.com/kcp-dev/client-go/kubernetes"
-	"github.com/kcp-dev/logicalcluster/v3"
-	"github.com/stretchr/testify/require"
-
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/util/wait"
-	"k8s.io/client-go/kubernetes"
-	"k8s.io/client-go/rest"
-
-	kcpclientset "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster"
-	"github.com/kcp-dev/kcp/test/e2e/framework"
-	"github.com/kcp-dev/kcp/test/e2e/reconciler/deployment/locations"
-	"github.com/kcp-dev/kcp/test/e2e/reconciler/deployment/workloads"
-)
-
-func TestDeploymentCoordinator(t *testing.T) {
-	t.Parallel()
-	framework.Suite(t, "transparent-multi-cluster:requires-kind")
-
-	if len(framework.TestConfig.PClusterKubeconfig()) == 0 {
-		t.Skip("Test requires a pcluster")
-	}
-
-	ctx, cancelFunc := context.WithCancel(context.Background())
-	t.Cleanup(cancelFunc)
-
-	upstreamServer := framework.SharedKcpServer(t)
-
-	upstreamConfig := upstreamServer.BaseConfig(t)
-	upstreamKubeClusterClient, err := kcpkubernetesclientset.NewForConfig(upstreamConfig)
-	require.NoError(t, err)
-
-	kcpClusterClient, err := kcpclientset.NewForConfig(upstreamConfig)
-	require.NoError(t, err)
-
-	orgPath, _ := framework.NewOrganizationFixture(t, upstreamServer, framework.TODO_WithoutMultiShardSupport())
-
-	locationWorkspacePath, _ := framework.NewWorkspaceFixture(t, upstreamServer, orgPath, framework.WithName("synctargets"), framework.TODO_WithoutMultiShardSupport())
-
-	workloadWorkspace1Path, workloadWorkspace1 := framework.NewWorkspaceFixture(t, upstreamServer, orgPath, framework.WithName("workload-1"), framework.TODO_WithoutMultiShardSupport())
-	workloadWorkspace2Path, workloadWorkspace2 := framework.NewWorkspaceFixture(t, upstreamServer, orgPath, framework.WithName("workload-2"), framework.TODO_WithoutMultiShardSupport())
-
-	eastSyncer := framework.NewSyncerFixture(t, upstreamServer, locationWorkspacePath,
-		framework.WithSyncTargetName("east"),
-		framework.WithSyncedUserWorkspaces(workloadWorkspace1, workloadWorkspace2),
-		framework.WithSyncTargetLabels(map[string]string{"region": "east"}),
-	).CreateSyncTargetAndApplyToDownstream(t).StartSyncer(t)
-
-	westSyncer := framework.NewSyncerFixture(t, upstreamServer, locationWorkspacePath,
-		framework.WithSyncTargetName("west"),
-		framework.WithSyncedUserWorkspaces(workloadWorkspace1, workloadWorkspace2),
-		framework.WithSyncTargetLabels(map[string]string{"region": "west"}),
-	).CreateSyncTargetAndApplyToDownstream(t).StartSyncer(t)
-
-	eastSyncer.WaitForSyncTargetReady(ctx, t)
-	westSyncer.WaitForSyncTargetReady(ctx, t)
-
-	t.Logf("Create 2 locations, one for each SyncTarget")
-	err = framework.CreateResources(ctx, locations.FS, upstreamConfig, locationWorkspacePath)
-	require.NoError(t, err)
-
-	t.Logf("Bind workload workspace 1 to location workspace for the east location")
-	framework.NewBindCompute(t, workloadWorkspace1Path, upstreamServer,
-		framework.WithLocationWorkspaceWorkloadBindOption(locationWorkspacePath),
-		framework.WithLocationSelectorWorkloadBindOption(metav1.LabelSelector{
-			MatchLabels: map[string]string{"region": "east"},
-		}),
-	).Bind(t)
-
-	t.Logf("Bind workload workspace 2 to location workspace for the east location")
-	framework.NewBindCompute(t, workloadWorkspace2Path, upstreamServer,
-		framework.WithLocationWorkspaceWorkloadBindOption(locationWorkspacePath),
-
framework.WithLocationSelectorWorkloadBindOption(metav1.LabelSelector{ - MatchLabels: map[string]string{"region": "east"}, - }), - ).Bind(t) - - t.Logf("Bind workload workspace 1 to location workspace for the west location") - framework.NewBindCompute(t, workloadWorkspace1Path, upstreamServer, - framework.WithLocationWorkspaceWorkloadBindOption(locationWorkspacePath), - framework.WithLocationSelectorWorkloadBindOption(metav1.LabelSelector{ - MatchLabels: map[string]string{"region": "west"}, - }), - ).Bind(t) - - t.Logf("Bind workload workspace 2 to location workspace for the west location") - framework.NewBindCompute(t, workloadWorkspace2Path, upstreamServer, - framework.WithLocationWorkspaceWorkloadBindOption(locationWorkspacePath), - framework.WithLocationSelectorWorkloadBindOption(metav1.LabelSelector{ - MatchLabels: map[string]string{"region": "west"}, - }), - ).Bind(t) - - t.Logf("Get the root compute APIExport Virtual Workspace URL") - - rootKubernetesAPIExport, err := kcpClusterClient.Cluster(logicalcluster.NewPath("root:compute")).ApisV1alpha1().APIExports().Get(ctx, "kubernetes", metav1.GetOptions{}) - require.NoError(t, err, "failed to retrieve Root compute kubernetes APIExport") - - //nolint:staticcheck // SA1019 VirtualWorkspaces is deprecated but not removed yet - require.GreaterOrEqual(t, len(rootKubernetesAPIExport.Status.VirtualWorkspaces), 1, "Root compute kubernetes APIExport should contain at least one virtual workspace URL") - - //nolint:staticcheck // SA1019 VirtualWorkspaces is deprecated but not removed yet - rootComputeKubernetesURL := rootKubernetesAPIExport.Status.VirtualWorkspaces[0].URL - - rootComputeConfig := rest.CopyConfig(upstreamConfig) - rootComputeConfig.Host = rootComputeKubernetesURL - rootComputeClusterClient, err := kcpkubernetesclientset.NewForConfig(rootComputeConfig) - require.NoError(t, err) - - framework.Eventually(t, func() (success bool, reason string) { - t.Logf("Checking deployment access through a list") - _, err := rootComputeClusterClient.AppsV1().Deployments().List(ctx, metav1.ListOptions{}) - if err != nil { - return false, fmt.Sprintf("deployments should be exposed by the root compute APIExport URL. But listing deployments produced the following error: %v", err) - } - return true, "deployments are exposed" - }, wait.ForeverTestTimeout, time.Millisecond*500, "deployments should be exposed by the root compute APIExport URL") - - t.Logf("Start the Deployment controller") - - artifactDir, _, err := framework.ScratchDirs(t) - require.NoError(t, err) - - executableName := "deployment-coordinator" - cmd := append(framework.DirectOrGoRunCommand(executableName), - "--kubeconfig="+upstreamServer.KubeconfigPath(), - "--context=base", - "--server="+rootComputeKubernetesURL, - ) - - deploymentCoordinator := framework.NewAccessory(t, artifactDir, executableName, cmd...) 
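-	// Run the coordinator as a separate accessory process; WithLogStreaming below
-	// streams its output into the test logs while it runs.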
-	err = deploymentCoordinator.Run(t, framework.WithLogStreaming)
-	require.NoError(t, err, "failed to start deployment coordinator")
-
-	downstreamKubeClient, err := kubernetes.NewForConfig(eastSyncer.DownstreamConfig)
-	require.NoError(t, err)
-
-	type downstreamInfo struct {
-		lastEventsOnEast time.Time
-		lastEventsOnWest time.Time
-		logStateOnEast   map[string]*metav1.Time
-		logStateOnWest   map[string]*metav1.Time
-		namespaceOnEast  string
-		namespaceOnWest  string
-	}
-
-	dumpEventsAndPods := func(di downstreamInfo) downstreamInfo {
-		di.lastEventsOnEast = dumpPodEvents(ctx, t, di.lastEventsOnEast, downstreamKubeClient, di.namespaceOnEast)
-		di.lastEventsOnWest = dumpPodEvents(ctx, t, di.lastEventsOnWest, downstreamKubeClient, di.namespaceOnWest)
-		di.logStateOnEast = dumpPodLogs(ctx, t, di.logStateOnEast, downstreamKubeClient, di.namespaceOnEast)
-		di.logStateOnWest = dumpPodLogs(ctx, t, di.logStateOnWest, downstreamKubeClient, di.namespaceOnWest)
-		return di
-	}
-
-	for _, workspace := range []struct {
-		clusterName       logicalcluster.Name
-		requestedReplicas int32
-	}{
-		{
-			clusterName:       logicalcluster.Name(workloadWorkspace1.Spec.Cluster),
-			requestedReplicas: 4,
-		},
-		{
-			clusterName:       logicalcluster.Name(workloadWorkspace2.Spec.Cluster),
-			requestedReplicas: 8,
-		},
-	} {
-		wkspDownstreamInfo := downstreamInfo{
-			namespaceOnEast: eastSyncer.DownstreamNamespaceFor(t, workspace.clusterName, "default"),
-			namespaceOnWest: westSyncer.DownstreamNamespaceFor(t, workspace.clusterName, "default"),
-		}
-
-		t.Logf("Create workload in workload workspace %q, with replicas set to %d", workspace.clusterName, workspace.requestedReplicas)
-		framework.Eventually(t, func() (bool, string) {
-			if err := framework.CreateResources(ctx, workloads.FS, upstreamConfig, workspace.clusterName.Path(), func(bs []byte) ([]byte, error) {
-				yaml := string(bs)
-				yaml = strings.Replace(yaml, "replicas: 1", fmt.Sprintf("replicas: %d", workspace.requestedReplicas), 1)
-				return []byte(yaml), nil
-			}); err == nil {
-				return true, ""
-			} else {
-				return false, err.Error()
-			}
-		}, wait.ForeverTestTimeout, time.Millisecond*100, "should create the deployment after the deployments resource is available in workspace %q", workspace.clusterName)
-
-		t.Logf("Wait for the workload in workspace %q to be started and available with %d replicas", workspace.clusterName, workspace.requestedReplicas)
-		func() {
-			defer dumpEventsAndPods(wkspDownstreamInfo)
-
-			framework.Eventually(t, func() (success bool, reason string) {
-				deployment, err := upstreamKubeClusterClient.Cluster(workspace.clusterName.Path()).AppsV1().Deployments("default").Get(ctx, "test", metav1.GetOptions{})
-				require.NoError(t, err)
-
-				// TODO(davidfestal): the 2 checks below are necessary to keep the test from being flaky since, for now, the coordination
-				// controller doesn't delay the syncing until the transformation annotations have been set.
-				// So it could be synced with 8 replicas on each SyncTarget at start, for a very short time, which might
-				// make it look as though the deployment replicas had been spread, though in fact they were not.
-				if _, exists := deployment.GetAnnotations()["experimental.spec-diff.workload.kcp.io/"+eastSyncer.ToSyncTargetKey()]; !exists {
-					return false, fmt.Sprintf("Deployment %s/%s should have been prepared for transformation by the coordinator for the east syncTarget", workspace.clusterName, "test")
-				}
-				if _, exists := deployment.GetAnnotations()["experimental.spec-diff.workload.kcp.io/"+westSyncer.ToSyncTargetKey()]; !exists {
-					return false, fmt.Sprintf("Deployment %s/%s should have been prepared for transformation by the coordinator for the west syncTarget", workspace.clusterName, "test")
-				}
-
-				// TODO(davidfestal): the 2 checks below are necessary to keep the test from being flaky since, for now, the coordination
-				// controller doesn't delay the syncing until the transformation annotations have been set.
-				// So it could be synced with 8 replicas on each SyncTarget at start, for a very short time, which might
-				// make it look as though the deployment replicas had been spread, though in fact they were not.
-				if _, exists := deployment.GetAnnotations()["diff.syncer.internal.kcp.io/"+eastSyncer.ToSyncTargetKey()]; !exists {
-					return false, fmt.Sprintf("Status of deployment %s/%s should have been updated by the east syncer", workspace.clusterName, "test")
-				}
-				if _, exists := deployment.GetAnnotations()["diff.syncer.internal.kcp.io/"+westSyncer.ToSyncTargetKey()]; !exists {
-					return false, fmt.Sprintf("Status of deployment %s/%s should have been updated by the west syncer", workspace.clusterName, "test")
-				}
-
-				if actual, expected := deployment.Status.AvailableReplicas, workspace.requestedReplicas; actual != expected {
-					return false, fmt.Sprintf("Deployment %s/%s had %d available replicas, not %d", workspace.clusterName, "test", actual, expected)
-				}
-				return true, ""
-			}, wait.ForeverTestTimeout, time.Millisecond*500, "deployment %s/%s was not synced", workspace.clusterName, "test")
-		}()
-
-		t.Logf("Check that each deployment on each SyncTarget has half the number of replicas")
-		downstreamDeploymentOnEastForWorkspace1, err := downstreamKubeClient.AppsV1().Deployments(wkspDownstreamInfo.namespaceOnEast).Get(ctx, "test", metav1.GetOptions{})
-		require.NoError(t, err)
-		require.Equal(t, workspace.requestedReplicas/2, downstreamDeploymentOnEastForWorkspace1.Status.AvailableReplicas, "East syncer should have received half of the replicas for workspace %q workload", workspace.clusterName)
-
-		downstreamDeploymentOnWestForWorkspace1, err := downstreamKubeClient.AppsV1().Deployments(wkspDownstreamInfo.namespaceOnWest).Get(ctx, "test", metav1.GetOptions{})
-		require.NoError(t, err)
-		require.Equal(t, workspace.requestedReplicas/2, downstreamDeploymentOnWestForWorkspace1.Status.AvailableReplicas, "West syncer should have received half of the replicas for workspace %q workload", workspace.clusterName)
-	}
-}
-
-func dumpPodEvents(ctx context.Context, t *testing.T, startAfter time.Time, downstreamKubeClient kubernetes.Interface, downstreamNamespaceName string) time.Time {
-	t.Helper()
-
-	eventList, err := downstreamKubeClient.CoreV1().Events(downstreamNamespaceName).List(ctx, metav1.ListOptions{})
-	if err != nil {
-		t.Logf("Error getting events: %v", err)
-		return startAfter // ignore; errors here are not the ones we care about.
-	}
-
-	sort.Slice(eventList.Items, func(i, j int) bool {
-		return eventList.Items[i].LastTimestamp.Time.Before(eventList.Items[j].LastTimestamp.Time)
-	})
-
-	last := startAfter
-	for _, event := range eventList.Items {
-		if event.InvolvedObject.Kind != "Pod" {
-			continue
-		}
-		if event.LastTimestamp.After(startAfter) {
-			t.Logf("Event for pod %s/%s: %s", event.InvolvedObject.Namespace, event.InvolvedObject.Name, event.Message)
-		}
-		if event.LastTimestamp.After(last) {
-			last = event.LastTimestamp.Time
-		}
-	}
-
-	pods, err := downstreamKubeClient.CoreV1().Pods(downstreamNamespaceName).List(ctx, metav1.ListOptions{})
-	if err != nil {
-		t.Logf("Error getting pods: %v", err)
-		return last // ignore; errors here are not the ones we care about.
-	}
-
-	for _, pod := range pods.Items {
-		for _, s := range pod.Status.ContainerStatuses {
-			if s.State.Terminated != nil && s.State.Terminated.FinishedAt.After(startAfter) {
-				t.Logf("Pod %s/%s container %s terminated with exit code %d: %s", pod.Namespace, pod.Name, s.Name, s.State.Terminated.ExitCode, s.State.Terminated.Message)
-			}
-		}
-	}
-
-	return last
-}
-
-func dumpPodLogs(ctx context.Context, t *testing.T, startAfter map[string]*metav1.Time, downstreamKubeClient kubernetes.Interface, downstreamNamespaceName string) map[string]*metav1.Time {
-	t.Helper()
-
-	if startAfter == nil {
-		startAfter = make(map[string]*metav1.Time)
-	}
-
-	pods, err := downstreamKubeClient.CoreV1().Pods(downstreamNamespaceName).List(ctx, metav1.ListOptions{})
-	if err != nil {
-		t.Logf("Error getting pods: %v", err)
-		return startAfter // ignore; errors here are not the ones we care about.
-	}
-	for _, pod := range pods.Items {
-		for _, c := range pod.Spec.Containers {
-			key := fmt.Sprintf("%s/%s", pod.Name, c.Name)
-			now := metav1.Now()
-			res, err := downstreamKubeClient.CoreV1().Pods(downstreamNamespaceName).GetLogs(pod.Name, &corev1.PodLogOptions{
-				SinceTime: startAfter[key],
-				Container: c.Name,
-			}).DoRaw(ctx)
-			if err != nil {
-				t.Logf("Failed to get logs for pod %s/%s container %s: %v", pod.Namespace, pod.Name, c.Name, err)
-				continue
-			}
-			for _, line := range strings.Split(string(res), "\n") {
-				t.Logf("Pod %s/%s container %s: %s", pod.Namespace, pod.Name, c.Name, line)
-			}
-			startAfter[key] = &now
-		}
-	}
-
-	return startAfter
-}
diff --git a/test/e2e/reconciler/deployment/locations/east.yaml b/test/e2e/reconciler/deployment/locations/east.yaml
deleted file mode 100644
index f2f6e009c61..00000000000
--- a/test/e2e/reconciler/deployment/locations/east.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-apiVersion: scheduling.kcp.io/v1alpha1
-kind: Location
-metadata:
-  name: east
-  labels:
-    region: east
-spec:
-  instanceSelector:
-    matchLabels:
-      region: east
-  resource:
-    group: workload.kcp.io
-    resource: synctargets
-    version: v1alpha1
diff --git a/test/e2e/reconciler/deployment/locations/embed.go b/test/e2e/reconciler/deployment/locations/embed.go
deleted file mode 100644
index c9b07a5dd2e..00000000000
--- a/test/e2e/reconciler/deployment/locations/embed.go
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
-Copyright 2022 The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package locations - -import ( - "embed" -) - -//go:embed *.yaml -var FS embed.FS diff --git a/test/e2e/reconciler/deployment/locations/west.yaml b/test/e2e/reconciler/deployment/locations/west.yaml deleted file mode 100644 index be12294541d..00000000000 --- a/test/e2e/reconciler/deployment/locations/west.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: scheduling.kcp.io/v1alpha1 -kind: Location -metadata: - name: west - labels: - region: west -spec: - instanceSelector: - matchLabels: - region: west - resource: - group: workload.kcp.io - resource: synctargets - version: v1alpha1 diff --git a/test/e2e/reconciler/deployment/workloads/deployment.yaml b/test/e2e/reconciler/deployment/workloads/deployment.yaml deleted file mode 100644 index 9ae94cecc94..00000000000 --- a/test/e2e/reconciler/deployment/workloads/deployment.yaml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: test - namespace: default -spec: - replicas: 1 - selector: - matchLabels: - app: test - template: - metadata: - labels: - app: test - spec: - containers: - - name: busybox - image: ghcr.io/distroless/busybox:latest - command: - - /bin/sh - - -ec - - | - echo "Going to sleep" - tail -f /dev/null diff --git a/test/e2e/reconciler/deployment/workloads/embed.go b/test/e2e/reconciler/deployment/workloads/embed.go deleted file mode 100644 index b00d535dc5a..00000000000 --- a/test/e2e/reconciler/deployment/workloads/embed.go +++ /dev/null @@ -1,24 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package workloads - -import ( - "embed" -) - -//go:embed *.yaml -var FS embed.FS diff --git a/test/e2e/syncer/configmap-admin-role.yaml b/test/e2e/syncer/configmap-admin-role.yaml deleted file mode 100644 index 54a42df2df0..00000000000 --- a/test/e2e/syncer/configmap-admin-role.yaml +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: configmap-admin -rules: - - apiGroups: - - "" - resources: - - configmaps - verbs: - - "*" \ No newline at end of file diff --git a/test/e2e/syncer/configmap-admin-rolebinding.yaml b/test/e2e/syncer/configmap-admin-rolebinding.yaml deleted file mode 100644 index 9e4611506f2..00000000000 --- a/test/e2e/syncer/configmap-admin-rolebinding.yaml +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: default:configmap-admin -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: configmap-admin -subjects: - - kind: ServiceAccount - name: default \ No newline at end of file diff --git a/test/e2e/syncer/deployment.yaml b/test/e2e/syncer/deployment.yaml deleted file mode 100644 index 72b4d93e403..00000000000 --- a/test/e2e/syncer/deployment.yaml +++ /dev/null @@ -1,23 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: syncer-test -spec: - replicas: 1 - selector: - matchLabels: - app: nginx - template: - metadata: - labels: - app: nginx - spec: - containers: - - name: busybox - image: ghcr.io/distroless/busybox:latest - command: - - /bin/sh - - -ec - - | - echo "Going to sleep" - tail -f /dev/null diff --git a/test/e2e/syncer/dns/dns_test.go b/test/e2e/syncer/dns/dns_test.go deleted file mode 100644 index 0e20b7a6484..00000000000 --- a/test/e2e/syncer/dns/dns_test.go +++ /dev/null @@ -1,193 +0,0 @@ -/* -Copyright 2023 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package dns - -import ( - "context" - "fmt" - "strings" - "testing" - "time" - - "github.com/kcp-dev/logicalcluster/v3" - "github.com/stretchr/testify/require" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/util/retry" - - "github.com/kcp-dev/kcp/test/e2e/framework" - "github.com/kcp-dev/kcp/test/e2e/syncer/dns/workspace1" - "github.com/kcp-dev/kcp/test/e2e/syncer/dns/workspace2" -) - -func TestDNSResolution(t *testing.T) { - t.Skip("Test is flaky, and we are going to remove the syncer soon anyway") - - t.Parallel() - framework.Suite(t, "transparent-multi-cluster:requires-kind") - - if len(framework.TestConfig.PClusterKubeconfig()) == 0 { - t.Skip("Test requires a pcluster") - } - - ctx, cancelFunc := context.WithCancel(context.Background()) - t.Cleanup(cancelFunc) - - upstreamServer := framework.SharedKcpServer(t) - - upstreamConfig := upstreamServer.BaseConfig(t) - - orgPath, _ := framework.NewOrganizationFixture(t, upstreamServer, framework.TODO_WithoutMultiShardSupport()) - - locationWorkspacePath, _ := framework.NewWorkspaceFixture(t, upstreamServer, orgPath, framework.WithName("location"), framework.TODO_WithoutMultiShardSupport()) - - workloadWorkspace1Path, workloadWorkspace1 := framework.NewWorkspaceFixture(t, upstreamServer, orgPath, framework.WithName("workload-1"), framework.TODO_WithoutMultiShardSupport()) - workloadWorkspace2Path, workloadWorkspace2 := framework.NewWorkspaceFixture(t, upstreamServer, orgPath, framework.WithName("workload-2"), framework.TODO_WithoutMultiShardSupport()) - - syncer := framework.NewSyncerFixture(t, upstreamServer, locationWorkspacePath, - framework.WithSyncedUserWorkspaces(workloadWorkspace1, workloadWorkspace2), - ).CreateSyncTargetAndApplyToDownstream(t).StartSyncer(t) - syncer.WaitForSyncTargetReady(ctx, t) - - downstreamKubeClient, err := kubernetes.NewForConfig(syncer.DownstreamConfig) - require.NoError(t, err) - - t.Logf("Bind workload workspace 1 to location workspace") - framework.NewBindCompute(t, workloadWorkspace1Path, upstreamServer, - framework.WithLocationWorkspaceWorkloadBindOption(locationWorkspacePath), - ).Bind(t) - - t.Logf("Bind workload workspace 2 to location workspace") - framework.NewBindCompute(t, workloadWorkspace2Path, upstreamServer, - framework.WithLocationWorkspaceWorkloadBindOption(locationWorkspacePath), - ).Bind(t) - - err = framework.CreateResources(ctx, workspace1.FS, upstreamConfig, workloadWorkspace1Path) - require.NoError(t, err) - - err = framework.CreateResources(ctx, workspace2.FS, upstreamConfig, workloadWorkspace2Path) - require.NoError(t, err) - - downstreamWS1NS1 := syncer.DownstreamNamespaceFor(t, logicalcluster.Name(workloadWorkspace1.Spec.Cluster), "dns-ws1-ns1") - t.Logf("Downstream namespace 1 in workspace 1 is %s", downstreamWS1NS1) - - downstreamWS1NS2 := syncer.DownstreamNamespaceFor(t, logicalcluster.Name(workloadWorkspace1.Spec.Cluster), "dns-ws1-ns2") - t.Logf("Downstream namespace 2 in workspace 1 is %s", downstreamWS1NS2) - - downstreamWS2NS1 := syncer.DownstreamNamespaceFor(t, logicalcluster.Name(workloadWorkspace2.Spec.Cluster), "dns-ws2-ns1") - t.Logf("Downstream namespace 1 in workspace 2 is %s", downstreamWS2NS1) - - t.Log("Checking network policies have been created") - framework.Eventually(t, func() (success bool, reason string) { - np, err := downstreamKubeClient.NetworkingV1().NetworkPolicies(syncer.SyncerID).List(ctx, metav1.ListOptions{}) - if err != 
nil { - return false, fmt.Sprintf("error while getting network policies: %v\n", err) - } - if len(np.Items) != 2 { - return false, fmt.Sprintf("expecting 2 network policies, got: %d\n", len(np.Items)) - } - return true, "" - }, wait.ForeverTestTimeout, time.Millisecond*500, "Network policies haven't been created") - - t.Log("Checking fully qualified DNS name resolves") - framework.Eventually(t, checkLogs(ctx, t, downstreamKubeClient, downstreamWS1NS1, "ping-fully-qualified", "PING svc.dns-ws1-ns1.svc.cluster.local ("), - wait.ForeverTestTimeout, time.Millisecond*500, "Service name was not resolved") - - t.Log("Checking not qualified DNS name resolves") - framework.Eventually(t, checkLogs(ctx, t, downstreamKubeClient, downstreamWS1NS1, "ping-not-qualified", "PING svc ("), - wait.ForeverTestTimeout, time.Millisecond*500, "Service name was not resolved") - - t.Log("Checking DNS name resolves across namespaces in same workspace") - framework.Eventually(t, checkLogs(ctx, t, downstreamKubeClient, downstreamWS1NS2, "ping-across-namespace", "PING svc.dns-ws1-ns1 ("), - wait.ForeverTestTimeout, time.Millisecond*500, "Service name was not resolved") - - t.Log("Checking DNS name does not resolve across workspaces") - framework.Eventually(t, checkLogs(ctx, t, downstreamKubeClient, downstreamWS2NS1, "ping-fully-qualified-fail", "ping: bad"), - wait.ForeverTestTimeout, time.Millisecond*500, "Service name was resolved") - - t.Log("Change ping-fully-qualified deployment DNS config to use workspace 2 nameserver and check the DNS name does not resolve") - dnsServices, err := downstreamKubeClient.CoreV1().Services(syncer.SyncerID).List(ctx, metav1.ListOptions{}) - require.NoError(t, err) - require.True(t, len(dnsServices.Items) >= 2) - - deployment, err := downstreamKubeClient.AppsV1().Deployments(downstreamWS1NS1).Get(ctx, "ping-fully-qualified", metav1.GetOptions{}) - require.NoError(t, err) - - existingDNSIP := deployment.Spec.Template.Spec.DNSConfig.Nameservers[0] - newDNSIP := "" - for _, svc := range dnsServices.Items { - if strings.HasPrefix(svc.Name, "kcp-dns-") { - if svc.Spec.ClusterIP != existingDNSIP { - newDNSIP = svc.Spec.ClusterIP - break - } - } - } - require.NotEmpty(t, newDNSIP, "could not find another DNS service") - deployment.Spec.Template.Spec.DNSConfig.Nameservers[0] = newDNSIP - err = retry.RetryOnConflict(retry.DefaultRetry, func() error { - deployment, err := downstreamKubeClient.AppsV1().Deployments(downstreamWS1NS1).Get(ctx, "ping-fully-qualified", metav1.GetOptions{}) - if err != nil { - return err - } - - deployment.Spec.Template.Spec.DNSConfig.Nameservers[0] = newDNSIP - _, err = downstreamKubeClient.AppsV1().Deployments(downstreamWS1NS1).Update(ctx, deployment, metav1.UpdateOptions{}) - return err - }) - require.NoError(t, err) - - framework.Eventually(t, checkLogs(ctx, t, downstreamKubeClient, downstreamWS1NS1, "ping-fully-qualified", "ping: bad"), - wait.ForeverTestTimeout, time.Millisecond*500, "Service name was resolved") -} - -//nolint:unused -func checkLogs(ctx context.Context, t *testing.T, downstreamKubeClient *kubernetes.Clientset, downstreamNamespace, containerName, expectedPrefix string) func() (success bool, reason string) { - t.Helper() - - return func() (success bool, reason string) { - pods, err := downstreamKubeClient.CoreV1().Pods(downstreamNamespace).List(ctx, metav1.ListOptions{}) - if err != nil { - return false, fmt.Sprintf("Error getting pods: %v", err) - } - - for _, pod := range pods.Items { - for _, c := range pod.Spec.Containers { - if c.Name == 
containerName { - res, err := downstreamKubeClient.CoreV1().Pods(downstreamNamespace).GetLogs(pod.Name, &corev1.PodLogOptions{ - Container: c.Name, - }).DoRaw(ctx) - - if err != nil { - return false, fmt.Sprintf("Failed to get logs for pod %s/%s container %s: %v", pod.Namespace, pod.Name, c.Name, err) - } - - for _, line := range strings.Split(string(res), "\n") { - t.Logf("Pod %s/%s container %s: %s", pod.Namespace, pod.Name, c.Name, line) - if strings.HasPrefix(line, expectedPrefix) { - return true, "" - } - } - } - } - } - return false, "no pods" - } -} diff --git a/test/e2e/syncer/dns/workspace1/0-namespace1.yaml b/test/e2e/syncer/dns/workspace1/0-namespace1.yaml deleted file mode 100644 index aefe47b9a1c..00000000000 --- a/test/e2e/syncer/dns/workspace1/0-namespace1.yaml +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: dns-ws1-ns1 diff --git a/test/e2e/syncer/dns/workspace1/0-namespace2.yaml b/test/e2e/syncer/dns/workspace1/0-namespace2.yaml deleted file mode 100644 index 4e33b07b8c1..00000000000 --- a/test/e2e/syncer/dns/workspace1/0-namespace2.yaml +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: dns-ws1-ns2 diff --git a/test/e2e/syncer/dns/workspace1/embed.go b/test/e2e/syncer/dns/workspace1/embed.go deleted file mode 100644 index 8875105a80f..00000000000 --- a/test/e2e/syncer/dns/workspace1/embed.go +++ /dev/null @@ -1,24 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package workspace1 - -import ( - "embed" -) - -//go:embed *.yaml -var FS embed.FS diff --git a/test/e2e/syncer/dns/workspace1/ping-across-namespace.yaml b/test/e2e/syncer/dns/workspace1/ping-across-namespace.yaml deleted file mode 100644 index defa2e22855..00000000000 --- a/test/e2e/syncer/dns/workspace1/ping-across-namespace.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: ping-across-namespace - namespace: dns-ws1-ns2 -spec: - selector: - matchLabels: - app: ping-across-namespace - template: - metadata: - labels: - app: ping-across-namespace - spec: - containers: - - name: ping-across-namespace - image: ghcr.io/distroless/alpine-base:latest - command: ['sh', '-c', 'until ping svc.dns-ws1-ns1; do sleep 1; done'] diff --git a/test/e2e/syncer/dns/workspace1/ping-fully-qualified.yaml b/test/e2e/syncer/dns/workspace1/ping-fully-qualified.yaml deleted file mode 100644 index c6fa8ff10ea..00000000000 --- a/test/e2e/syncer/dns/workspace1/ping-fully-qualified.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: ping-fully-qualified - namespace: dns-ws1-ns1 -spec: - selector: - matchLabels: - app: ping-fully-qualified - template: - metadata: - labels: - app: ping-fully-qualified - spec: - containers: - - name: ping-fully-qualified - image: ghcr.io/distroless/alpine-base:latest - command: ['sh', '-c', 'until ping svc.dns-ws1-ns1.svc.cluster.local; do sleep 1; done'] diff --git a/test/e2e/syncer/dns/workspace1/ping-not-qualified.yaml b/test/e2e/syncer/dns/workspace1/ping-not-qualified.yaml deleted file mode 100644 index d66c263ef7c..00000000000 --- a/test/e2e/syncer/dns/workspace1/ping-not-qualified.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: ping-not-qualified - namespace: dns-ws1-ns1 -spec: - selector: - matchLabels: - app: ping-not-qualified - template: - metadata: - labels: - app: ping-not-qualified - spec: - containers: - - name: ping-not-qualified - image: ghcr.io/distroless/alpine-base:latest - command: ['sh', '-c', 'until ping svc; do sleep 1; done'] diff --git a/test/e2e/syncer/dns/workspace1/service.yaml b/test/e2e/syncer/dns/workspace1/service.yaml deleted file mode 100644 index a5a488cab2c..00000000000 --- a/test/e2e/syncer/dns/workspace1/service.yaml +++ /dev/null @@ -1,10 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: svc - namespace: dns-ws1-ns1 -spec: - ports: - - protocol: TCP - port: 80 - targetPort: 80 diff --git a/test/e2e/syncer/dns/workspace2/0-namespace.yaml b/test/e2e/syncer/dns/workspace2/0-namespace.yaml deleted file mode 100644 index 4719c0131c4..00000000000 --- a/test/e2e/syncer/dns/workspace2/0-namespace.yaml +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: dns-ws2-ns1 diff --git a/test/e2e/syncer/dns/workspace2/embed.go b/test/e2e/syncer/dns/workspace2/embed.go deleted file mode 100644 index 85b505e1089..00000000000 --- a/test/e2e/syncer/dns/workspace2/embed.go +++ /dev/null @@ -1,24 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package workspace2 - -import ( - "embed" -) - -//go:embed *.yaml -var FS embed.FS diff --git a/test/e2e/syncer/dns/workspace2/ping-fully-qualified-fail.yaml b/test/e2e/syncer/dns/workspace2/ping-fully-qualified-fail.yaml deleted file mode 100644 index 3eb89651681..00000000000 --- a/test/e2e/syncer/dns/workspace2/ping-fully-qualified-fail.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: ping-fully-qualified-fail - namespace: dns-ws2-ns1 -spec: - selector: - matchLabels: - app: ping-fully-qualified-fail - template: - metadata: - labels: - app: ping-fully-qualified-fail - spec: - containers: - - name: ping-fully-qualified-fail - image: ghcr.io/distroless/alpine-base:latest - command: ['sh', '-c', 'until ping svc.dns-ws1-ns1.svc.cluster.local; do sleep 1; done'] diff --git a/test/e2e/syncer/endpoints/deployment-with-upsync.yaml b/test/e2e/syncer/endpoints/deployment-with-upsync.yaml deleted file mode 100644 index 39c2453de2c..00000000000 --- a/test/e2e/syncer/endpoints/deployment-with-upsync.yaml +++ /dev/null @@ -1,23 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: with-endpoints-upsync - namespace: default -spec: - selector: - matchLabels: - app.kubernetes.io/name: with-endpoints-upsync - template: - metadata: - labels: - app.kubernetes.io/name: with-endpoints-upsync - spec: - containers: - - name: busybox - image: ghcr.io/distroless/busybox:latest - command: - - /bin/sh - - -ec - - | - echo "Going to sleep" - tail -f /dev/null diff --git a/test/e2e/syncer/endpoints/deployment-without-upsync.yaml b/test/e2e/syncer/endpoints/deployment-without-upsync.yaml deleted file mode 100644 index 09e21e1b246..00000000000 --- a/test/e2e/syncer/endpoints/deployment-without-upsync.yaml +++ /dev/null @@ -1,23 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: without-endpoints-upsync - namespace: default -spec: - selector: - matchLabels: - app.kubernetes.io/name: without-endpoints-upsync - template: - metadata: - labels: - app.kubernetes.io/name: without-endpoints-upsync - spec: - containers: - - name: busybox - image: ghcr.io/distroless/busybox:latest - command: - - /bin/sh - - -ec - - | - echo "Going to sleep" - tail -f /dev/null diff --git a/test/e2e/syncer/endpoints/embed.go b/test/e2e/syncer/endpoints/embed.go deleted file mode 100644 index a4323cdcb88..00000000000 --- a/test/e2e/syncer/endpoints/embed.go +++ /dev/null @@ -1,24 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package endpoints - -import ( - "embed" -) - -//go:embed *.yaml -var FS embed.FS diff --git a/test/e2e/syncer/endpoints/endpoints_test.go b/test/e2e/syncer/endpoints/endpoints_test.go deleted file mode 100644 index 8ad5660c564..00000000000 --- a/test/e2e/syncer/endpoints/endpoints_test.go +++ /dev/null @@ -1,119 +0,0 @@ -/* -Copyright 2023 The KCP Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package endpoints - -import ( - "context" - "fmt" - "testing" - "time" - - kcpkubernetesclientset "github.com/kcp-dev/client-go/kubernetes" - "github.com/kcp-dev/logicalcluster/v3" - "github.com/stretchr/testify/require" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/kubernetes" - - "github.com/kcp-dev/kcp/test/e2e/framework" -) - -func TestEndpointsUpsyncing(t *testing.T) { - t.Parallel() - framework.Suite(t, "transparent-multi-cluster:requires-kind") - - if len(framework.TestConfig.PClusterKubeconfig()) == 0 { - t.Skip("Test requires a pcluster") - } - - ctx, cancelFunc := context.WithCancel(context.Background()) - t.Cleanup(cancelFunc) - - upstreamServer := framework.SharedKcpServer(t) - - upstreamConfig := upstreamServer.BaseConfig(t) - - orgPath, _ := framework.NewOrganizationFixture(t, upstreamServer, framework.TODO_WithoutMultiShardSupport()) - - locationWorkspacePath, _ := framework.NewWorkspaceFixture(t, upstreamServer, orgPath, framework.WithName("location"), framework.TODO_WithoutMultiShardSupport()) - - workloadWorkspacePath, workloadWorkspace := framework.NewWorkspaceFixture(t, upstreamServer, orgPath, framework.WithName("workload"), framework.TODO_WithoutMultiShardSupport()) - - upstreamKubeClusterClient, err := kcpkubernetesclientset.NewForConfig(upstreamConfig) - require.NoError(t, err) - - syncer := framework.NewSyncerFixture(t, upstreamServer, locationWorkspacePath, - framework.WithSyncedUserWorkspaces(workloadWorkspace), - ).CreateSyncTargetAndApplyToDownstream(t).StartSyncer(t) - syncer.WaitForSyncTargetReady(ctx, t) - - downstreamKubeClient, err := kubernetes.NewForConfig(syncer.DownstreamConfig) - require.NoError(t, err) - - t.Logf("Bind workload workspace to location workspace") - framework.NewBindCompute(t, workloadWorkspacePath, upstreamServer, - framework.WithLocationWorkspaceWorkloadBindOption(locationWorkspacePath), - ).Bind(t) - - err = framework.CreateResources(ctx, FS, upstreamConfig, workloadWorkspacePath) - require.NoError(t, err) - - downstreamNamespace := syncer.DownstreamNamespaceFor(t, logicalcluster.Name(workloadWorkspace.Spec.Cluster), "default") - t.Logf("Downstream namespace is %s", downstreamNamespace) - - t.Log("Checking services have been created downstream") - framework.Eventually(t, func() (success bool, reason string) { - services, err := downstreamKubeClient.CoreV1().Services(downstreamNamespace).List(ctx, metav1.ListOptions{}) - if err != nil { - return false, fmt.Sprintf("error while getting services: %v\n", err) - } - if len(services.Items) != 2 { - return false, fmt.Sprintf("expecting 2 services, got: %d\n", len(services.Items)) - } - return true, "" - }, wait.ForeverTestTimeout, time.Millisecond*500, "Services haven't been created") - - t.Log("Checking endpoints have been created downstream") - framework.Eventually(t, func() (success bool, reason string) { - endpoints, err := downstreamKubeClient.CoreV1().Endpoints(downstreamNamespace).List(ctx, 
metav1.ListOptions{}) - if err != nil { - return false, fmt.Sprintf("error while getting endpoints: %v\n", err) - } - if len(endpoints.Items) != 2 { - return false, fmt.Sprintf("expecting 2 endpoints, got: %d\n", len(endpoints.Items)) - } - return true, "" - }, wait.ForeverTestTimeout, time.Millisecond*500, "Endpoints haven't been created") - - t.Log("Checking that only 1 endpoint has been upsynced upstream") - framework.Eventually(t, func() (success bool, reason string) { - endpoints, err := upstreamKubeClusterClient.Cluster(workloadWorkspacePath).CoreV1().Endpoints("default").List(ctx, metav1.ListOptions{}) - if err != nil { - return false, fmt.Sprintf("error while getting endpoints: %v\n", err) - } - if len(endpoints.Items) != 1 { - return false, fmt.Sprintf("expecting 1 endpoint, got: %d\n", len(endpoints.Items)) - } - - if endpoints.Items[0].Name != "with-endpoints-upsync" { - return false, fmt.Sprintf("expecting endpoint with name 'with-endpoints-upsync', got: %s\n", endpoints.Items[0].Name) - } - - return true, "" - }, wait.ForeverTestTimeout, time.Millisecond*500, "Endpoint hasn't been upsynced") -} diff --git a/test/e2e/syncer/endpoints/service-with-upsync.yaml b/test/e2e/syncer/endpoints/service-with-upsync.yaml deleted file mode 100644 index 19167915ecf..00000000000 --- a/test/e2e/syncer/endpoints/service-with-upsync.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: with-endpoints-upsync - namespace: default - annotations: - experimental.workload.kcp.io/upsync-derived-resources: endpoints -spec: - selector: - app.kubernetes.io/name: with-endpoints-upsync - ports: - - protocol: TCP - port: 80 - targetPort: 80 diff --git a/test/e2e/syncer/endpoints/service-without-upsync.yaml b/test/e2e/syncer/endpoints/service-without-upsync.yaml deleted file mode 100644 index c01f39735e3..00000000000 --- a/test/e2e/syncer/endpoints/service-without-upsync.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: without-endpoints-upsync - namespace: default -spec: - selector: - app.kubernetes.io/name: without-endpoints-upsync - ports: - - protocol: TCP - port: 80 - targetPort: 80 diff --git a/test/e2e/syncer/in-cluster-config-test-deployment.yaml b/test/e2e/syncer/in-cluster-config-test-deployment.yaml deleted file mode 100644 index d899548811e..00000000000 --- a/test/e2e/syncer/in-cluster-config-test-deployment.yaml +++ /dev/null @@ -1,21 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: icc-test -spec: - replicas: 1 - selector: - matchLabels: - app: icc-test - template: - metadata: - labels: - app: icc-test - spec: - containers: - - name: icc-test - image: ghcr.io/kcp-dev/kcp/kcp-test-image:main - env: - - name: CONFIGMAP_NAME - value: expected-configmap - # TODO(geoberle) once the test image contains more test clients, we need to define args for the entrypoint or something similar diff --git a/test/e2e/syncer/multishard/multishard_test.go b/test/e2e/syncer/multishard/multishard_test.go deleted file mode 100644 index af8b69934ba..00000000000 --- a/test/e2e/syncer/multishard/multishard_test.go +++ /dev/null @@ -1,181 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License.
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package multishard - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - kcpkubernetesclientset "github.com/kcp-dev/client-go/kubernetes" - "github.com/kcp-dev/logicalcluster/v3" - "github.com/stretchr/testify/require" - - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/wait" - appsv1apply "k8s.io/client-go/applyconfigurations/apps/v1" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/util/retry" - - "github.com/kcp-dev/kcp/test/e2e/framework" - "github.com/kcp-dev/kcp/test/e2e/syncer/multishard/workspace1" - "github.com/kcp-dev/kcp/test/e2e/syncer/multishard/workspace2" -) - -// TestSyncingFromMultipleShards ensures that the syncer can effectively sync from several workspaces hosted on distinct shards -// with distinct virtual workspace URLs. -func TestSyncingFromMultipleShards(t *testing.T) { - t.Skip("Test is flaky, and we are going to remove the syncer soon anyway") - - t.Parallel() - framework.Suite(t, "transparent-multi-cluster:requires-kind") - - ctx, cancelFunc := context.WithCancel(context.Background()) - t.Cleanup(cancelFunc) - - upstreamServer := framework.SharedKcpServer(t) - upstreamConfig := upstreamServer.BaseConfig(t) - - shardNames := upstreamServer.ShardNames() - if len(shardNames) < 2 { - t.Skip("Test requires at least 2 shards") - } - - orgPath, _ := framework.NewOrganizationFixture(t, upstreamServer, framework.TODO_WithoutMultiShardSupport()) - locationPath, locationWs := framework.NewWorkspaceFixture(t, upstreamServer, orgPath, framework.TODO_WithoutMultiShardSupport()) - locationWsName := logicalcluster.Name(locationWs.Spec.Cluster) - - workloadWorkspace1Path, workloadWorkspace1 := framework.NewWorkspaceFixture(t, upstreamServer, orgPath, framework.WithShard(shardNames[0])) - workloadWorkspace1Name := logicalcluster.Name(workloadWorkspace1.Spec.Cluster) - - workloadWorkspace2Path, workloadWorkspace2 := framework.NewWorkspaceFixture(t, upstreamServer, orgPath, framework.WithShard(shardNames[1])) - workloadWorkspace2Name := logicalcluster.Name(workloadWorkspace2.Spec.Cluster) - - upstreamKubeClusterClient, err := kcpkubernetesclientset.NewForConfig(upstreamConfig) - require.NoError(t, err) - - // Creating synctarget and deploying the syncer - syncer := framework.NewSyncerFixture(t, upstreamServer, locationPath, framework.WithSyncedUserWorkspaces(workloadWorkspace1, workloadWorkspace2)). 
- CreateSyncTargetAndApplyToDownstream(t).StartSyncer(t) - - t.Log("Binding workspace 1 to the location workspace") - framework.NewBindCompute(t, workloadWorkspace1Name.Path(), upstreamServer, - framework.WithLocationWorkspaceWorkloadBindOption(locationWsName.Path()), - ).Bind(t) - - t.Log("Binding workspace 2 to the location workspace") - framework.NewBindCompute(t, workloadWorkspace2Name.Path(), upstreamServer, - framework.WithLocationWorkspaceWorkloadBindOption(locationWsName.Path()), - ).Bind(t) - - err = framework.CreateResources(ctx, workspace1.FS, upstreamConfig, workloadWorkspace1Path) - require.NoError(t, err) - - err = framework.CreateResources(ctx, workspace2.FS, upstreamConfig, workloadWorkspace2Path) - require.NoError(t, err) - - downstreamWS1NS := syncer.DownstreamNamespaceFor(t, workloadWorkspace1Name, "default") - t.Logf("Downstream namespace in workspace 1 is %s", downstreamWS1NS) - - downstreamWS2NS := syncer.DownstreamNamespaceFor(t, workloadWorkspace2Name, "default") - t.Logf("Downstream namespace in workspace 2 is %s", downstreamWS2NS) - - downstreamKubeClient, err := kubernetes.NewForConfig(syncer.DownstreamConfig) - require.NoError(t, err) - - framework.Eventually(t, func() (success bool, reason string) { - _, err := downstreamKubeClient.AppsV1().Deployments(downstreamWS1NS).Get(ctx, "test1", metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - return false, "deployment test1 not found downstream" - } - require.NoError(t, err) - return true, "" - }, wait.ForeverTestTimeout, time.Millisecond*500, "deployment test1 not synced downstream") - - framework.Eventually(t, func() (success bool, reason string) { - _, err := downstreamKubeClient.AppsV1().Deployments(downstreamWS2NS).Get(ctx, "test2", metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - return false, "deployment test2 not found downstream" - } - require.NoError(t, err) - return true, "" - }, wait.ForeverTestTimeout, time.Millisecond*500, "deployment test2 not synced downstream") - - if len(framework.TestConfig.PClusterKubeconfig()) == 0 { - err = retry.RetryOnConflict(retry.DefaultRetry, func() error { - _, err := downstreamKubeClient.AppsV1().Deployments(downstreamWS1NS).ApplyStatus(ctx, appsv1apply.Deployment("test1", downstreamWS1NS).WithStatus(&appsv1apply.DeploymentStatusApplyConfiguration{ - Replicas: ptr(int32(1)), - UpdatedReplicas: ptr(int32(1)), - AvailableReplicas: ptr(int32(1)), - ReadyReplicas: ptr(int32(1)), - }), metav1.ApplyOptions{FieldManager: "e2e-test", Force: true}) - return err - }) - require.NoError(t, err) - - err = retry.RetryOnConflict(retry.DefaultRetry, func() error { - _, err := downstreamKubeClient.AppsV1().Deployments(downstreamWS2NS).ApplyStatus(ctx, appsv1apply.Deployment("test2", downstreamWS2NS).WithStatus(&appsv1apply.DeploymentStatusApplyConfiguration{ - Replicas: ptr(int32(1)), - UpdatedReplicas: ptr(int32(1)), - AvailableReplicas: ptr(int32(0)), - ReadyReplicas: ptr(int32(0)), - }), metav1.ApplyOptions{FieldManager: "e2e-test", Force: true}) - return err - }) - require.NoError(t, err) - } - - framework.Eventually(t, func() (success bool, reason string) { - test1Downstream, err := downstreamKubeClient.AppsV1().Deployments(downstreamWS1NS).Get(ctx, "test1", metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - return false, "deployment test1 not found downstream" - } - require.NoError(t, err) - - test1Upstream, err := upstreamKubeClusterClient.AppsV1().Cluster(workloadWorkspace1Path).Deployments("default").Get(ctx, "test1", metav1.GetOptions{}) - if
apierrors.IsNotFound(err) { - return false, "deployment test1 not found upstream" - } - require.NoError(t, err) - diff := cmp.Diff(test1Downstream.Status, test1Upstream.Status) - return len(diff) == 0, fmt.Sprintf("status different between downstream and upstream: %s", diff) - }, wait.ForeverTestTimeout, time.Millisecond*500, "status of deployment test1 not synced back upstream") - - framework.Eventually(t, func() (success bool, reason string) { - test2Downstream, err := downstreamKubeClient.AppsV1().Deployments(downstreamWS2NS).Get(ctx, "test2", metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - return false, "deployment test2 not found downstream" - } - require.NoError(t, err) - - test2Upstream, err := upstreamKubeClusterClient.AppsV1().Cluster(workloadWorkspace2Path).Deployments("default").Get(ctx, "test2", metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - return false, "deployment test2 not found upstream" - } - require.NoError(t, err) - diff := cmp.Diff(test2Downstream.Status, test2Upstream.Status) - return len(diff) == 0, fmt.Sprintf("status different between downstream and upstream: %s", diff) - }, wait.ForeverTestTimeout, time.Millisecond*500, "status of deployment test2 not synced back upstream") -} - -func ptr[T interface{}](val T) *T { - other := val - return &other -} diff --git a/test/e2e/syncer/multishard/workspace1/deployment.yaml b/test/e2e/syncer/multishard/workspace1/deployment.yaml deleted file mode 100644 index 5c3efd63b49..00000000000 --- a/test/e2e/syncer/multishard/workspace1/deployment.yaml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: test1 - namespace: default -spec: - replicas: 1 - selector: - matchLabels: - app: nginx - template: - metadata: - labels: - app: nginx - spec: - containers: - - name: test1 - image: ghcr.io/distroless/busybox:latest - command: - - /bin/sh - - -ec - - | - echo "Going to sleep" - tail -f /dev/null diff --git a/test/e2e/syncer/multishard/workspace1/embed.go b/test/e2e/syncer/multishard/workspace1/embed.go deleted file mode 100644 index 7ff2f7f0df7..00000000000 --- a/test/e2e/syncer/multishard/workspace1/embed.go +++ /dev/null @@ -1,24 +0,0 @@ -/* -Copyright 2023 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License.
-*/ - -package workspace1 - -import ( - "embed" -) - -//go:embed *.yaml -var FS embed.FS diff --git a/test/e2e/syncer/multishard/workspace2/deployment.yaml b/test/e2e/syncer/multishard/workspace2/deployment.yaml deleted file mode 100644 index b3b3f282d25..00000000000 --- a/test/e2e/syncer/multishard/workspace2/deployment.yaml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: test2 - namespace: default -spec: - replicas: 1 - selector: - matchLabels: - app: nginx - template: - metadata: - labels: - app: nginx - spec: - containers: - - name: test2 - image: ghcr.io/distroless/busybox:latest - command: - - /bin/sh - - -ec - - | - echo "Going to sleep" - tail -f /dev/null diff --git a/test/e2e/syncer/multishard/workspace2/embed.go b/test/e2e/syncer/multishard/workspace2/embed.go deleted file mode 100644 index 3b321dcd49f..00000000000 --- a/test/e2e/syncer/multishard/workspace2/embed.go +++ /dev/null @@ -1,24 +0,0 @@ -/* -Copyright 2023 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package workspace2 - -import ( - "embed" -) - -//go:embed *.yaml -var FS embed.FS diff --git a/test/e2e/syncer/persistentvolume.yaml b/test/e2e/syncer/persistentvolume.yaml deleted file mode 100644 index f31d713a84a..00000000000 --- a/test/e2e/syncer/persistentvolume.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: v1 -kind: PersistentVolume -metadata: - name: syncer-test-pv -spec: - accessModes: - - ReadWriteOnce - capacity: - storage: 1Gi - nfs: - path: /tmp - server: 127.0.0.1 - storageClassName: standard - volumeMode: Filesystem diff --git a/test/e2e/syncer/syncer_test.go b/test/e2e/syncer/syncer_test.go deleted file mode 100644 index e05502e3aff..00000000000 --- a/test/e2e/syncer/syncer_test.go +++ /dev/null @@ -1,707 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package syncer - -import ( - "context" - "embed" - "encoding/json" - "fmt" - "sort" - "strings" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - kcpkubernetesclientset "github.com/kcp-dev/client-go/kubernetes" - "github.com/kcp-dev/logicalcluster/v3" - "github.com/stretchr/testify/require" - - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" - apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/apimachinery/pkg/util/yaml" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - "k8s.io/utils/pointer" - - "github.com/kcp-dev/kcp/pkg/syncer/shared" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" - kcpclientset "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster" - kubefixtures "github.com/kcp-dev/kcp/test/e2e/fixtures/kube" - "github.com/kcp-dev/kcp/test/e2e/framework" -) - -//go:embed *.yaml -var embeddedResources embed.FS - -func TestSyncerLifecycle(t *testing.T) { - t.Parallel() - framework.Suite(t, "transparent-multi-cluster") - - upstreamServer := framework.SharedKcpServer(t) - - t.Log("Creating an organization") - orgPath, _ := framework.NewOrganizationFixture(t, upstreamServer, framework.TODO_WithoutMultiShardSupport()) - - t.Log("Creating a workspace") - wsPath, ws := framework.NewWorkspaceFixture(t, upstreamServer, orgPath, framework.TODO_WithoutMultiShardSupport()) - - // The Start method of the fixture will initiate syncer start and then wait for - // its sync target to go ready. This implicitly validates the syncer - // heartbeating and the heartbeat controller setting the sync target ready in - // response. 
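For reference, the readiness wait mentioned in the comment above boils down to polling the SyncTarget until a Ready-style condition turns true. A minimal sketch of that poll, assuming the SyncTarget surfaces such a condition in Status.Conditions (the exact condition type is an assumption) and reusing the kcpClient, ctx, and syncerFixture created in this test:

	framework.Eventually(t, func() (bool, string) {
		st, err := kcpClient.Cluster(wsPath).WorkloadV1alpha1().SyncTargets().Get(ctx, syncerFixture.SyncerConfig.SyncTargetName, metav1.GetOptions{})
		if err != nil {
			return false, err.Error() // keep polling through transient errors
		}
		// The heartbeat controller is expected to flip this condition once the
		// syncer starts reporting in.
		for _, c := range st.Status.Conditions {
			if c.Type == "Ready" && c.Status == corev1.ConditionTrue {
				return true, ""
			}
		}
		return false, "sync target has no Ready=True condition yet"
	}, wait.ForeverTestTimeout, time.Millisecond*100, "sync target never became ready")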
- syncerFixture := framework.NewSyncerFixture(t, upstreamServer, wsPath, - framework.WithExtraResources("persistentvolumes"), - framework.WithSyncedUserWorkspaces(ws), - framework.WithDownstreamPreparation(func(config *rest.Config, isFakePCluster bool) { - if !isFakePCluster { - // Only need to install services and ingresses in a logical cluster - return - } - sinkCrdClient, err := apiextensionsclientset.NewForConfig(config) - require.NoError(t, err, "failed to create apiextensions client") - t.Logf("Installing test CRDs into sink cluster...") - kubefixtures.Create(t, sinkCrdClient.ApiextensionsV1().CustomResourceDefinitions(), - metav1.GroupResource{Group: "core.k8s.io", Resource: "persistentvolumes"}, - ) - require.NoError(t, err) - })).CreateSyncTargetAndApplyToDownstream(t).StartSyncer(t) - - ctx, cancelFunc := context.WithCancel(context.Background()) - t.Cleanup(cancelFunc) - - kcpClient, err := kcpclientset.NewForConfig(upstreamServer.BaseConfig(t)) - require.NoError(t, err) - t.Logf("Waiting for negotiated API to be generated...") - require.Eventually(t, func() bool { - negotiatedAPIs, err := kcpClient.Cluster(wsPath).ApiresourceV1alpha1().NegotiatedAPIResources().List(ctx, metav1.ListOptions{}) - if err != nil { - return false - } - return len(negotiatedAPIs.Items) > 0 - }, wait.ForeverTestTimeout, time.Millisecond*100, "negotiated APIs are not generated") - - t.Logf("Bind location workspace") - framework.NewBindCompute(t, wsPath, upstreamServer, framework.WithAPIExportsWorkloadBindOption("root:compute:kubernetes", workloadv1alpha1.ImportedAPISExportName)).Bind(t) - - upstreamConfig := upstreamServer.BaseConfig(t) - upstreamKubeClusterClient, err := kcpkubernetesclientset.NewForConfig(upstreamConfig) - require.NoError(t, err) - - t.Log("Creating upstream namespace...") - upstreamNamespace, err := upstreamKubeClusterClient.Cluster(wsPath).CoreV1().Namespaces().Create(ctx, &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-syncer", - }, - }, metav1.CreateOptions{}) - require.NoError(t, err) - - downstreamKubeClient, err := kubernetes.NewForConfig(syncerFixture.DownstreamConfig) - require.NoError(t, err) - - upstreamKcpClient, err := kcpclientset.NewForConfig(syncerFixture.SyncerConfig.UpstreamConfig) - require.NoError(t, err) - - syncTarget, err := upstreamKcpClient.Cluster(syncerFixture.SyncerConfig.SyncTargetPath).WorkloadV1alpha1().SyncTargets().Get(ctx, - syncerFixture.SyncerConfig.SyncTargetName, - metav1.GetOptions{}, - ) - require.NoError(t, err) - - desiredNSLocator := shared.NewNamespaceLocator(logicalcluster.Name(ws.Spec.Cluster), logicalcluster.From(syncTarget), - syncTarget.GetUID(), syncTarget.Name, upstreamNamespace.Name) - downstreamNamespaceName, err := shared.PhysicalClusterNamespaceName(desiredNSLocator) - require.NoError(t, err) - - t.Logf("Waiting for downstream namespace to be created...") - require.Eventually(t, func() bool { - _, err = downstreamKubeClient.CoreV1().Namespaces().Get(ctx, downstreamNamespaceName, metav1.GetOptions{}) - if err != nil { - if apierrors.IsNotFound(err) { - return false - } - require.NoError(t, err) - return false - } - return true - }, wait.ForeverTestTimeout, time.Millisecond*100, "downstream namespace %s for upstream namespace %s was not created", downstreamNamespaceName, upstreamNamespace.Name) - - configMapName := "kcp-root-ca.crt" - t.Logf("Waiting for downstream configmap %s/%s to be created...", downstreamNamespaceName, configMapName) - require.Eventually(t, func() bool { - _, err =
downstreamKubeClient.CoreV1().ConfigMaps(downstreamNamespaceName).Get(ctx, configMapName, metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - return false - } - if err != nil { - t.Errorf("saw an error waiting for downstream configmap %s/%s to be created: %v", downstreamNamespaceName, configMapName, err) - return false - } - return true - }, wait.ForeverTestTimeout, time.Millisecond*100, "downstream configmap %s/%s was not created", downstreamNamespaceName, configMapName) - - t.Logf("Deleting the downstream kcp-root-ca.crt configmap to ensure it is recreated.") - err = downstreamKubeClient.CoreV1().ConfigMaps(downstreamNamespaceName).Delete(ctx, configMapName, metav1.DeleteOptions{}) - require.NoError(t, err) - - t.Logf("Waiting for downstream configmap %s/%s to be recreated...", downstreamNamespaceName, configMapName) - framework.Eventually(t, func() (bool, string) { - _, err = downstreamKubeClient.CoreV1().ConfigMaps(downstreamNamespaceName).Get(ctx, configMapName, metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - return false, "not found" - } - if err != nil { - t.Errorf("saw an error waiting for downstream configmap %s/%s to be recreated: %v", downstreamNamespaceName, configMapName, err) - return false, "error getting configmap" - } - return true, "" - }, wait.ForeverTestTimeout, time.Millisecond*100, "downstream configmap %s/%s was not recreated", downstreamNamespaceName, configMapName) - - t.Log("Creating upstream deployment...") - - deploymentYAML, err := embeddedResources.ReadFile("deployment.yaml") - require.NoError(t, err, "failed to read embedded deployment") - - var deployment *appsv1.Deployment - err = yaml.Unmarshal(deploymentYAML, &deployment) - require.NoError(t, err, "failed to unmarshal deployment") - t.Logf("unmarshalled into: %#v", deployment) - - // This test created a new workspace that initially lacked support for deployments, but once the - // sync target went ready (checked by the syncer fixture's Start method) the api importer - // will have enabled deployments in the logical cluster. 
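The create-retry loop below papers over the delay until the API importer has enabled deployments in the logical cluster. An equivalent, more explicit approach would be to poll discovery until apps/v1 starts serving deployments; a sketch, under the assumption that the cluster-aware client exposes the standard per-cluster discovery interface:

	framework.Eventually(t, func() (bool, string) {
		resources, err := upstreamKubeClusterClient.Cluster(wsPath).Discovery().ServerResourcesForGroupVersion("apps/v1")
		if err != nil {
			return false, err.Error() // the group/version may not be served yet
		}
		for _, r := range resources.APIResources {
			if r.Name == "deployments" {
				return true, ""
			}
		}
		return false, "apps/v1 does not serve deployments yet"
	}, wait.ForeverTestTimeout, time.Millisecond*100, "deployments API never became available upstream")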
- t.Logf("Waiting for deployment to be created in upstream") - var upstreamDeployment *appsv1.Deployment - require.Eventually(t, func() bool { - upstreamDeployment, err = upstreamKubeClusterClient.Cluster(wsPath).AppsV1().Deployments(upstreamNamespace.Name).Create(ctx, deployment, metav1.CreateOptions{}) - return err == nil - }, wait.ForeverTestTimeout, time.Millisecond*100, "deployment not created") - - t.Logf("difference between what we sent and what we got: %v", cmp.Diff(deployment, upstreamDeployment)) - - syncTargetKey := workloadv1alpha1.ToSyncTargetKey(logicalcluster.From(syncTarget), syncTarget.Name) - - t.Logf("Waiting for upstream deployment %s/%s to get the syncer finalizer", upstreamNamespace.Name, upstreamDeployment.Name) - require.Eventually(t, func() bool { - deployment, err = upstreamKubeClusterClient.Cluster(wsPath).AppsV1().Deployments(upstreamNamespace.Name).Get(ctx, upstreamDeployment.Name, metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - return false - } - if err != nil { - t.Errorf("saw an error waiting for upstream deployment %s/%s to get the syncer finalizer: %v", upstreamNamespace.Name, upstreamDeployment.Name, err) - } - for _, finalizer := range deployment.Finalizers { - if finalizer == "workload.kcp.io/syncer-"+syncTargetKey { - return true - } - } - return false - }, wait.ForeverTestTimeout, time.Millisecond*100, "Upstream deployment %s/%s syncer finalizer was not added", upstreamNamespace.Name, upstreamDeployment.Name) - - t.Logf("Waiting for downstream deployment %s/%s to be created...", downstreamNamespaceName, upstreamDeployment.Name) - require.Eventually(t, func() bool { - deployment, err = downstreamKubeClient.AppsV1().Deployments(downstreamNamespaceName).Get(ctx, upstreamDeployment.Name, metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - return false - } - if err != nil { - t.Errorf("saw an error waiting for downstream deployment %s/%s to be created: %v", downstreamNamespaceName, upstreamDeployment.Name, err) - } - return true - }, wait.ForeverTestTimeout, time.Millisecond*100, "downstream deployment %s/%s was not synced", downstreamNamespaceName, upstreamDeployment.Name) - - if len(framework.TestConfig.PClusterKubeconfig()) > 0 { - t.Logf("Check for available replicas if downstream is capable of actually running the deployment") - expectedAvailableReplicas := int32(1) - var lastEvents time.Time - framework.Eventually(t, func() (bool, string) { - deployment, err = downstreamKubeClient.AppsV1().Deployments(downstreamNamespaceName).Get(ctx, upstreamDeployment.Name, metav1.GetOptions{}) - require.NoError(t, err) - if actual, expected := deployment.Status.AvailableReplicas, expectedAvailableReplicas; actual != expected { - lastEvents = dumpPodEvents(t, lastEvents, downstreamKubeClient, downstreamNamespaceName) - return false, fmt.Sprintf("deployment had %d available replicas, not %d", actual, expected) - } - return true, "" - }, wait.ForeverTestTimeout, time.Millisecond*100, "downstream deployment %s/%s didn't get available", downstreamNamespaceName, upstreamDeployment.Name) - - // This test creates a deployment upstream, and will run downstream with a mutated projected - // in-cluster kubernetes config that points back to KCP. The running container will use this config to - // create a configmap upstream to verify the correctness of the mutation. 
- t.Logf("Create upstream service account permissions for downstream in-cluster configuration test") - - configmapAdminRoleYAML, err := embeddedResources.ReadFile("configmap-admin-role.yaml") - require.NoError(t, err, "failed to read embedded role") - - var configmapAdminRole *rbacv1.Role - err = yaml.Unmarshal(configmapAdminRoleYAML, &configmapAdminRole) - require.NoError(t, err, "failed to unmarshal role") - - _, err = upstreamKubeClusterClient.Cluster(wsPath).RbacV1().Roles(upstreamNamespace.Name).Create(ctx, configmapAdminRole, metav1.CreateOptions{}) - require.NoError(t, err, "failed to create upstream role") - - configmapAdminRoleBindingYAML, err := embeddedResources.ReadFile("configmap-admin-rolebinding.yaml") - require.NoError(t, err, "failed to read embedded rolebinding") - - var configmapAdminRoleBinding *rbacv1.RoleBinding - err = yaml.Unmarshal(configmapAdminRoleBindingYAML, &configmapAdminRoleBinding) - require.NoError(t, err, "failed to unmarshal rolebinding") - - _, err = upstreamKubeClusterClient.Cluster(wsPath).RbacV1().RoleBindings(upstreamNamespace.Name).Create(ctx, configmapAdminRoleBinding, metav1.CreateOptions{}) - require.NoError(t, err, "failed to create upstream rolebinding") - - t.Logf("Creating upstream in-cluster configuration test deployment") - - iccDeploymentYAML, err := embeddedResources.ReadFile("in-cluster-config-test-deployment.yaml") - require.NoError(t, err, "failed to read embedded deployment") - - var iccDeployment *appsv1.Deployment - err = yaml.Unmarshal(iccDeploymentYAML, &iccDeployment) - require.NoError(t, err, "failed to unmarshal deployment") - iccDeployment.Spec.Template.Spec.Containers[0].Image = framework.TestConfig.KCPTestImage() - expectedConfigMapName := "expected-configmap" - iccDeployment.Spec.Template.Spec.Containers[0].Env[0].Value = expectedConfigMapName - - iccUpstreamDeployment, err := upstreamKubeClusterClient.Cluster(wsPath).AppsV1().Deployments(upstreamNamespace.Name).Create(ctx, iccDeployment, metav1.CreateOptions{}) - require.NoError(t, err, "failed to create icc-test deployment") - - t.Logf("Waiting for downstream in-cluster config test deployment %s/%s to be created...", downstreamNamespaceName, iccUpstreamDeployment.Name) - var logState map[string]*metav1.Time - framework.Eventually(t, func() (bool, string) { - deployment, err = downstreamKubeClient.AppsV1().Deployments(downstreamNamespaceName).Get(ctx, iccUpstreamDeployment.Name, metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - return false, "" - } - require.NoError(t, err) - dumpPodEvents(t, lastEvents, downstreamKubeClient, downstreamNamespaceName) - logState = dumpPodLogs(t, logState, downstreamKubeClient, downstreamNamespaceName) - if actual, expected := deployment.Status.AvailableReplicas, int32(1); actual != expected { - return false, fmt.Sprintf("deployment had %d available replicas, not %d", actual, expected) - } - return true, "" - }, wait.ForeverTestTimeout, time.Millisecond*100, "downstream deployment %s/%s was not synced", downstreamNamespaceName, iccUpstreamDeployment.Name) - - t.Logf("Waiting for configmap generated by icc-test deployment to show up upstream") - require.Eventually(t, func() bool { - logState = dumpPodLogs(t, logState, downstreamKubeClient, downstreamNamespaceName) - - _, err := upstreamKubeClusterClient.Cluster(wsPath).CoreV1().ConfigMaps(upstreamNamespace.Name).Get(ctx, expectedConfigMapName, metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - return false - } - require.NoError(t, err) - return true - }, wait.ForeverTestTimeout, 
time.Millisecond*100, "upstream configmap %s/%s was not found", upstreamNamespace.Name, expectedConfigMapName) - } - // Delete the deployment - err = downstreamKubeClient.AppsV1().Deployments(downstreamNamespaceName).Delete(ctx, deployment.Name, metav1.DeleteOptions{}) - require.NoError(t, err) - - // Wait the deployment to be recreated and check it is a different UID - t.Logf("Waiting for downstream deployment %s/%s to be created...", downstreamNamespaceName, upstreamDeployment.Name) - require.Eventually(t, func() bool { - newDeployment, err := downstreamKubeClient.AppsV1().Deployments(downstreamNamespaceName).Get(ctx, upstreamDeployment.Name, metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - return false - } - require.NoError(t, err) - return deployment.UID != newDeployment.UID - }, wait.ForeverTestTimeout, time.Millisecond*100, "downstream deployment %s/%s was not synced", downstreamNamespaceName, upstreamDeployment.Name) - - // Add a virtual Finalizer to the deployment and update it. - t.Logf("Adding a virtual finalizer to the upstream deployment %s/%s in order to simulate an external controller", upstreamNamespace.Name, upstreamDeployment.Name) - deploymentPatch := []byte(`{"metadata":{"annotations":{"finalizers.workload.kcp.io/` + syncTargetKey + `":"external-controller-finalizer"}}}`) - _, err = upstreamKubeClusterClient.Cluster(wsPath).AppsV1().Deployments(upstreamNamespace.Name).Patch(ctx, upstreamDeployment.Name, types.MergePatchType, deploymentPatch, metav1.PatchOptions{}) - require.NoError(t, err) - - t.Logf("Deleting upstream deployment %s/%s", upstreamNamespace.Name, upstreamDeployment.Name) - err = upstreamKubeClusterClient.Cluster(wsPath).AppsV1().Deployments(upstreamNamespace.Name).Delete(ctx, upstreamDeployment.Name, metav1.DeleteOptions{GracePeriodSeconds: pointer.Int64(0)}) - require.NoError(t, err) - - t.Logf("Checking if the upstream deployment %s/%s has the per-location deletion annotation set", upstreamNamespace.Name, upstreamDeployment.Name) - framework.Eventually(t, func() (bool, string) { - deployment, err := upstreamKubeClusterClient.Cluster(wsPath).AppsV1().Deployments(upstreamNamespace.Name).Get(ctx, upstreamDeployment.Name, metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - return false, "" - } - require.NoError(t, err) - if val, ok := deployment.GetAnnotations()["deletion.internal.workload.kcp.io/"+syncTargetKey]; !ok || val == "" { - return false, fmt.Sprintf("deployment did not have the %s annotation", "deletion.internal.workload.kcp.io/"+syncTargetKey) - } - return true, "" - }, wait.ForeverTestTimeout, time.Millisecond*100, "upstream Deployment %s/%s didn't get the per-location deletion annotation set or there was an error", upstreamNamespace.Name, upstreamDeployment.Name) - - t.Logf("Checking if upstream deployment %s/%s is deleted, shouldn't as the syncer will not remove its finalizer due to the virtual finalizer", upstreamNamespace.Name, upstreamDeployment.Name) - _, err = upstreamKubeClusterClient.Cluster(wsPath).AppsV1().Deployments(upstreamNamespace.Name).Get(ctx, upstreamDeployment.Name, metav1.GetOptions{}) - require.False(t, apierrors.IsNotFound(err)) - require.NoError(t, err) - - t.Logf("Checking if the downstream deployment %s/%s is deleted or not (shouldn't as there's a virtual finalizer that blocks the deletion of the downstream resource)", downstreamNamespaceName, upstreamDeployment.Name) - _, err = downstreamKubeClient.AppsV1().Deployments(downstreamNamespaceName).Get(ctx, upstreamDeployment.Name, metav1.GetOptions{}) - 
require.False(t, apierrors.IsNotFound(err)) - require.NoError(t, err) - - t.Logf("Deleting upstream namespace %s", upstreamNamespace.Name) - err = upstreamKubeClusterClient.Cluster(wsPath).CoreV1().Namespaces().Delete(ctx, upstreamNamespace.Name, metav1.DeleteOptions{}) - require.NoError(t, err) - - t.Logf("Checking if upstream namespace %s deletion timestamp is set", upstreamNamespace.Name) - framework.Eventually(t, func() (bool, string) { - namespace, err := upstreamKubeClusterClient.Cluster(wsPath).CoreV1().Namespaces().Get(ctx, upstreamNamespace.Name, metav1.GetOptions{}) - require.NoError(t, err) - if namespace.DeletionTimestamp == nil { - return false, "namespace deletion timestamp is not set" - } - return true, "" - }, wait.ForeverTestTimeout, time.Millisecond*100, "upstream Namespace %s was not deleted", upstreamNamespace.Name) - - t.Logf("Checking that downstream namespace %s is not marked for deletion or deleted; it shouldn't be, as there's a deployment with a virtual finalizer", downstreamNamespaceName) - require.Neverf(t, func() bool { - namespace, err := downstreamKubeClient.CoreV1().Namespaces().Get(ctx, downstreamNamespaceName, metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - return true - } - require.NoError(t, err) - return namespace.DeletionTimestamp != nil - }, 5*time.Second, time.Millisecond*100, "downstream Namespace %s was marked for deletion or deleted", downstreamNamespaceName) - - // Delete the virtual finalizer on the deployment and update it. - t.Logf("Removing the virtual finalizer on the upstream deployment %s/%s; the deployment deletion should go through after this", upstreamNamespace.Name, upstreamDeployment.Name) - deploymentPatch = []byte(`{"metadata":{"annotations":{"finalizers.workload.kcp.io/` + syncTargetKey + `": null}}}`) - _, err = upstreamKubeClusterClient.Cluster(wsPath).AppsV1().Deployments(upstreamNamespace.Name).Patch(ctx, upstreamDeployment.Name, types.MergePatchType, deploymentPatch, metav1.PatchOptions{}) - require.NoError(t, err) - - t.Logf("Waiting for upstream deployment %s/%s to be deleted", upstreamNamespace.Name, upstreamDeployment.Name) - require.Eventually(t, func() bool { - _, err := upstreamKubeClusterClient.Cluster(wsPath).AppsV1().Deployments(upstreamNamespace.Name).Get(ctx, upstreamDeployment.Name, metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - return true - } - require.NoError(t, err) - return false - }, wait.ForeverTestTimeout, time.Millisecond*100, "upstream Deployment %s/%s was not deleted", upstreamNamespace.Name, upstreamDeployment.Name) - - t.Logf("Waiting for downstream namespace %s to be marked for deletion or deleted", downstreamNamespaceName) - framework.Eventually(t, func() (bool, string) { - namespace, err := downstreamKubeClient.CoreV1().Namespaces().Get(ctx, downstreamNamespaceName, metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - return true, "namespace was deleted" - } - require.NoError(t, err) - if namespace.DeletionTimestamp != nil { - return true, "deletionTimestamp is set."
- } - return false, "" - }, wait.ForeverTestTimeout, time.Millisecond*100, "downstream Namespace %s was not marked for deletion or deleted", downstreamNamespaceName) - - t.Logf("Creating Persistent Volume to test cluster-wide resource syncing") - pvYAML, err := embeddedResources.ReadFile("persistentvolume.yaml") - require.NoError(t, err, "failed to read embedded persistentvolume") - - var persistentVolume *corev1.PersistentVolume - err = yaml.Unmarshal(pvYAML, &persistentVolume) - require.NoError(t, err, "failed to unmarshal persistentvolume") - - upstreamPersistentVolume, err := upstreamKubeClusterClient.Cluster(wsPath).CoreV1().PersistentVolumes().Create(ctx, persistentVolume, metav1.CreateOptions{}) - require.NoError(t, err, "failed to create persistentVolume") - - t.Logf("Waiting for the Persistent Volume to be scheduled upstream") - framework.Eventually(t, func() (bool, string) { - pv, err := upstreamKubeClusterClient.Cluster(wsPath).CoreV1().PersistentVolumes().Get(ctx, upstreamPersistentVolume.Name, metav1.GetOptions{}) - if err != nil { - return false, err.Error() - } - if val, ok := pv.GetLabels()["state.workload.kcp.io/"+syncTargetKey]; ok { - if val != "" { - return false, "state label is not empty, should be." - } - return true, "" - } - return false, "" - }, wait.ForeverTestTimeout, time.Millisecond*100, "Persistent Volume %s was not scheduled", upstreamPersistentVolume.Name) - - t.Logf("Updating the PV to force it to be scheduled downstream") - pvPatch := []byte(`{"metadata":{"labels":{"state.workload.kcp.io/` + syncTargetKey + `": "Sync"}}}`) - _, err = upstreamKubeClusterClient.Cluster(wsPath).CoreV1().PersistentVolumes().Patch(ctx, upstreamPersistentVolume.Name, types.MergePatchType, pvPatch, metav1.PatchOptions{}) - require.NoError(t, err, "failed to patch persistentVolume") - - t.Logf("Waiting for the Persistent Volume to be synced downstream and validate its NamespaceLocator") - framework.Eventually(t, func() (bool, string) { - pv, err := downstreamKubeClient.CoreV1().PersistentVolumes().Get(ctx, upstreamPersistentVolume.Name, metav1.GetOptions{}) - if err != nil { - return false, err.Error() - } - if val := pv.GetAnnotations()[shared.NamespaceLocatorAnnotation]; val != "" { - desiredNSLocator.Namespace = "" - desiredNSLocatorByte, err := json.Marshal(desiredNSLocator) - require.NoError(t, err, "failed to marshal namespaceLocator") - if string(desiredNSLocatorByte) != val { - return false, "namespaceLocator for persistentVolume doesn't match the expected one" - } - } - return true, "" - }, wait.ForeverTestTimeout, time.Millisecond*100, "Persistent Volume %s was not synced downstream", upstreamPersistentVolume.Name) - - t.Logf("Deleting the Persistent Volume upstream") - err = upstreamKubeClusterClient.Cluster(wsPath).CoreV1().PersistentVolumes().Delete(ctx, upstreamPersistentVolume.Name, metav1.DeleteOptions{}) - require.NoError(t, err, "failed to delete persistentVolume upstream") - - t.Logf("Waiting for the Persistent Volume to be deleted upstream") - framework.Eventually(t, func() (bool, string) { - _, err := upstreamKubeClusterClient.Cluster(wsPath).CoreV1().PersistentVolumes().Get(ctx, upstreamPersistentVolume.Name, metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - return true, "" - } - require.NoError(t, err) - return false, "" - }, wait.ForeverTestTimeout, time.Millisecond*100, "Persistent Volume %s was not deleted upstream", upstreamPersistentVolume.Name) - - t.Logf("Waiting for the Persistent Volume to be deleted downstream") - framework.Eventually(t,
func() (bool, string) { - pv, err := downstreamKubeClient.CoreV1().PersistentVolumes().Get(ctx, upstreamPersistentVolume.Name, metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - return true, "pv is not found" - } - if pv.DeletionTimestamp != nil { - return true, "deletionTimestamp is set." - } - require.NoError(t, err) - return false, "" - }, wait.ForeverTestTimeout, time.Millisecond*100, "Persistent Volume %s was not deleted downstream", upstreamPersistentVolume.Name) -} - -func dumpPodEvents(t *testing.T, startAfter time.Time, downstreamKubeClient kubernetes.Interface, downstreamNamespaceName string) time.Time { - t.Helper() - - ctx, cancelFunc := context.WithCancel(context.Background()) - t.Cleanup(cancelFunc) - - eventList, err := downstreamKubeClient.CoreV1().Events(downstreamNamespaceName).List(ctx, metav1.ListOptions{}) - if err != nil { - t.Logf("Error getting events: %v", err) - return startAfter // ignore. Errors here are not the ones we care about. - } - - sort.Slice(eventList.Items, func(i, j int) bool { - return eventList.Items[i].LastTimestamp.Time.Before(eventList.Items[j].LastTimestamp.Time) - }) - - last := startAfter - for _, event := range eventList.Items { - if event.InvolvedObject.Kind != "Pod" { - continue - } - if event.LastTimestamp.After(startAfter) { - t.Logf("Event for pod %s/%s: %s", event.InvolvedObject.Namespace, event.InvolvedObject.Name, event.Message) - } - if event.LastTimestamp.After(last) { - last = event.LastTimestamp.Time - } - } - - pods, err := downstreamKubeClient.CoreV1().Pods(downstreamNamespaceName).List(ctx, metav1.ListOptions{}) - if err != nil { - t.Logf("Error getting pods: %v", err) - return last // ignore. Errors here are not the ones we care about. - } - - for _, pod := range pods.Items { - for _, s := range pod.Status.ContainerStatuses { - if s.State.Terminated != nil && s.State.Terminated.FinishedAt.After(startAfter) { - t.Logf("Pod %s/%s container %s terminated with exit code %d: %s", pod.Namespace, pod.Name, s.Name, s.State.Terminated.ExitCode, s.State.Terminated.Message) - } - } - } - - return last -} - -func dumpPodLogs(t *testing.T, startAfter map[string]*metav1.Time, downstreamKubeClient kubernetes.Interface, downstreamNamespaceName string) map[string]*metav1.Time { - t.Helper() - - ctx, cancelFunc := context.WithCancel(context.Background()) - t.Cleanup(cancelFunc) - - if startAfter == nil { - startAfter = make(map[string]*metav1.Time) - } - - pods, err := downstreamKubeClient.CoreV1().Pods(downstreamNamespaceName).List(ctx, metav1.ListOptions{}) - if err != nil { - t.Logf("Error getting pods: %v", err) - return startAfter // ignore. Errors here are not the ones we care about.
- } - for _, pod := range pods.Items { - for _, c := range pod.Spec.Containers { - key := fmt.Sprintf("%s/%s", pod.Name, c.Name) - now := metav1.Now() - res, err := downstreamKubeClient.CoreV1().Pods(downstreamNamespaceName).GetLogs(pod.Name, &corev1.PodLogOptions{ - SinceTime: startAfter[key], - Container: c.Name, - }).DoRaw(ctx) - if err != nil { - t.Logf("Failed to get logs for pod %s/%s container %s: %v", pod.Namespace, pod.Name, c.Name, err) - continue - } - for _, line := range strings.Split(string(res), "\n") { - t.Logf("Pod %s/%s container %s: %s", pod.Namespace, pod.Name, c.Name, line) - } - startAfter[key] = &now - } - } - - return startAfter -} - -func TestSyncWorkload(t *testing.T) { - t.Parallel() - framework.Suite(t, "transparent-multi-cluster") - - syncTargetName := "test-wlc" - upstreamServer := framework.SharedKcpServer(t) - - t.Log("Creating an organization") - orgPath, _ := framework.NewOrganizationFixture(t, upstreamServer, framework.TODO_WithoutMultiShardSupport()) - - t.Log("Creating a workspace") - wsPath, _ := framework.NewWorkspaceFixture(t, upstreamServer, orgPath, framework.TODO_WithoutMultiShardSupport()) - - // Write the upstream logical cluster config to disk for the workspace plugin - upstreamRawConfig, err := upstreamServer.RawConfig() - require.NoError(t, err) - _, kubeconfigPath := framework.WriteLogicalClusterConfig(t, upstreamRawConfig, "base", wsPath) - - subCommand := []string{ - "workload", - "sync", - syncTargetName, - "--syncer-image", - "ghcr.io/kcp-dev/kcp/syncer-c2e3073d5026a8f7f2c47a50c16bdbec:41ca72b", - "--output-file", "-", - } - - framework.RunKcpCliPlugin(t, kubeconfigPath, subCommand) - - framework.RunKcpCliPlugin(t, kubeconfigPath, subCommand) -} - -func TestCordonUncordonDrain(t *testing.T) { - t.Parallel() - framework.Suite(t, "transparent-multi-cluster") - - upstreamServer := framework.SharedKcpServer(t) - - t.Log("Creating an organization") - orgPath, _ := framework.NewOrganizationFixture(t, upstreamServer, framework.TODO_WithoutMultiShardSupport()) - - upstreamCfg := upstreamServer.BaseConfig(t) - - t.Log("Creating a workspace") - wsPath, _ := framework.NewWorkspaceFixture(t, upstreamServer, orgPath, framework.TODO_WithoutMultiShardSupport()) - - // Write the upstream logical cluster config to disk for the workspace plugin - upstreamRawConfig, err := upstreamServer.RawConfig() - require.NoError(t, err) - _, kubeconfigPath := framework.WriteLogicalClusterConfig(t, upstreamRawConfig, "base", wsPath) - - kcpClusterClient, err := kcpclientset.NewForConfig(upstreamCfg) - require.NoError(t, err, "failed to construct client for server") - - // The Start method of the fixture will initiate syncer start and then wait for - // its sync target to go ready. This implicitly validates the syncer - // heartbeating and the heartbeat controller setting the sync target ready in - // response. 
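Judging by the assertions in this test, cordon, uncordon, and drain reduce to toggling the Unschedulable and EvictAfter fields of the SyncTarget spec. A hedged sketch of a direct API equivalent of cordon, assuming the JSON field name mirrors the Go field and using the ctx and syncTargetName established just below (the plugin's actual implementation may differ):

	// Hypothetical direct equivalent of "kubectl kcp workload cordon <name>".
	patch := []byte(`{"spec":{"unschedulable":true}}`)
	_, err = kcpClusterClient.Cluster(wsPath).WorkloadV1alpha1().SyncTargets().
		Patch(ctx, syncTargetName, types.MergePatchType, patch, metav1.PatchOptions{})
	require.NoError(t, err)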
- syncerFixture := framework.NewSyncerFixture(t, upstreamServer, wsPath).CreateSyncTargetAndApplyToDownstream(t).StartSyncer(t) - syncTargetName := syncerFixture.SyncerConfig.SyncTargetName - - ctx, cancelFunc := context.WithCancel(context.Background()) - t.Cleanup(cancelFunc) - - t.Log("Check initial workload") - cluster, err := kcpClusterClient.Cluster(wsPath).WorkloadV1alpha1().SyncTargets().Get(ctx, syncTargetName, metav1.GetOptions{}) - require.NoError(t, err, "failed to get sync target", syncTargetName) - require.False(t, cluster.Spec.Unschedulable) - require.Nil(t, cluster.Spec.EvictAfter) - - t.Log("Cordon workload") - subCommandCordon := []string{ - "workload", - "cordon", - syncTargetName, - } - framework.RunKcpCliPlugin(t, kubeconfigPath, subCommandCordon) - - t.Log("Check workload after cordon") - cluster, err = kcpClusterClient.Cluster(wsPath).WorkloadV1alpha1().SyncTargets().Get(ctx, syncTargetName, metav1.GetOptions{}) - require.NoError(t, err, "failed to get sync target", syncTargetName) - require.True(t, cluster.Spec.Unschedulable) - require.Nil(t, cluster.Spec.EvictAfter) - - framework.RunKcpCliPlugin(t, kubeconfigPath, subCommandCordon) - - t.Log("Uncordon workload") - subCommandUncordon := []string{ - "workload", - "uncordon", - syncTargetName, - } - framework.RunKcpCliPlugin(t, kubeconfigPath, subCommandUncordon) - - t.Log("Check workload after uncordon") - cluster, err = kcpClusterClient.Cluster(wsPath).WorkloadV1alpha1().SyncTargets().Get(ctx, syncTargetName, metav1.GetOptions{}) - require.NoError(t, err, "failed to get sync target", syncTargetName) - require.False(t, cluster.Spec.Unschedulable) - require.Nil(t, cluster.Spec.EvictAfter) - - framework.RunKcpCliPlugin(t, kubeconfigPath, subCommandUncordon) - - t.Log("Drain workload") - subCommandDrain := []string{ - "workload", - "drain", - syncTargetName, - } - framework.RunKcpCliPlugin(t, kubeconfigPath, subCommandDrain) - - t.Log("Check workload after drain started") - cluster, err = kcpClusterClient.Cluster(wsPath).WorkloadV1alpha1().SyncTargets().Get(ctx, syncTargetName, metav1.GetOptions{}) - require.NoError(t, err, "failed to get sync target", syncTargetName) - require.True(t, cluster.Spec.Unschedulable) - require.NotNil(t, cluster.Spec.EvictAfter) - - framework.RunKcpCliPlugin(t, kubeconfigPath, subCommandDrain) - - t.Log("Remove drain, uncordon workload") - subCommandUncordon = []string{ - "workload", - "uncordon", - syncTargetName, - } - - framework.RunKcpCliPlugin(t, kubeconfigPath, subCommandUncordon) - - t.Log("Check workload after uncordon") - cluster, err = kcpClusterClient.Cluster(wsPath).WorkloadV1alpha1().SyncTargets().Get(ctx, syncTargetName, metav1.GetOptions{}) - require.NoError(t, err, "failed to get sync target", syncTargetName) - require.False(t, cluster.Spec.Unschedulable) - require.Nil(t, cluster.Spec.EvictAfter) -} diff --git a/test/e2e/syncer/tunnels_test.go b/test/e2e/syncer/tunnels_test.go deleted file mode 100644 index 1c703ce1d95..00000000000 --- a/test/e2e/syncer/tunnels_test.go +++ /dev/null @@ -1,472 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package syncer
-
-import (
-	"bytes"
-	"context"
-	"encoding/json"
-	"fmt"
-	"testing"
-	"time"
-
-	kcpkubernetesclientset "github.com/kcp-dev/client-go/kubernetes"
-	"github.com/kcp-dev/logicalcluster/v3"
-	"github.com/stretchr/testify/require"
-
-	appsv1 "k8s.io/api/apps/v1"
-	corev1 "k8s.io/api/core/v1"
-	rbacv1 "k8s.io/api/rbac/v1"
-	apierrors "k8s.io/apimachinery/pkg/api/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/apimachinery/pkg/util/wait"
-	"k8s.io/client-go/kubernetes"
-	"k8s.io/client-go/rest"
-
-	"github.com/kcp-dev/kcp/pkg/syncer/shared"
-	corev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1"
-	workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1"
-	kcpclientset "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster"
-	"github.com/kcp-dev/kcp/test/e2e/framework"
-)
-
-func TestSyncerTunnel(t *testing.T) {
-	t.Parallel()
-	framework.Suite(t, "transparent-multi-cluster:requires-kind")
-
-	if len(framework.TestConfig.PClusterKubeconfig()) == 0 {
-		t.Skip("Test requires a pcluster")
-	}
-
-	upstreamServer := framework.SharedKcpServer(t)
-
-	t.Log("Creating an organization")
-	orgPath, _ := framework.NewOrganizationFixture(t, upstreamServer, framework.TODO_WithoutMultiShardSupport())
-	t.Logf("Creating one workspace for the synctarget on shard %q", corev1alpha1.RootShard)
-	synctargetWsPath, synctargetWs := framework.NewWorkspaceFixture(t, upstreamServer, orgPath, framework.WithRootShard())
-	synctargetWsName := logicalcluster.Name(synctargetWs.Spec.Cluster)
-
-	userWSShardName := corev1alpha1.RootShard
-	shardNames := upstreamServer.ShardNames()
-	if len(shardNames) > 1 {
-		userWSShardName = shardNames[1]
-	}
-
-	t.Logf("Creating one workspace for the user workloads on shard %q", userWSShardName)
-	userWsPath, userWs := framework.NewWorkspaceFixture(t, upstreamServer, orgPath, framework.WithShard(userWSShardName))
-	userWsName := logicalcluster.Name(userWs.Spec.Cluster)
-
-	// The Start method of the fixture will initiate syncer start and then wait for
-	// its sync target to go ready. This implicitly validates the syncer
-	// heartbeating and the heartbeat controller setting the sync target ready in
-	// response.
-	ctx, cancelFunc := context.WithCancel(context.Background())
-	t.Cleanup(cancelFunc)
-
-	syncerFixture := framework.NewSyncerFixture(t, upstreamServer, synctargetWsName.Path(),
-		framework.WithSyncedUserWorkspaces(userWs),
-	).CreateSyncTargetAndApplyToDownstream(t).StartSyncer(t)
-
-	syncerFixture.WaitForSyncTargetReady(ctx, t)
-
-	t.Log("Binding the consumer workspace to the location workspace")
-	framework.NewBindCompute(t, userWsName.Path(), upstreamServer,
-		framework.WithLocationWorkspaceWorkloadBindOption(synctargetWsName.Path()),
-	).Bind(t)
-
-	upstreamConfig := upstreamServer.BaseConfig(t)
-	upstreamKubeClusterClient, err := kcpkubernetesclientset.NewForConfig(upstreamConfig)
-	require.NoError(t, err)
-
-	// From now on, we'll be using the user-1 credentials to interact with the workspace etc. This is done to
-	// simulate a user that is not kcp-admin and to make sure that it can access the logs of a pod through a synctarget
-	// that is in another workspace.
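The framework.ConfigWithToken helper used a few lines below wraps a plain client-go pattern: copy the base rest.Config and make the bearer token the only credential. A minimal sketch, with the host and token values purely illustrative:

```go
package main

import (
	"fmt"

	"k8s.io/client-go/rest"
)

// configWithToken returns a copy of base that authenticates only with the
// given bearer token, dropping any ambient credentials.
func configWithToken(base *rest.Config, token string) *rest.Config {
	cfg := rest.CopyConfig(base)
	cfg.BearerToken = token
	cfg.BearerTokenFile = ""
	cfg.Username = ""
	cfg.Password = ""
	cfg.TLSClientConfig.CertData = nil
	cfg.TLSClientConfig.KeyData = nil
	return cfg
}

func main() {
	base := &rest.Config{Host: "https://kcp.example.invalid"} // illustrative
	fmt.Println(configWithToken(base, "user-1-token").Host)
}
```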
-	clusterAdminUser := &rbacv1.ClusterRoleBinding{
-		ObjectMeta: metav1.ObjectMeta{Name: "cluster-admin-user-1"},
-		Subjects: []rbacv1.Subject{
-			{Kind: "User", Name: "user-1"},
-		},
-		RoleRef: rbacv1.RoleRef{APIGroup: "rbac.authorization.k8s.io",
-			Kind: "ClusterRole", Name: "cluster-admin"},
-	}
-
-	_, err = upstreamKubeClusterClient.Cluster(userWsPath).RbacV1().ClusterRoleBindings().Create(ctx, clusterAdminUser, metav1.CreateOptions{})
-	require.NoError(t, err)
-
-	// Create a client using the user-1 token.
-	userConfig := framework.ConfigWithToken("user-1-token", upstreamServer.BaseConfig(t))
-	userKcpClient, err := kcpkubernetesclientset.NewForConfig(userConfig)
-	require.NoError(t, err)
-
-	t.Log("Creating upstream namespace...")
-	require.Eventually(t, func() bool {
-		_, err := userKcpClient.Cluster(userWsPath).CoreV1().Namespaces().Create(ctx, &corev1.Namespace{
-			ObjectMeta: metav1.ObjectMeta{
-				Name: "test-syncer",
-			},
-		}, metav1.CreateOptions{})
-		if err != nil {
-			if apierrors.IsAlreadyExists(err) {
-				return true
-			}
-			t.Errorf("saw an error creating upstream namespace: %v", err)
-			return false
-		}
-		return true
-	}, wait.ForeverTestTimeout, time.Millisecond*100, "upstream namespace was not created")
-
-	upstreamNamespaceName := "test-syncer"
-
-	require.NoError(t, err)
-
-	downstreamKubeClient, err := kubernetes.NewForConfig(syncerFixture.DownstreamConfig)
-	require.NoError(t, err)
-
-	upstreamKcpClient, err := kcpclientset.NewForConfig(syncerFixture.SyncerConfig.UpstreamConfig)
-	require.NoError(t, err)
-
-	syncTarget, err := upstreamKcpClient.Cluster(synctargetWsPath).WorkloadV1alpha1().SyncTargets().Get(ctx,
-		syncerFixture.SyncerConfig.SyncTargetName,
-		metav1.GetOptions{},
-	)
-	require.NoError(t, err)
-
-	desiredNSLocator := shared.NewNamespaceLocator(userWsName, synctargetWsName,
-		syncTarget.GetUID(), syncTarget.Name, upstreamNamespaceName)
-	require.NoError(t, err)
-
-	downstreamNamespaceName, err := shared.PhysicalClusterNamespaceName(desiredNSLocator)
-	require.NoError(t, err)
-
-	t.Logf("Waiting for downstream namespace to be created...")
-	require.Eventually(t, func() bool {
-		_, err = downstreamKubeClient.CoreV1().Namespaces().Get(ctx, downstreamNamespaceName, metav1.GetOptions{})
-		if err != nil {
-			if apierrors.IsNotFound(err) {
-				return false
-			}
-			require.NoError(t, err)
-			return false
-		}
-		return true
-	}, wait.ForeverTestTimeout, time.Millisecond*100, "downstream namespace %s for upstream namespace %s was not created", downstreamNamespaceName, upstreamNamespaceName)
-
-	configMapName := "kcp-root-ca.crt"
-	t.Logf("Waiting for downstream configmap %s/%s to be created...", downstreamNamespaceName, configMapName)
-	require.Eventually(t, func() bool {
-		_, err = downstreamKubeClient.CoreV1().ConfigMaps(downstreamNamespaceName).Get(ctx, configMapName, metav1.GetOptions{})
-		if apierrors.IsNotFound(err) {
-			return false
-		}
-		if err != nil {
-			t.Errorf("saw an error waiting for downstream configmap %s/%s to be created: %v", downstreamNamespaceName, configMapName, err)
-			return false
-		}
-		return true
-	}, wait.ForeverTestTimeout, time.Millisecond*100, "downstream configmap %s/%s was not created", downstreamNamespaceName, configMapName)
-
-	t.Log("Waiting until deployments can be listed in the consumer workspace via direct access")
-	require.Eventually(t, func() bool {
-		_, err := userKcpClient.Cluster(userWsPath).AppsV1().Deployments("").List(ctx, metav1.ListOptions{})
-		if apierrors.IsNotFound(err) {
-			return false
-		} else if err != nil {
-			t.Logf("Failed to list deployments: %v", err)
-			return false
-		}
-		return true
-	}, wait.ForeverTestTimeout, time.Millisecond*100)
-
-	t.Log("Creating upstream Deployment ...")
-	d := &appsv1.Deployment{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: "tunnel-test",
-		},
-		Spec: appsv1.DeploymentSpec{
-			Selector: &metav1.LabelSelector{
-				MatchLabels: map[string]string{"foo": "bar"},
-			},
-			Template: corev1.PodTemplateSpec{
-				ObjectMeta: metav1.ObjectMeta{
-					Labels: map[string]string{
-						"foo": "bar",
-					},
-				},
-				Spec: corev1.PodSpec{
-					Containers: []corev1.Container{
-						{
-							Name:    "busybox",
-							Image:   "ghcr.io/distroless/busybox:1.35.0-r23",
-							Command: []string{"/bin/sh", "-c", `date; for i in $(seq 1 3000); do echo "$(date) Try: ${i}"; sleep 1; done`},
-						},
-					},
-				},
-			},
-		},
-	}
-
-	_, err = userKcpClient.Cluster(userWsPath).AppsV1().Deployments(upstreamNamespaceName).Create(ctx, d, metav1.CreateOptions{})
-	require.NoError(t, err)
-
-	t.Log("Waiting for downstream Deployment to be ready ...")
-	framework.Eventually(t, func() (bool, string) {
-		deployment, err := downstreamKubeClient.AppsV1().Deployments(downstreamNamespaceName).Get(ctx, d.Name, metav1.GetOptions{})
-		if err != nil {
-			return false, err.Error()
-		}
-		if deployment.Status.ReadyReplicas != 1 {
-			return false, fmt.Sprintf("expected 1 ready replica, got %d", deployment.Status.ReadyReplicas)
-		}
-		return true, ""
-	}, wait.ForeverTestTimeout, time.Millisecond*100)
-
-	// Get the downstream deployment POD name
-	pods, err := downstreamKubeClient.CoreV1().Pods(downstreamNamespaceName).List(ctx, metav1.ListOptions{})
-	require.NoError(t, err)
-	require.Len(t, pods.Items, 1)
-
-	// Upsync the downstream deployment POD to KCP
-	pod := pods.Items[0]
-	pod.ObjectMeta.GenerateName = ""
-	pod.Namespace = upstreamNamespaceName
-	pod.ResourceVersion = ""
-	pod.OwnerReferences = nil
-
-	labels := pod.GetLabels()
-	if labels == nil {
-		labels = map[string]string{}
-	}
-	labels["state.workload.kcp.io/"+workloadv1alpha1.ToSyncTargetKey(synctargetWsName, syncTarget.Name)] = "Upsync"
-	pod.SetLabels(labels)
-
-	// Try to create the pod in KCP, it should fail because the user doesn't have the right permissions
-	_, err = userKcpClient.Cluster(userWsPath).CoreV1().Pods(upstreamNamespaceName).Create(ctx, &pod, metav1.CreateOptions{})
-	require.EqualError(t, err, "pods is forbidden: User \"user-1\" cannot create resource \"pods\" in API group \"\" in the namespace \"test-syncer\": access denied")
-
-	t.Log("Waiting for the upsyncing of the PODs to KCP")
-	framework.Eventually(t, func() (bool, string) {
-		pods, err = userKcpClient.Cluster(userWsPath).CoreV1().Pods(upstreamNamespaceName).List(ctx, metav1.ListOptions{
-			LabelSelector: "foo=bar",
-		})
-		if apierrors.IsUnauthorized(err) {
-			return false, fmt.Sprintf("failed to list pods: %v", err)
-		}
-		require.NoError(t, err)
-
-		return pods != nil && len(pods.Items) > 0, "upsynced pods not found"
-	}, wait.ForeverTestTimeout, time.Millisecond*100, "couldn't get upstream pods for deployment %s/%s", d.Namespace, d.Name)
-
-	t.Logf("Getting Pod logs from upstream cluster %q as a normal kubectl client would do.", userWs.Name)
-	framework.Eventually(t, func() (bool, string) {
-		var podLogs bytes.Buffer
-		for _, pod := range pods.Items {
-			request := userKcpClient.Cluster(userWsPath).CoreV1().Pods(upstreamNamespaceName).GetLogs(pod.Name, &corev1.PodLogOptions{})
-			logs, err := request.Do(ctx).Raw()
-			if err != nil {
-				return false, err.Error()
-			}
-			podLogs.Write(logs)
-		}
-
-		return podLogs.Len() > 1, podLogs.String()
-	},
wait.ForeverTestTimeout, time.Millisecond*100, "couldn't get downstream pod logs for deployment %s/%s", d.Namespace, d.Name) -} - -// TestSyncerTunnelFilter ensures that the syncer tunnel will reject trying to access a Pod that is crafted and not actually upsynced. -func TestSyncerTunnelFilter(t *testing.T) { - t.Parallel() - framework.Suite(t, "transparent-multi-cluster") - - ctx, cancelFunc := context.WithCancel(context.Background()) - t.Cleanup(cancelFunc) - - kcpServer := framework.SharedKcpServer(t) - orgPath, _ := framework.NewOrganizationFixture(t, kcpServer, framework.TODO_WithoutMultiShardSupport()) - locationPath, locationWs := framework.NewWorkspaceFixture(t, kcpServer, orgPath, framework.TODO_WithoutMultiShardSupport()) - locationWsName := logicalcluster.Name(locationWs.Spec.Cluster) - userPath, userWs := framework.NewWorkspaceFixture(t, kcpServer, orgPath, framework.TODO_WithoutMultiShardSupport()) - userWsName := logicalcluster.Name(userWs.Spec.Cluster) - - // Creating synctarget and deploying the syncer - syncerFixture := framework.NewSyncerFixture(t, kcpServer, locationPath, framework.WithSyncedUserWorkspaces(userWs)).CreateSyncTargetAndApplyToDownstream(t).StartAPIImporter(t).StartHeartBeat(t) - syncerFixture.StartSyncerTunnel(t) - - t.Log("Binding the consumer workspace to the location workspace") - framework.NewBindCompute(t, userWsName.Path(), kcpServer, - framework.WithLocationWorkspaceWorkloadBindOption(locationWsName.Path()), - ).Bind(t) - - kcpClient, err := kcpclientset.NewForConfig(kcpServer.BaseConfig(t)) - require.NoError(t, err) - - syncTarget, err := kcpClient.Cluster(syncerFixture.SyncerConfig.SyncTargetPath).WorkloadV1alpha1().SyncTargets().Get(ctx, - syncerFixture.SyncerConfig.SyncTargetName, - metav1.GetOptions{}, - ) - require.NoError(t, err) - - kcpKubeClusterClient, err := kcpkubernetesclientset.NewForConfig(kcpServer.BaseConfig(t)) - require.NoError(t, err) - - downstreamKubeLikeClient, err := kubernetes.NewForConfig(syncerFixture.SyncerConfig.DownstreamConfig) - require.NoError(t, err) - - upstreamNs, err := kcpKubeClusterClient.CoreV1().Namespaces().Cluster(userPath).Create(ctx, &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-syncer", - }, - }, metav1.CreateOptions{}) - require.NoError(t, err) - - nsLocator := shared.NamespaceLocator{ - SyncTarget: shared.SyncTargetLocator{ - ClusterName: string(logicalcluster.From(syncTarget)), - Name: syncTarget.Name, - UID: syncTarget.UID, - }, - ClusterName: logicalcluster.From(upstreamNs), - Namespace: upstreamNs.Name, - } - - downstreamNsName, err := shared.PhysicalClusterNamespaceName(nsLocator) - require.NoError(t, err) - - // Convert the locator to json, as we need to set it on the namespace. - locatorJSON, err := json.Marshal(nsLocator) - require.NoError(t, err) - - // Create a namespace on the downstream cluster that matches the kcp namespace, with a correct locator. - _, err = downstreamKubeLikeClient.CoreV1().Namespaces().Create(ctx, &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: downstreamNsName, - Labels: map[string]string{ - workloadv1alpha1.InternalDownstreamClusterLabel: workloadv1alpha1.ToSyncTargetKey(logicalcluster.From(syncTarget), syncTarget.Name), - }, - Annotations: map[string]string{ - shared.NamespaceLocatorAnnotation: string(locatorJSON), - }, - }, - }, metav1.CreateOptions{}) - require.NoError(t, err) - - // Create a pod downstream that is not upsynced, to ensure that the syncer tunnel will reject it. 
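The locator plumbing above deserves a standalone illustration: the upstream cluster/namespace pair plus the SyncTarget identity is marshaled to JSON, stored as an annotation on the downstream namespace, and also determines the downstream namespace name. A simplified sketch; the field layout, annotation key, and hashing scheme here only loosely mirror shared.NamespaceLocator and shared.PhysicalClusterNamespaceName:

```go
package main

import (
	"crypto/sha256"
	"encoding/json"
	"fmt"
)

type syncTargetLocator struct {
	ClusterName string `json:"cluster"`
	Name        string `json:"name"`
	UID         string `json:"uid"`
}

type namespaceLocator struct {
	SyncTarget  syncTargetLocator `json:"syncTarget"`
	ClusterName string            `json:"cluster"`
	Namespace   string            `json:"namespace"`
}

func main() {
	loc := namespaceLocator{
		SyncTarget:  syncTargetLocator{ClusterName: "root:org:location", Name: "test-wlc", UID: "some-uid"},
		ClusterName: "root:org:user",
		Namespace:   "test-syncer",
	}

	// The locator travels downstream as a JSON annotation on the namespace
	// (annotation key illustrative).
	raw, err := json.Marshal(loc)
	if err != nil {
		panic(err)
	}
	fmt.Printf("kcp.io/namespace-locator: %s\n", raw)

	// The downstream namespace name is derived deterministically from the
	// locator, e.g. by hashing it (scheme illustrative).
	sum := sha256.Sum256(raw)
	fmt.Printf("downstream namespace: kcp-%x\n", sum[:8])
}
```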
- _, err = downstreamKubeLikeClient.CoreV1().Pods(downstreamNsName).Create(ctx, &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pod", - Finalizers: []string{ - shared.SyncerFinalizerNamePrefix + workloadv1alpha1.ToSyncTargetKey(logicalcluster.From(syncTarget), syncTarget.Name), - }, - Labels: map[string]string{ - workloadv1alpha1.InternalDownstreamClusterLabel: workloadv1alpha1.ToSyncTargetKey(logicalcluster.From(syncTarget), syncTarget.Name), - workloadv1alpha1.ClusterResourceStateLabelPrefix + workloadv1alpha1.ToSyncTargetKey(logicalcluster.From(syncTarget), syncTarget.Name): "", - }, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "test", - }, - }, - }, - }, metav1.CreateOptions{}) - require.NoError(t, err) - - // Create a pod on the upstream namespace that looks like the downstream pod being upsynced. - upsyncerVirtualWorkspaceConfig := rest.CopyConfig(kcpServer.BaseConfig(t)) - framework.Eventually(t, func() (found bool, _ string) { - var err error - upsyncerVirtualWorkspaceConfig.Host, found, err = framework.VirtualWorkspaceURL(ctx, kcpClient, userWs, syncerFixture.GetUpsyncerVirtualWorkspaceURLs()) - require.NoError(t, err) - return found, "Upsyncer virtual workspace URL not found" - }, wait.ForeverTestTimeout, time.Millisecond*100, "Upsyncer virtual workspace URL not found") - upsyncedClient, err := kcpkubernetesclientset.NewForConfig(upsyncerVirtualWorkspaceConfig) - require.NoError(t, err) - - upsyncedPod, err := upsyncedClient.CoreV1().Pods().Cluster(userWsName.Path()).Namespace(upstreamNs.Name).Create(ctx, &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pod", - Finalizers: []string{}, - Labels: map[string]string{ - workloadv1alpha1.ClusterResourceStateLabelPrefix + workloadv1alpha1.ToSyncTargetKey(logicalcluster.From(syncTarget), syncTarget.Name): "Upsync", - }, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "test", - }, - }, - }, - }, metav1.CreateOptions{}) - require.NoError(t, err) - - framework.Eventually(t, func() (bool, string) { - expectedError := fmt.Sprintf("unknown (get pods %s)", upsyncedPod.Name) - request := kcpKubeClusterClient.Cluster(userWsName.Path()).CoreV1().Pods(upstreamNs.Name).GetLogs(upsyncedPod.Name, &corev1.PodLogOptions{}) - _, err = request.Do(ctx).Raw() - if err != nil { - if err.Error() == expectedError { - return true, "" - } - return false, fmt.Sprintf("Returned error: %s is different from expected error: %s", err.Error(), expectedError) - } - return false, "no error returned from get logs" - }, wait.ForeverTestTimeout, time.Millisecond*100, "") - - // Update the downstream namespace locator to point to another synctarget. - locatorJSON, err = json.Marshal(shared.NamespaceLocator{ - SyncTarget: shared.SyncTargetLocator{ - ClusterName: "another-cluster", - Name: "another-sync-target", - UID: "another-sync-target-uid", - }, - ClusterName: logicalcluster.From(upstreamNs), - Namespace: upstreamNs.Name, - }) - require.NoError(t, err) - - // Get a more privileged client to be able to update namespaces. - downstreamAdminKubeClient, err := kubernetes.NewForConfig(syncerFixture.DownstreamConfig) - require.NoError(t, err) - - // Patch the namespace to update the locator. 
-	namespacePatch, err := json.Marshal(map[string]interface{}{
-		"metadata": map[string]interface{}{
-			"annotations": map[string]interface{}{
-				shared.NamespaceLocatorAnnotation: string(locatorJSON),
-			},
-		},
-	})
-	require.NoError(t, err)
-	_, err = downstreamAdminKubeClient.CoreV1().Namespaces().Patch(ctx, downstreamNsName, types.MergePatchType, namespacePatch, metav1.PatchOptions{})
-	require.NoError(t, err)
-
-	// Let's try to get the pod logs again; this should fail, as the downstream Pod is not actually upsynced.
-	framework.Eventually(t, func() (bool, string) {
-		expectedError := fmt.Sprintf("unknown (get pods %s)", upsyncedPod.Name)
-		request := kcpKubeClusterClient.Cluster(userWsName.Path()).CoreV1().Pods(upstreamNs.Name).GetLogs(upsyncedPod.Name, &corev1.PodLogOptions{})
-		_, err = request.Do(ctx).Raw()
-		if err != nil {
-			if err.Error() == expectedError {
-				return true, ""
-			}
-			return false, fmt.Sprintf("Returned error: %s is different from expected error: %s", err.Error(), expectedError)
-		}
-		return false, "no error returned from get logs"
-	}, wait.ForeverTestTimeout, time.Millisecond*100, "")
-}
diff --git a/third_party/coredns/coremain/run.go b/third_party/coredns/coremain/run.go
deleted file mode 100644
index efc273e5313..00000000000
--- a/third_party/coredns/coremain/run.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// This file is a subset of https://github.com/coredns/coredns/blob/v1.10.0/coremain/run.go
-//
-// The following changes have been applied compared to the original code:
-// - remove code related to command line flags
-// - hard-code caddy file content
-// - only link plugins needed by KCP
-
-package coremain
-
-import (
-	"log"
-	"os"
-
-	"github.com/coredns/caddy"
-	"github.com/coredns/coredns/core/dnsserver"
-	_ "github.com/coredns/coredns/plugin/errors"
-	_ "github.com/coredns/coredns/plugin/forward"
-	_ "github.com/coredns/coredns/plugin/whoami"
-
-	_ "github.com/kcp-dev/kcp/pkg/dns/plugin/nsmap"
-)
-
-const (
-	// Hard-coded kcp DNS configuration
-	conf = `.:5353 {
-    errors
-    nsmap
-    forward . /etc/resolv.conf
-}`
-)
-
-func init() {
-	// Include nsmap in the list of directives. Only the directives used in the
-	// hard-coded configuration are included.
-	dnsserver.Directives = []string{
-		"errors",
-		"nsmap",
-		"forward",
-	}
-}
-
-// Start runs the kcp DNS server.
-func Start() {
-	caddy.TrapSignals()
-
-	log.SetOutput(os.Stdout)
-	log.SetFlags(0) // Set to 0 because we're doing our own time, with timezone
-
-	corefile := caddy.CaddyfileInput{
-		Contents:       []byte(conf),
-		Filepath:       caddy.DefaultConfigFile,
-		ServerTypeName: "dns",
-	}
-
-	// Start your engines
-	instance, err := caddy.Start(corefile)
-	if err != nil {
-		mustLogFatal(err)
-	}
-
-	// Twiddle your thumbs
-	instance.Wait()
-}
-
-// mustLogFatal wraps log.Fatal() in a way that ensures the
-// output is always printed to stderr so the user can see it
-// if the user is still there, even if the process log was not
-// enabled. If this process is an upgrade, however, and the user
-// might not be there anymore, this just logs to the process
-// log and exits.
-func mustLogFatal(args ...interface{}) {
-	if !caddy.IsUpgrade() {
-		log.SetOutput(os.Stderr)
-	}
-	log.Fatal(args...)
-}
diff --git a/third_party/coredns/name.go b/third_party/coredns/name.go
deleted file mode 100644
index 736e0294e63..00000000000
--- a/third_party/coredns/name.go
+++ /dev/null
@@ -1,93 +0,0 @@
-// This file is a subset of https://github.com/coredns/coredns/blob/v1.10.0/plugin/rewrite/name.go
-//
-// The following changes have been applied compared to the original code:
-// - remove code not related to exact matching
-// - export some structs and functions
-
-package coredns
-
-import (
-	"github.com/miekg/dns"
-)
-
-// RemapStringRewriter maps a dedicated string to another string;
-// it also maps a domain of a subdomain.
-type RemapStringRewriter struct {
-	orig        string
-	replacement string
-}
-
-func NewRemapStringRewriter(orig, replacement string) *RemapStringRewriter {
-	return &RemapStringRewriter{orig, replacement}
-}
-
-func (r *RemapStringRewriter) rewriteString(src string) string {
-	if src == r.orig {
-		return r.replacement
-	}
-	return src
-}
-
-// NameRewriterResponseRule maps a record name according to a stringRewriter.
-type NameRewriterResponseRule struct {
-	*RemapStringRewriter
-}
-
-func (r *NameRewriterResponseRule) RewriteResponse(rr dns.RR) {
-	rr.Header().Name = r.rewriteString(rr.Header().Name)
-}
-
-// ValueRewriterResponseRule maps a record value according to a stringRewriter.
-type ValueRewriterResponseRule struct {
-	*RemapStringRewriter
-}
-
-func (r *ValueRewriterResponseRule) RewriteResponse(rr dns.RR) {
-	value := getRecordValueForRewrite(rr)
-	if value != "" {
-		newValue := r.rewriteString(value)
-		if newValue != value {
-			setRewrittenRecordValue(rr, newValue)
-		}
-	}
-}
-
-func getRecordValueForRewrite(rr dns.RR) (name string) {
-	switch rr.Header().Rrtype {
-	case dns.TypeSRV:
-		return rr.(*dns.SRV).Target
-	case dns.TypeMX:
-		return rr.(*dns.MX).Mx
-	case dns.TypeCNAME:
-		return rr.(*dns.CNAME).Target
-	case dns.TypeNS:
-		return rr.(*dns.NS).Ns
-	case dns.TypeDNAME:
-		return rr.(*dns.DNAME).Target
-	case dns.TypeNAPTR:
-		return rr.(*dns.NAPTR).Replacement
-	case dns.TypeSOA:
-		return rr.(*dns.SOA).Ns
-	default:
-		return ""
-	}
-}
-
-func setRewrittenRecordValue(rr dns.RR, value string) {
-	switch rr.Header().Rrtype {
-	case dns.TypeSRV:
-		rr.(*dns.SRV).Target = value
-	case dns.TypeMX:
-		rr.(*dns.MX).Mx = value
-	case dns.TypeCNAME:
-		rr.(*dns.CNAME).Target = value
-	case dns.TypeNS:
-		rr.(*dns.NS).Ns = value
-	case dns.TypeDNAME:
-		rr.(*dns.DNAME).Target = value
-	case dns.TypeNAPTR:
-		rr.(*dns.NAPTR).Replacement = value
-	case dns.TypeSOA:
-		rr.(*dns.SOA).Ns = value
-	}
-}
diff --git a/tmc/cmd/deployment-coordinator/cmd/deployment_coordinator.go b/tmc/cmd/deployment-coordinator/cmd/deployment_coordinator.go
deleted file mode 100644
index 9f5d8f3c6b1..00000000000
--- a/tmc/cmd/deployment-coordinator/cmd/deployment_coordinator.go
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
-Copyright 2022 The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package cmd
-
-import (
-	"context"
-	"time"
-
-	kubernetesinformers "github.com/kcp-dev/client-go/informers"
-	kubernetesclient "github.com/kcp-dev/client-go/kubernetes"
-	"github.com/spf13/cobra"
-
-	genericapiserver "k8s.io/apiserver/pkg/server"
-	"k8s.io/client-go/rest"
-	"k8s.io/client-go/tools/clientcmd"
-	"k8s.io/client-go/tools/clientcmd/api"
-	logsapiv1 "k8s.io/component-base/logs/api/v1"
-	"k8s.io/component-base/version"
-
-	kcpfeatures "github.com/kcp-dev/kcp/pkg/features"
-	"github.com/kcp-dev/kcp/pkg/reconciler/coordination/deployment"
-	"github.com/kcp-dev/kcp/tmc/cmd/deployment-coordinator/options"
-)
-
-const numThreads = 2
-
-const resyncPeriod = 10 * time.Hour
-
-func NewDeploymentCoordinatorCommand() *cobra.Command {
-	options := options.NewOptions()
-	command := &cobra.Command{
-		Use:   "deployment-coordinator",
-		Short: "Coordination controller for deployments. Spreads replicas across locations",
-		RunE: func(cmd *cobra.Command, args []string) error {
-			if err := logsapiv1.ValidateAndApply(options.Logs, kcpfeatures.DefaultFeatureGate); err != nil {
-				return err
-			}
-			if err := options.Complete(); err != nil {
-				return err
-			}
-
-			if err := options.Validate(); err != nil {
-				return err
-			}
-
-			ctx := genericapiserver.SetupSignalContext()
-			if err := Run(ctx, options); err != nil {
-				return err
-			}
-
-			<-ctx.Done()
-
-			return nil
-		},
-	}
-
-	options.AddFlags(command.Flags())
-
-	if v := version.Get().String(); len(v) == 0 {
-		command.Version = ""
-	} else {
-		command.Version = v
-	}
-
-	return command
-}
-
-func Run(ctx context.Context, options *options.Options) error {
-	defaultLoadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
-	defaultLoadingRules.ExplicitPath = options.Kubeconfig
-	r, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
-		defaultLoadingRules,
-		&clientcmd.ConfigOverrides{
-			CurrentContext: options.Context,
-			ClusterInfo: api.Cluster{
-				Server: options.Server,
-			},
-		}).ClientConfig()
-	if err != nil {
-		return err
-	}
-
-	kcpVersion := version.Get().GitVersion
-
-	kcpClusterClient, err := kubernetesclient.NewForConfig(rest.AddUserAgent(rest.CopyConfig(r), "kcp#deployment-coordinator/"+kcpVersion))
-	if err != nil {
-		return err
-	}
-
-	kubeInformerFactory := kubernetesinformers.NewSharedInformerFactoryWithOptions(kcpClusterClient, resyncPeriod)
-
-	controller, err := deployment.NewController(ctx, kcpClusterClient, kubeInformerFactory.Apps().V1().Deployments())
-	if err != nil {
-		return err
-	}
-	kubeInformerFactory.Start(ctx.Done())
-	kubeInformerFactory.WaitForCacheSync(ctx.Done())
-
-	controller.Start(ctx, numThreads)
-
-	return nil
-}
diff --git a/tmc/cmd/deployment-coordinator/main.go b/tmc/cmd/deployment-coordinator/main.go
deleted file mode 100644
index 8005169757c..00000000000
--- a/tmc/cmd/deployment-coordinator/main.go
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
-Copyright 2022 The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package main
-
-import (
-	"os"
-
-	"k8s.io/component-base/cli"
-	_ "k8s.io/component-base/logs/json/register"
-
-	"github.com/kcp-dev/kcp/tmc/cmd/deployment-coordinator/cmd"
-)
-
-func main() {
-	coordinatorCommand := cmd.NewDeploymentCoordinatorCommand()
-	code := cli.Run(coordinatorCommand)
-	os.Exit(code)
-}
diff --git a/tmc/cmd/deployment-coordinator/options/options.go b/tmc/cmd/deployment-coordinator/options/options.go
deleted file mode 100644
index 84c5fabba23..00000000000
--- a/tmc/cmd/deployment-coordinator/options/options.go
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
-Copyright 2022 The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package options
-
-import (
-	"github.com/spf13/pflag"
-
-	"k8s.io/component-base/logs"
-	logsapiv1 "k8s.io/component-base/logs/api/v1"
-)
-
-type Options struct {
-	Kubeconfig string
-	Context    string
-	Server     string
-	Logs       *logs.Options
-}
-
-func NewOptions() *Options {
-	// Default to -v=2
-	logsOptions := logs.NewOptions()
-	logsOptions.Verbosity = logsapiv1.VerbosityLevel(2)
-
-	return &Options{
-		Logs: logsOptions,
-	}
-}
-
-func (options *Options) AddFlags(fs *pflag.FlagSet) {
-	fs.StringVar(&options.Kubeconfig, "kubeconfig", options.Kubeconfig, "Kubeconfig file.")
-	fs.StringVar(&options.Context, "context", options.Context, "Context to use in the Kubeconfig file, instead of the current context.")
-	fs.StringVar(&options.Server, "server", options.Server, "APIServer URL to use in the Kubeconfig file, instead of the one in the current context.")
-	logsapiv1.AddFlags(options.Logs, fs)
-}
-
-func (options *Options) Complete() error {
-	return nil
-}
-
-func (options *Options) Validate() error {
-	return nil
-}
diff --git a/tmc/pkg/coordination/helpers.go b/tmc/pkg/coordination/helpers.go
deleted file mode 100644
index 5388c5072ff..00000000000
--- a/tmc/pkg/coordination/helpers.go
+++ /dev/null
@@ -1,217 +0,0 @@
-/*
-Copyright 2022 The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package coordination
-
-import (
-	"context"
-	"reflect"
-	"strings"
-
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/runtime/schema"
-
-	"github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1"
-	syncercontext "github.com/kcp-dev/kcp/tmc/pkg/virtual/syncer/context"
-	"github.com/kcp-dev/kcp/tmc/pkg/virtual/syncer/transformations"
-)
-
-// FilteredSyncerViewsChanged returns true if the syncer view fields changed between old and new
-// for at least one of the SyncTargets filtered by the [keepSyncTarget] function.
-func FilteredSyncerViewsChanged(old, new metav1.Object, keepSyncTarget func(syncTargetKey string) bool) bool {
-	oldSyncerViewAnnotations := make(map[string]string)
-	for name, value := range old.GetAnnotations() {
-		if strings.HasPrefix(name, v1alpha1.InternalSyncerViewAnnotationPrefix) {
-			if keepSyncTarget(strings.TrimPrefix(name, v1alpha1.InternalSyncerViewAnnotationPrefix)) {
-				oldSyncerViewAnnotations[name] = value
-			}
-		}
-	}
-
-	newSyncerViewAnnotations := make(map[string]string)
-	for name, value := range new.GetAnnotations() {
-		if strings.HasPrefix(name, v1alpha1.InternalSyncerViewAnnotationPrefix) {
-			if keepSyncTarget(strings.TrimPrefix(name, v1alpha1.InternalSyncerViewAnnotationPrefix)) {
-				newSyncerViewAnnotations[name] = value
-			}
-		}
-	}
-	return !reflect.DeepEqual(oldSyncerViewAnnotations, newSyncerViewAnnotations)
-}
-
-// AnySyncerViewChanged returns true if the syncer view fields changed between old and new
-// for at least one of the SyncTargets on which the resource is synced.
-func AnySyncerViewChanged(old, new metav1.Object) bool {
-	return FilteredSyncerViewsChanged(old, new, func(key string) bool {
-		return true
-	})
-}
-
-// SyncerViewChanged returns true if the syncer view fields changed between old and new
-// for the given SyncTarget.
-func SyncerViewChanged(old, new metav1.Object, syncTargetKey string) bool {
-	return FilteredSyncerViewsChanged(old, new, func(key string) bool {
-		return syncTargetKey == key
-	})
-}
-
-type Object interface {
-	metav1.Object
-	runtime.Object
-}
-
-// UpstreamViewChanged checks equality between old and new, ignoring the syncer view annotations.
-func UpstreamViewChanged(old, new Object, equality func(old, new interface{}) bool) bool {
-	old = old.DeepCopyObject().(Object)
-	new = new.DeepCopyObject().(Object)
-
-	oldAnnotations := make(map[string]string)
-	for name, value := range old.GetAnnotations() {
-		if !strings.HasPrefix(name, v1alpha1.InternalSyncerViewAnnotationPrefix) {
-			oldAnnotations[name] = value
-		}
-	}
-	old.SetAnnotations(oldAnnotations)
-
-	newAnnotations := make(map[string]string)
-	for name, value := range new.GetAnnotations() {
-		if !strings.HasPrefix(name, v1alpha1.InternalSyncerViewAnnotationPrefix) {
-			newAnnotations[name] = value
-		}
-	}
-	new.SetAnnotations(newAnnotations)
-
-	return !equality(old, new)
-}
-
-// SyncerViewRetriever allows retrieving the syncer views on an upstream object.
-// It is designed to use the same transformation and overriding logic as the one
-// used by the Syncer virtual workspace.
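The helpers above all operate on one annotation scheme: each SyncTarget's view of a resource is serialized under a shared annotation prefix followed by that target's sync target key. A self-contained sketch of the filtering, with an illustrative prefix value standing in for v1alpha1.InternalSyncerViewAnnotationPrefix:

```go
package main

import (
	"fmt"
	"strings"
)

const syncerViewAnnotationPrefix = "diff.syncer.internal.kcp.io/" // illustrative stand-in

// filteredSyncerViews mirrors the filtering loop in FilteredSyncerViewsChanged:
// keep only syncer-view annotations whose sync target key is accepted.
func filteredSyncerViews(annotations map[string]string, keep func(syncTargetKey string) bool) map[string]string {
	out := make(map[string]string)
	for name, value := range annotations {
		if !strings.HasPrefix(name, syncerViewAnnotationPrefix) {
			continue
		}
		if keep(strings.TrimPrefix(name, syncerViewAnnotationPrefix)) {
			out[name] = value
		}
	}
	return out
}

func main() {
	annotations := map[string]string{
		"kubectl.kubernetes.io/last-applied-configuration": "{}",
		syncerViewAnnotationPrefix + "target-a":            `{"status":{"readyReplicas":1}}`,
		syncerViewAnnotationPrefix + "target-b":            `{"status":{"readyReplicas":0}}`,
	}
	views := filteredSyncerViews(annotations, func(key string) bool { return key == "target-a" })
	fmt.Println(views) // only target-a's view survives
}
```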
-type SyncerViewRetriever[T Object] interface {
-	// GetFilteredSyncerViews retrieves the syncer views of the resource for
-	// all the SyncTargets filtered by the [keepSyncTarget] function
-	GetFilteredSyncerViews(ctx context.Context, gvr schema.GroupVersionResource, upstreamResource T, keepSyncTarget func(key string) bool) (map[string]T, error)
-	// GetAllSyncerViews retrieves the syncer views of the resource for
-	// all the SyncTargets on which the resource is being synced
-	GetAllSyncerViews(ctx context.Context, gvr schema.GroupVersionResource, upstreamResource T) (map[string]T, error)
-	// GetSyncerView retrieves the syncer view of the resource for
-	// the given SyncTarget
-	GetSyncerView(ctx context.Context, gvr schema.GroupVersionResource, upstreamResource T, syncTargetKey string) (T, error)
-}
-
-var _ SyncerViewRetriever[Object] = (*syncerViewRetriever[Object])(nil)
-
-type syncerViewRetriever[T Object] struct {
-	transformations.SyncerResourceTransformer
-}
-
-// NewSyncerViewRetriever creates a [SyncerViewRetriever] based on a given
-// [transformations.TransformationProvider] and a given [transformations.SummarizingRulesProvider].
-// Retrieving the syncer views on an upstream object should use the same
-// transformation and overriding logic as the one used by the Syncer virtual workspace.
-// So the 2 arguments should be chosen accordingly.
-func NewSyncerViewRetriever[T Object](transformationProvider transformations.TransformationProvider,
-	summarizingRulesProvider transformations.SummarizingRulesProvider) SyncerViewRetriever[T] {
-	return &syncerViewRetriever[T]{
-		transformations.SyncerResourceTransformer{
-			TransformationProvider:   transformationProvider,
-			SummarizingRulesProvider: summarizingRulesProvider,
-		},
-	}
-}
-
-// NewDefaultSyncerViewManager creates a [SyncerViewRetriever] based on the default
-// transformation and summarizing rules providers.
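The retriever below shuttles between typed objects and unstructured ones via runtime.DefaultUnstructuredConverter, the same round trip its toUnstructured/fromUnstructured helpers perform. A standalone example of that round trip, shown with a ConfigMap for concreteness:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	cm := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: "default"},
		Data:       map[string]string{"key": "value"},
	}

	// Typed -> unstructured.
	raw, err := runtime.DefaultUnstructuredConverter.ToUnstructured(cm)
	if err != nil {
		panic(err)
	}
	u := &unstructured.Unstructured{Object: raw}

	// Unstructured -> typed.
	var back corev1.ConfigMap
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, &back); err != nil {
		panic(err)
	}
	fmt.Println(back.Data["key"]) // value
}
```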
-func NewDefaultSyncerViewManager[T Object]() SyncerViewRetriever[T] { - return NewSyncerViewRetriever[T](&transformations.SpecDiffTransformation{}, &transformations.DefaultSummarizingRules{}) -} - -func toUnstructured[T Object](obj T) (*unstructured.Unstructured, error) { - unstructured := &unstructured.Unstructured{Object: map[string]interface{}{}} - raw, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) - if err != nil { - return nil, err - } - unstructured.Object = raw - return unstructured, nil -} - -func fromUnstructured[T Object](unstr *unstructured.Unstructured) (T, error) { - var obj T - err := runtime.DefaultUnstructuredConverter.FromUnstructured(unstr.Object, &obj) - if err != nil { - return obj, err - } - return obj, nil -} - -func (svm *syncerViewRetriever[T]) GetFilteredSyncerViews(ctx context.Context, gvr schema.GroupVersionResource, upstreamResource T, keepSyncTarget func(key string) bool) (map[string]T, error) { - syncerViews := make(map[string]T) - for name := range upstreamResource.GetAnnotations() { - if !strings.HasPrefix(name, v1alpha1.InternalSyncerViewAnnotationPrefix) { - continue - } - syncTargetKey := strings.TrimPrefix(name, v1alpha1.InternalSyncerViewAnnotationPrefix) - if !keepSyncTarget(syncTargetKey) { - continue - } - var unstrResource *unstructured.Unstructured - if unstructured, isUnstructured := Object(upstreamResource).(*unstructured.Unstructured); isUnstructured { - unstrResource = unstructured - } else { - unstructured, err := toUnstructured(upstreamResource) - if err != nil { - return nil, err - } - unstrResource = unstructured - } - if unstrSyncerView, err := svm.SyncerResourceTransformer.AfterRead(nil, syncercontext.WithSyncTargetKey(ctx, syncTargetKey), gvr, unstrResource, nil); err != nil { - return nil, err - } else { - var syncerView T - if typedSyncerView, isTyped := Object(unstrSyncerView).(T); isTyped { - syncerView = typedSyncerView - } else { - if typedSyncerView, err = fromUnstructured[T](unstrSyncerView); err != nil { - return nil, err - } else { - syncerView = typedSyncerView - } - } - syncerViews[syncTargetKey] = syncerView - } - } - return syncerViews, nil -} - -func (svm *syncerViewRetriever[T]) GetAllSyncerViews(ctx context.Context, gvr schema.GroupVersionResource, upstreamResource T) (map[string]T, error) { - return svm.GetFilteredSyncerViews(ctx, gvr, upstreamResource, func(key string) bool { - return true - }) -} - -func (svm *syncerViewRetriever[T]) GetSyncerView(ctx context.Context, gvr schema.GroupVersionResource, upstreamResource T, syncTargetKey string) (T, error) { - if views, err := svm.GetFilteredSyncerViews(ctx, gvr, upstreamResource, func(key string) bool { - return syncTargetKey == key - }); err != nil { - var zeroVal T - return zeroVal, err - } else { - return views[syncTargetKey], nil - } -} diff --git a/tmc/pkg/server/config.go b/tmc/pkg/server/config.go index 1e36db2aa83..9746dc738a6 100644 --- a/tmc/pkg/server/config.go +++ b/tmc/pkg/server/config.go @@ -19,11 +19,7 @@ package server import ( _ "net/http/pprof" - "k8s.io/client-go/rest" - - virtualcommandoptions "github.com/kcp-dev/kcp/cmd/virtual-workspaces/options" coreserver "github.com/kcp-dev/kcp/pkg/server" - corevwoptions "github.com/kcp-dev/kcp/pkg/virtual/options" "github.com/kcp-dev/kcp/tmc/pkg/server/options" ) @@ -71,21 +67,6 @@ func NewConfig(opts options.CompletedOptions) (*Config, error) { return nil, err } - // add tmc virtual workspaces - if opts.Core.Virtual.Enabled { - virtualWorkspacesConfig := 
rest.CopyConfig(core.GenericConfig.LoopbackClientConfig) - virtualWorkspacesConfig = rest.AddUserAgent(virtualWorkspacesConfig, "virtual-workspaces") - - tmcVWs, err := opts.TmcVirtualWorkspaces.NewVirtualWorkspaces(virtualWorkspacesConfig, virtualcommandoptions.DefaultRootPathPrefix, core.ShardExternalURL, core.CacheKcpSharedInformerFactory) - if err != nil { - return nil, err - } - core.OptionalVirtual.Extra.VirtualWorkspaces, err = corevwoptions.Merge(core.OptionalVirtual.Extra.VirtualWorkspaces, tmcVWs) - if err != nil { - return nil, err - } - } - c := &Config{ Options: opts, Core: core, diff --git a/tmc/pkg/server/controllers.go b/tmc/pkg/server/controllers.go index 41109810f5e..1a639b6750e 100644 --- a/tmc/pkg/server/controllers.go +++ b/tmc/pkg/server/controllers.go @@ -21,28 +21,12 @@ import ( "fmt" _ "net/http/pprof" - kcpdynamic "github.com/kcp-dev/client-go/dynamic" - kcpkubernetesclientset "github.com/kcp-dev/client-go/kubernetes" - kcpapiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/kcp/clientset/versioned" genericapiserver "k8s.io/apiserver/pkg/server" "k8s.io/client-go/rest" "k8s.io/klog/v2" "github.com/kcp-dev/kcp/pkg/reconciler/apis/apiresource" - schedulinglocationstatus "github.com/kcp-dev/kcp/pkg/reconciler/scheduling/location" - schedulingplacement "github.com/kcp-dev/kcp/pkg/reconciler/scheduling/placement" - workloadsapiexport "github.com/kcp-dev/kcp/pkg/reconciler/workload/apiexport" - workloadsdefaultlocation "github.com/kcp-dev/kcp/pkg/reconciler/workload/defaultlocation" - "github.com/kcp-dev/kcp/pkg/reconciler/workload/heartbeat" - workloadnamespace "github.com/kcp-dev/kcp/pkg/reconciler/workload/namespace" - workloadplacement "github.com/kcp-dev/kcp/pkg/reconciler/workload/placement" - workloadreplicateclusterrole "github.com/kcp-dev/kcp/pkg/reconciler/workload/replicateclusterrole" - workloadreplicateclusterrolebinding "github.com/kcp-dev/kcp/pkg/reconciler/workload/replicateclusterrolebinding" - workloadreplicatelogicalcluster "github.com/kcp-dev/kcp/pkg/reconciler/workload/replicatelogicalcluster" - workloadresource "github.com/kcp-dev/kcp/pkg/reconciler/workload/resource" - synctargetcontroller "github.com/kcp-dev/kcp/pkg/reconciler/workload/synctarget" - "github.com/kcp-dev/kcp/pkg/reconciler/workload/synctargetexports" kcpclientset "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster" ) @@ -50,38 +34,6 @@ func postStartHookName(controllerName string) string { return fmt.Sprintf("kcp-tmc-start-%s", controllerName) } -func (s *Server) installWorkloadResourceScheduler(ctx context.Context, config *rest.Config) error { - config = rest.CopyConfig(config) - config = rest.AddUserAgent(config, workloadresource.ControllerName) - dynamicClusterClient, err := kcpdynamic.NewForConfig(config) - if err != nil { - return err - } - - resourceScheduler, err := workloadresource.NewController( - dynamicClusterClient, - s.Core.DiscoveringDynamicSharedInformerFactory, - s.Core.KcpSharedInformerFactory.Workload().V1alpha1().SyncTargets(), - s.Core.CacheKcpSharedInformerFactory.Workload().V1alpha1().SyncTargets(), - s.Core.KubeSharedInformerFactory.Core().V1().Namespaces(), - s.Core.KcpSharedInformerFactory.Scheduling().V1alpha1().Placements(), - ) - if err != nil { - return err - } - - return s.Core.AddPostStartHook(postStartHookName(workloadresource.ControllerName), func(hookContext genericapiserver.PostStartHookContext) error { - logger := klog.FromContext(ctx).WithValues("postStartHook", postStartHookName(workloadresource.ControllerName)) - if err 
:= s.Core.WaitForSync(hookContext.StopCh); err != nil { - logger.Error(err, "failed to finish post-start-hook") - return nil // don't klog.Fatal. This only happens when context is cancelled. - } - - go resourceScheduler.Start(ctx, 2) - return nil - }) -} - func (s *Server) installApiResourceController(ctx context.Context, config *rest.Config) error { config = rest.CopyConfig(config) config = rest.AddUserAgent(config, apiresource.ControllerName) @@ -118,374 +70,3 @@ func (s *Server) installApiResourceController(ctx context.Context, config *rest. return nil }) } - -func (s *Server) installSyncTargetHeartbeatController(ctx context.Context, config *rest.Config) error { - config = rest.CopyConfig(config) - config = rest.AddUserAgent(config, heartbeat.ControllerName) - kcpClusterClient, err := kcpclientset.NewForConfig(config) - if err != nil { - return err - } - - c, err := heartbeat.NewController( - kcpClusterClient, - s.Core.KcpSharedInformerFactory.Workload().V1alpha1().SyncTargets(), - s.Options.Controllers.SyncTargetHeartbeat.HeartbeatThreshold, - ) - if err != nil { - return err - } - - return s.Core.AddPostStartHook(postStartHookName(heartbeat.ControllerName), func(hookContext genericapiserver.PostStartHookContext) error { - logger := klog.FromContext(ctx).WithValues("postStartHook", postStartHookName(heartbeat.ControllerName)) - if err := s.Core.WaitForSync(hookContext.StopCh); err != nil { - logger.Error(err, "failed to finish post-start-hook") - return nil // don't klog.Fatal. This only happens when context is cancelled. - } - - go c.Start(ctx) - - return nil - }) -} - -func (s *Server) installSchedulingLocationStatusController(ctx context.Context, config *rest.Config) error { - controllerName := "kcp-scheduling-location-status-controller" - config = rest.CopyConfig(config) - config = rest.AddUserAgent(config, controllerName) - - kcpClusterClient, err := kcpclientset.NewForConfig(config) - if err != nil { - return err - } - - c, err := schedulinglocationstatus.NewController( - kcpClusterClient, - s.Core.KcpSharedInformerFactory.Scheduling().V1alpha1().Locations(), - s.Core.KcpSharedInformerFactory.Workload().V1alpha1().SyncTargets(), - ) - if err != nil { - return err - } - - return s.Core.AddPostStartHook(postStartHookName(controllerName), func(hookContext genericapiserver.PostStartHookContext) error { - logger := klog.FromContext(ctx).WithValues("postStartHook", postStartHookName(controllerName)) - if err := s.Core.WaitForSync(hookContext.StopCh); err != nil { - logger.Error(err, "failed to finish post-start-hook") - return nil // don't klog.Fatal. This only happens when context is cancelled. 
- } - - go c.Start(goContext(hookContext), 2) - - return nil - }) -} - -func (s *Server) installWorkloadNamespaceScheduler(ctx context.Context, config *rest.Config) error { - config = rest.CopyConfig(config) - config = rest.AddUserAgent(config, workloadnamespace.ControllerName) - kubeClusterClient, err := kcpkubernetesclientset.NewForConfig(config) - if err != nil { - return err - } - - c, err := workloadnamespace.NewController( - kubeClusterClient, - s.Core.KubeSharedInformerFactory.Core().V1().Namespaces(), - s.Core.KcpSharedInformerFactory.Scheduling().V1alpha1().Placements(), - ) - if err != nil { - return err - } - - if err := s.Core.AddPostStartHook(postStartHookName(workloadnamespace.ControllerName), func(hookContext genericapiserver.PostStartHookContext) error { - logger := klog.FromContext(ctx).WithValues("postStartHook", postStartHookName(workloadnamespace.ControllerName)) - if err := s.Core.WaitForSync(hookContext.StopCh); err != nil { - logger.Error(err, "failed to finish post-start-hook") - return nil // don't klog.Fatal. This only happens when context is cancelled. - } - - go c.Start(goContext(hookContext), 2) - - return nil - }); err != nil { - return err - } - - return nil -} - -func (s *Server) installWorkloadPlacementScheduler(ctx context.Context, config *rest.Config) error { - config = rest.CopyConfig(config) - config = rest.AddUserAgent(config, workloadplacement.ControllerName) - kcpClusterClient, err := kcpclientset.NewForConfig(config) - if err != nil { - return err - } - - c, err := workloadplacement.NewController( - kcpClusterClient, - s.Core.KcpSharedInformerFactory.Core().V1alpha1().LogicalClusters(), - s.Core.KcpSharedInformerFactory.Scheduling().V1alpha1().Locations(), - s.Core.CacheKcpSharedInformerFactory.Scheduling().V1alpha1().Locations(), - s.Core.KcpSharedInformerFactory.Workload().V1alpha1().SyncTargets(), - s.Core.CacheKcpSharedInformerFactory.Workload().V1alpha1().SyncTargets(), - s.Core.KcpSharedInformerFactory.Scheduling().V1alpha1().Placements(), - s.Core.KcpSharedInformerFactory.Apis().V1alpha1().APIBindings(), - ) - if err != nil { - return err - } - - return s.Core.AddPostStartHook(postStartHookName(workloadplacement.ControllerName), func(hookContext genericapiserver.PostStartHookContext) error { - logger := klog.FromContext(ctx).WithValues("postStartHook", postStartHookName(workloadplacement.ControllerName)) - if err := s.Core.WaitForSync(hookContext.StopCh); err != nil { - logger.Error(err, "failed to finish post-start-hook") - return nil // don't klog.Fatal. This only happens when context is cancelled. 
- } - - go c.Start(goContext(hookContext), 2) - - return nil - }) -} - -func (s *Server) installSchedulingPlacementController(ctx context.Context, config *rest.Config) error { - config = rest.CopyConfig(config) - config = rest.AddUserAgent(config, schedulingplacement.ControllerName) - kcpClusterClient, err := kcpclientset.NewForConfig(config) - if err != nil { - return err - } - - c, err := schedulingplacement.NewController( - kcpClusterClient, - s.Core.KubeSharedInformerFactory.Core().V1().Namespaces(), - s.Core.KcpSharedInformerFactory.Scheduling().V1alpha1().Locations(), - s.Core.CacheKcpSharedInformerFactory.Scheduling().V1alpha1().Locations(), - s.Core.KcpSharedInformerFactory.Scheduling().V1alpha1().Placements(), - ) - if err != nil { - return err - } - - return s.Core.AddPostStartHook(postStartHookName(schedulingplacement.ControllerName), func(hookContext genericapiserver.PostStartHookContext) error { - logger := klog.FromContext(ctx).WithValues("postStartHook", postStartHookName(schedulingplacement.ControllerName)) - if err := s.Core.WaitForSync(hookContext.StopCh); err != nil { - logger.Error(err, "failed to finish post-start-hook") - return nil // don't klog.Fatal. This only happens when context is cancelled. - } - - go c.Start(goContext(hookContext), 2) - - return nil - }) -} - -func (s *Server) installWorkloadAPIExportController(ctx context.Context, config *rest.Config) error { - config = rest.CopyConfig(config) - config = rest.AddUserAgent(config, workloadsapiexport.ControllerName) - kcpClusterClient, err := kcpclientset.NewForConfig(config) - if err != nil { - return err - } - - c, err := workloadsapiexport.NewController( - kcpClusterClient, - s.Core.KcpSharedInformerFactory.Apis().V1alpha1().APIExports(), - s.Core.KcpSharedInformerFactory.Apis().V1alpha1().APIResourceSchemas(), - s.Core.KcpSharedInformerFactory.Apiresource().V1alpha1().NegotiatedAPIResources(), - s.Core.KcpSharedInformerFactory.Workload().V1alpha1().SyncTargets(), - ) - if err != nil { - return err - } - - return s.Core.AddPostStartHook(postStartHookName(workloadsapiexport.ControllerName), func(hookContext genericapiserver.PostStartHookContext) error { - logger := klog.FromContext(ctx).WithValues("postStartHook", postStartHookName(workloadsapiexport.ControllerName)) - if err := s.Core.WaitForSync(hookContext.StopCh); err != nil { - logger.Error(err, "failed to finish post-start-hook") - return nil // don't klog.Fatal. This only happens when context is cancelled. 
- } - - go c.Start(goContext(hookContext), 2) - - return nil - }) -} - -func (s *Server) installWorkloadDefaultLocationController(ctx context.Context, config *rest.Config) error { - config = rest.CopyConfig(config) - config = rest.AddUserAgent(config, workloadsdefaultlocation.ControllerName) - kcpClusterClient, err := kcpclientset.NewForConfig(config) - if err != nil { - return err - } - - c, err := workloadsdefaultlocation.NewController( - kcpClusterClient, - s.Core.KcpSharedInformerFactory.Workload().V1alpha1().SyncTargets(), - s.Core.KcpSharedInformerFactory.Scheduling().V1alpha1().Locations(), - ) - if err != nil { - return err - } - - return s.Core.AddPostStartHook(postStartHookName(workloadsdefaultlocation.ControllerName), func(hookContext genericapiserver.PostStartHookContext) error { - logger := klog.FromContext(ctx).WithValues("postStartHook", postStartHookName(workloadsdefaultlocation.ControllerName)) - if err := s.Core.WaitForSync(hookContext.StopCh); err != nil { - logger.Error(err, "failed to finish post-start-hook") - return nil // don't klog.Fatal. This only happens when context is cancelled. - } - - go c.Start(goContext(hookContext), 2) - - return nil - }) -} - -func (s *Server) installWorkloadSyncTargetExportController(ctx context.Context, config *rest.Config) error { - config = rest.CopyConfig(config) - config = rest.AddUserAgent(config, synctargetexports.ControllerName) - kcpClusterClient, err := kcpclientset.NewForConfig(config) - if err != nil { - return err - } - - c, err := synctargetexports.NewController( - kcpClusterClient, - s.Core.KcpSharedInformerFactory.Workload().V1alpha1().SyncTargets(), - s.Core.KcpSharedInformerFactory.Apis().V1alpha1().APIExports(), - s.Core.CacheKcpSharedInformerFactory.Apis().V1alpha1().APIExports(), - s.Core.KcpSharedInformerFactory.Apis().V1alpha1().APIResourceSchemas(), - s.Core.CacheKcpSharedInformerFactory.Apis().V1alpha1().APIResourceSchemas(), - s.Core.KcpSharedInformerFactory.Apiresource().V1alpha1().APIResourceImports(), - ) - if err != nil { - return err - } - - return s.Core.AddPostStartHook(synctargetexports.ControllerName, func(hookContext genericapiserver.PostStartHookContext) error { - logger := klog.FromContext(ctx).WithValues("postStartHook", postStartHookName(synctargetexports.ControllerName)) - if err := s.Core.WaitForSync(hookContext.StopCh); err != nil { - logger.Error(err, "failed to finish post-start-hook") - return nil // don't klog.Fatal. This only happens when context is cancelled. 
- } - - go c.Start(goContext(hookContext), 2) - - return nil - }) -} - -func (s *Server) installSyncTargetController(ctx context.Context, config *rest.Config) error { - config = rest.CopyConfig(config) - config = rest.AddUserAgent(config, synctargetcontroller.ControllerName) - kcpClusterClient, err := kcpclientset.NewForConfig(config) - if err != nil { - return err - } - - c := synctargetcontroller.NewController( - kcpClusterClient, - s.Core.KcpSharedInformerFactory.Workload().V1alpha1().SyncTargets(), - s.Core.KcpSharedInformerFactory.Core().V1alpha1().Shards(), - s.Core.CacheKcpSharedInformerFactory.Core().V1alpha1().Shards(), - ) - if err != nil { - return err - } - - return s.Core.AddPostStartHook(postStartHookName(synctargetcontroller.ControllerName), func(hookContext genericapiserver.PostStartHookContext) error { - logger := klog.FromContext(ctx).WithValues("postStartHook", postStartHookName(synctargetcontroller.ControllerName)) - if err := s.Core.WaitForSync(hookContext.StopCh); err != nil { - logger.Error(err, "failed to finish post-start-hook") - return nil // don't klog.Fatal. This only happens when context is cancelled. - } - - go c.Start(goContext(hookContext), 2) - - return nil - }) -} - -func (s *Server) installWorkloadReplicateClusterRoleControllers(ctx context.Context, config *rest.Config) error { - config = rest.CopyConfig(config) - config = rest.AddUserAgent(config, workloadreplicateclusterrole.ControllerName) - kubeClusterClient, err := kcpkubernetesclientset.NewForConfig(config) - if err != nil { - return err - } - - c := workloadreplicateclusterrole.NewController( - kubeClusterClient, - s.Core.KubeSharedInformerFactory.Rbac().V1().ClusterRoles(), - s.Core.KubeSharedInformerFactory.Rbac().V1().ClusterRoleBindings(), - ) - - return s.Core.AddPostStartHook(postStartHookName(workloadreplicateclusterrole.ControllerName), func(hookContext genericapiserver.PostStartHookContext) error { - logger := klog.FromContext(ctx).WithValues("postStartHook", postStartHookName(workloadreplicateclusterrole.ControllerName)) - if err := s.Core.WaitForSync(hookContext.StopCh); err != nil { - logger.Error(err, "failed to finish post-start-hook") - return nil // don't klog.Fatal. This only happens when context is cancelled. - } - - go c.Start(goContext(hookContext), 2) - - return nil - }) -} - -func (s *Server) installWorkloadReplicateClusterRoleBindingControllers(ctx context.Context, config *rest.Config) error { - config = rest.CopyConfig(config) - config = rest.AddUserAgent(config, workloadreplicateclusterrolebinding.ControllerName) - kubeClusterClient, err := kcpkubernetesclientset.NewForConfig(config) - if err != nil { - return err - } - - c := workloadreplicateclusterrolebinding.NewController( - kubeClusterClient, - s.Core.KubeSharedInformerFactory.Rbac().V1().ClusterRoleBindings(), - s.Core.KubeSharedInformerFactory.Rbac().V1().ClusterRoles(), - ) - - return s.Core.AddPostStartHook(postStartHookName(workloadreplicateclusterrolebinding.ControllerName), func(hookContext genericapiserver.PostStartHookContext) error { - logger := klog.FromContext(ctx).WithValues("postStartHook", postStartHookName(workloadreplicateclusterrolebinding.ControllerName)) - if err := s.Core.WaitForSync(hookContext.StopCh); err != nil { - logger.Error(err, "failed to finish post-start-hook") - return nil // don't klog.Fatal. This only happens when context is cancelled. 
- } - - go c.Start(goContext(hookContext), 2) - - return nil - }) -} - -func (s *Server) installWorkloadReplicateLogicalClusterControllers(ctx context.Context, config *rest.Config) error { - config = rest.CopyConfig(config) - config = rest.AddUserAgent(config, workloadreplicatelogicalcluster.ControllerName) - kcpClusterClient, err := kcpclientset.NewForConfig(config) - if err != nil { - return err - } - - c := workloadreplicatelogicalcluster.NewController( - kcpClusterClient, - s.Core.KcpSharedInformerFactory.Core().V1alpha1().LogicalClusters(), - s.Core.KcpSharedInformerFactory.Workload().V1alpha1().SyncTargets(), - ) - - return s.Core.AddPostStartHook(postStartHookName(workloadreplicatelogicalcluster.ControllerName), func(hookContext genericapiserver.PostStartHookContext) error { - logger := klog.FromContext(ctx).WithValues("postStartHook", postStartHookName(workloadreplicatelogicalcluster.ControllerName)) - if err := s.Core.WaitForSync(hookContext.StopCh); err != nil { - logger.Error(err, "failed to finish post-start-hook") - return nil // don't klog.Fatal. This only happens when context is cancelled. - } - - go c.Start(goContext(hookContext), 2) - - return nil - }) -} diff --git a/tmc/pkg/server/options/controllers.go b/tmc/pkg/server/options/controllers.go index a8130895cb4..900c5bc8d3b 100644 --- a/tmc/pkg/server/options/controllers.go +++ b/tmc/pkg/server/options/controllers.go @@ -20,26 +20,21 @@ import ( "github.com/spf13/pflag" apiresource "github.com/kcp-dev/kcp/pkg/reconciler/apis/apiresource/options" - heartbeat "github.com/kcp-dev/kcp/pkg/reconciler/workload/heartbeat/options" ) type Controllers struct { - ApiResource ApiResourceController - SyncTargetHeartbeat SyncTargetHeartbeatController + ApiResource ApiResourceController } type ApiResourceController = apiresource.Options -type SyncTargetHeartbeatController = heartbeat.Options func NewTmcControllers() *Controllers { return &Controllers{ - ApiResource: *apiresource.NewOptions(), - SyncTargetHeartbeat: *heartbeat.NewOptions(), + ApiResource: *apiresource.NewOptions(), } } func (c *Controllers) AddFlags(fs *pflag.FlagSet) { - c.SyncTargetHeartbeat.AddFlags(fs) c.ApiResource.AddFlags(fs) } @@ -53,9 +48,6 @@ func (c *Controllers) Validate() []error { if err := c.ApiResource.Validate(); err != nil { errs = append(errs, err) } - if err := c.SyncTargetHeartbeat.Validate(); err != nil { - errs = append(errs, err) - } return errs } diff --git a/tmc/pkg/server/options/options.go b/tmc/pkg/server/options/options.go index b0c8451a795..3092afc5e7e 100644 --- a/tmc/pkg/server/options/options.go +++ b/tmc/pkg/server/options/options.go @@ -20,13 +20,11 @@ import ( cliflag "k8s.io/component-base/cli/flag" kcpcoreoptions "github.com/kcp-dev/kcp/pkg/server/options" - tmcvirtualoptions "github.com/kcp-dev/kcp/tmc/pkg/virtual/options" ) type Options struct { - Core kcpcoreoptions.Options - TmcControllers Controllers - TmcVirtualWorkspaces tmcvirtualoptions.Options + Core kcpcoreoptions.Options + TmcControllers Controllers Extra ExtraOptions } @@ -35,9 +33,8 @@ type ExtraOptions struct { } type completedOptions struct { - Core kcpcoreoptions.CompletedOptions - Controllers Controllers - TmcVirtualWorkspaces tmcvirtualoptions.Options + Core kcpcoreoptions.CompletedOptions + Controllers Controllers Extra ExtraOptions } @@ -49,9 +46,8 @@ type CompletedOptions struct { // NewOptions creates a new Options with default parameters. 
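The options plumbing trimmed here follows the kube-style two-phase options pattern: a mutable `Options` struct gathers flag values, and `Complete()` freezes them into a `CompletedOptions` that downstream code consumes; the `NewOptions` constructor below restores exactly this shape minus the removed fields. A minimal, self-contained sketch of the pattern (names are illustrative only):

```go
package main

import "fmt"

// Options collects raw flag values; fields may still be unset.
type Options struct {
	RootDir string
}

// completedOptions is unexported, so a CompletedOptions can only be
// obtained by calling Complete().
type completedOptions struct {
	Options
}

// CompletedOptions wraps defaulted, validated options.
type CompletedOptions struct {
	*completedOptions
}

// Complete applies defaults and returns the frozen form.
func (o *Options) Complete() (*CompletedOptions, error) {
	if o.RootDir == "" {
		o.RootDir = "."
	}
	return &CompletedOptions{&completedOptions{*o}}, nil
}

func main() {
	o := &Options{}
	completed, _ := o.Complete()
	fmt.Println(completed.RootDir) // "."
}
```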
func NewOptions(rootDir string) *Options { o := &Options{ - Core: *kcpcoreoptions.NewOptions(rootDir), - TmcControllers: *NewTmcControllers(), - TmcVirtualWorkspaces: *tmcvirtualoptions.NewOptions(), + Core: *kcpcoreoptions.NewOptions(rootDir), + TmcControllers: *NewTmcControllers(), Extra: ExtraOptions{}, } @@ -62,7 +58,6 @@ func NewOptions(rootDir string) *Options { func (o *Options) AddFlags(fss *cliflag.NamedFlagSets) { o.Core.AddFlags(fss) o.TmcControllers.AddFlags(fss.FlagSet("KCP Controllers")) - o.TmcVirtualWorkspaces.AddFlags(fss.FlagSet("KCP Virtual Workspaces")) } func (o *CompletedOptions) Validate() []error { @@ -85,10 +80,9 @@ func (o *Options) Complete(rootDir string) (*CompletedOptions, error) { return &CompletedOptions{ completedOptions: &completedOptions{ - Core: *core, - Controllers: o.TmcControllers, - TmcVirtualWorkspaces: o.TmcVirtualWorkspaces, - Extra: o.Extra, + Core: *core, + Controllers: o.TmcControllers, + Extra: o.Extra, }, }, nil } diff --git a/tmc/pkg/server/server.go b/tmc/pkg/server/server.go index ab31d135b3a..ad51483654a 100644 --- a/tmc/pkg/server/server.go +++ b/tmc/pkg/server/server.go @@ -26,7 +26,6 @@ import ( "k8s.io/klog/v2" configrootcompute "github.com/kcp-dev/kcp/config/rootcompute" - kcpfeatures "github.com/kcp-dev/kcp/pkg/features" coreserver "github.com/kcp-dev/kcp/pkg/server" corev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1" ) @@ -93,56 +92,6 @@ func (s *Server) Run(ctx context.Context) error { if err := s.installApiResourceController(ctx, controllerConfig); err != nil { return err } - if err := s.installSyncTargetHeartbeatController(ctx, controllerConfig); err != nil { - return err - } - if err := s.installSyncTargetController(ctx, controllerConfig); err != nil { - return err - } - if err := s.installWorkloadSyncTargetExportController(ctx, controllerConfig); err != nil { - return err - } - - if err := s.installWorkloadReplicateClusterRoleControllers(ctx, controllerConfig); err != nil { - return err - } - - if err := s.installWorkloadReplicateClusterRoleBindingControllers(ctx, controllerConfig); err != nil { - return err - } - - if err := s.installWorkloadReplicateLogicalClusterControllers(ctx, controllerConfig); err != nil { - return err - } - } - - if s.Options.Core.Controllers.EnableAll || enabled.Has("resource-scheduler") { - if err := s.installWorkloadResourceScheduler(ctx, controllerConfig); err != nil { - return err - } - } - - if kcpfeatures.DefaultFeatureGate.Enabled(kcpfeatures.LocationAPI) { - if s.Options.Core.Controllers.EnableAll || enabled.Has("scheduling") { - if err := s.installWorkloadNamespaceScheduler(ctx, controllerConfig); err != nil { - return err - } - if err := s.installWorkloadPlacementScheduler(ctx, controllerConfig); err != nil { - return err - } - if err := s.installSchedulingLocationStatusController(ctx, controllerConfig); err != nil { - return err - } - if err := s.installSchedulingPlacementController(ctx, controllerConfig); err != nil { - return err - } - if err := s.installWorkloadAPIExportController(ctx, controllerConfig); err != nil { - return err - } - if err := s.installWorkloadDefaultLocationController(ctx, controllerConfig); err != nil { - return err - } - } } return s.Core.Run(ctx) diff --git a/tmc/pkg/virtual/options/options.go b/tmc/pkg/virtual/options/options.go deleted file mode 100644 index fb503643859..00000000000 --- a/tmc/pkg/virtual/options/options.go +++ /dev/null @@ -1,69 +0,0 @@ -/* -Copyright 2022 The KCP Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package options - -import ( - "github.com/spf13/pflag" - - "k8s.io/client-go/rest" - - "github.com/kcp-dev/kcp/pkg/virtual/framework/rootapiserver" - "github.com/kcp-dev/kcp/pkg/virtual/options" - kcpinformers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions" - synceroptions "github.com/kcp-dev/kcp/tmc/pkg/virtual/syncer/options" -) - -const virtualWorkspacesFlagPrefix = "virtual-workspaces-" - -type Options struct { - Syncer *synceroptions.Syncer -} - -func NewOptions() *Options { - return &Options{ - Syncer: synceroptions.New(), - } -} - -func (o *Options) Validate() []error { - var errs []error - - errs = append(errs, o.Syncer.Validate(virtualWorkspacesFlagPrefix)...) - - return errs -} - -func (o *Options) AddFlags(fs *pflag.FlagSet) { -} - -func (o *Options) NewVirtualWorkspaces( - config *rest.Config, - rootPathPrefix string, - shardExternalURL func() string, - cachedKcpInformers kcpinformers.SharedInformerFactory, -) ([]rootapiserver.NamedVirtualWorkspace, error) { - syncer, err := o.Syncer.NewVirtualWorkspaces(rootPathPrefix, shardExternalURL, config, cachedKcpInformers) - if err != nil { - return nil, err - } - - all, err := options.Merge(syncer) - if err != nil { - return nil, err - } - return all, nil -} diff --git a/tmc/pkg/virtual/syncer/builder/build.go b/tmc/pkg/virtual/syncer/builder/build.go deleted file mode 100644 index 3c3e53aad08..00000000000 --- a/tmc/pkg/virtual/syncer/builder/build.go +++ /dev/null @@ -1,120 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package builder - -import ( - "strings" - - kcpdynamic "github.com/kcp-dev/client-go/dynamic" - kcpkubernetesclientset "github.com/kcp-dev/client-go/kubernetes" - - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/tools/cache" - - "github.com/kcp-dev/kcp/pkg/indexers" - "github.com/kcp-dev/kcp/pkg/virtual/framework/forwardingregistry" - "github.com/kcp-dev/kcp/pkg/virtual/framework/rootapiserver" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" - kcpinformers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions" - "github.com/kcp-dev/kcp/tmc/pkg/virtual/syncer/controllers/apireconciler" - "github.com/kcp-dev/kcp/tmc/pkg/virtual/syncer/transformations" - "github.com/kcp-dev/kcp/tmc/pkg/virtual/syncer/upsyncer" -) - -const ( - // SyncerVirtualWorkspaceName holds the name of the virtual workspace for the syncer, used to sync resources from upstream to downstream. 
- SyncerVirtualWorkspaceName string = "syncer" - // UpsyncerVirtualWorkspaceName holds the name of the virtual workspace for the upsyncer, used to sync resources from downstream to upstream. - UpsyncerVirtualWorkspaceName string = "upsyncer" -) - -// BuildVirtualWorkspace builds two virtual workspaces, SyncerVirtualWorkspace and UpsyncerVirtualWorkspace by instantiating a DynamicVirtualWorkspace which, -// combined with a ForwardingREST REST storage implementation, serves a SyncTargetAPI list maintained by the APIReconciler controller. -func BuildVirtualWorkspace( - rootPathPrefix string, - shardExternalURL func() string, - kubeClusterClient kcpkubernetesclientset.ClusterInterface, - dynamicClusterClient kcpdynamic.ClusterInterface, - cachedKCPInformers kcpinformers.SharedInformerFactory, -) []rootapiserver.NamedVirtualWorkspace { - if !strings.HasSuffix(rootPathPrefix, "/") { - rootPathPrefix += "/" - } - - // Setup the APIReconciler indexes to share between both virtualworkspaces. - indexers.AddIfNotPresentOrDie( - cachedKCPInformers.Workload().V1alpha1().SyncTargets().Informer().GetIndexer(), - cache.Indexers{ - apireconciler.IndexSyncTargetsByExport: apireconciler.IndexSyncTargetsByExports, - }, - ) - indexers.AddIfNotPresentOrDie( - cachedKCPInformers.Apis().V1alpha1().APIExports().Informer().GetIndexer(), - cache.Indexers{ - apireconciler.IndexAPIExportsByAPIResourceSchema: apireconciler.IndexAPIExportsByAPIResourceSchemas, - }, - ) - - provider := templateProvider{ - kubeClusterClient: kubeClusterClient, - dynamicClusterClient: dynamicClusterClient, - cachedKCPInformers: cachedKCPInformers, - rootPathPrefix: rootPathPrefix, - } - - return []rootapiserver.NamedVirtualWorkspace{ - { - Name: SyncerVirtualWorkspaceName, - VirtualWorkspace: provider.newTemplate(templateParameters{ - virtualWorkspaceName: SyncerVirtualWorkspaceName, - filteredResourceState: workloadv1alpha1.ResourceStateSync, - restProviderBuilder: NewSyncerRestProvider, - allowedAPIFilter: func(apiGroupResource schema.GroupResource) bool { - // Don't expose Pods via the Syncer VirtualWorkspace. - if apiGroupResource.Group == "" && - (apiGroupResource.Resource == "pods") { - return false - } - return true - }, - transformer: &transformations.SyncerResourceTransformer{ - ShardExternalURL: shardExternalURL(), - TransformationProvider: &transformations.SpecDiffTransformation{}, - SummarizingRulesProvider: &transformations.DefaultSummarizingRules{}, - }, - storageWrapperBuilder: forwardingregistry.WithStaticLabelSelector, - }).buildVirtualWorkspace(), - }, - { - Name: UpsyncerVirtualWorkspaceName, - VirtualWorkspace: provider.newTemplate(templateParameters{ - virtualWorkspaceName: UpsyncerVirtualWorkspaceName, - filteredResourceState: workloadv1alpha1.ResourceStateUpsync, - restProviderBuilder: NewUpSyncerRestProvider, - allowedAPIFilter: func(apiGroupResource schema.GroupResource) bool { - // Only allow persistentvolumes and Pods to be Upsynced. 
- return apiGroupResource.Group == "" && - (apiGroupResource.Resource == "persistentvolumes" || - apiGroupResource.Resource == "pods" || - apiGroupResource.Resource == "endpoints") - }, - transformer: &upsyncer.UpsyncerResourceTransformer{}, - storageWrapperBuilder: upsyncer.WithStaticLabelSelectorAndInWriteCallsCheck, - }).buildVirtualWorkspace(), - }, - } -} diff --git a/tmc/pkg/virtual/syncer/builder/forwarding.go b/tmc/pkg/virtual/syncer/builder/forwarding.go deleted file mode 100644 index 5a5e09547ea..00000000000 --- a/tmc/pkg/virtual/syncer/builder/forwarding.go +++ /dev/null @@ -1,234 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package builder - -import ( - "context" - - kcpdynamic "github.com/kcp-dev/client-go/dynamic" - - "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" - structuralschema "k8s.io/apiextensions-apiserver/pkg/apiserver/schema" - "k8s.io/apiextensions-apiserver/pkg/registry/customresource" - "k8s.io/apimachinery/pkg/api/validation/path" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apiserver/pkg/registry/rest" - "k8s.io/kube-openapi/pkg/validation/validate" - - "github.com/kcp-dev/kcp/pkg/virtual/framework/dynamic/apiserver" - registry "github.com/kcp-dev/kcp/pkg/virtual/framework/forwardingregistry" -) - -type BuildRestProviderFunc func(ctx context.Context, clusterClient kcpdynamic.ClusterInterface, apiExportIdentityHash string, wrapper registry.StorageWrapper) apiserver.RestProviderFunc - -// NewSyncerRestProvider returns a forwarding storage build function, with an optional storage wrapper e.g. to add label based filtering. 
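The two `allowedAPIFilter` closures in the removed `BuildVirtualWorkspace` encode opposite policies: the syncer workspace serves everything except core pods, while the upsyncer serves only a small allow-list of core resources. A stand-alone illustration of both policies (only `schema.GroupResource` is the real Kubernetes type; the rest is scaffolding):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	// Mirrors the removed syncer filter: everything except core pods.
	syncerFilter := func(gr schema.GroupResource) bool {
		return !(gr.Group == "" && gr.Resource == "pods")
	}
	// Mirrors the removed upsyncer filter: core PVs, pods, and endpoints only.
	upsyncerFilter := func(gr schema.GroupResource) bool {
		return gr.Group == "" &&
			(gr.Resource == "persistentvolumes" ||
				gr.Resource == "pods" ||
				gr.Resource == "endpoints")
	}

	for _, gr := range []schema.GroupResource{
		{Resource: "pods"},
		{Resource: "configmaps"},
		{Group: "apps", Resource: "deployments"},
	} {
		fmt.Printf("%-25s syncer=%-5v upsyncer=%v\n", gr, syncerFilter(gr), upsyncerFilter(gr))
	}
}
```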
-func NewSyncerRestProvider(ctx context.Context, clusterClient kcpdynamic.ClusterInterface, apiExportIdentityHash string, wrapper registry.StorageWrapper) apiserver.RestProviderFunc { - return func(resource schema.GroupVersionResource, kind schema.GroupVersionKind, listKind schema.GroupVersionKind, typer runtime.ObjectTyper, tableConvertor rest.TableConvertor, namespaceScoped bool, schemaValidator *validate.SchemaValidator, subresourcesSchemaValidator map[string]*validate.SchemaValidator, structuralSchema *structuralschema.Structural) (mainStorage rest.Storage, subresourceStorages map[string]rest.Storage) { - statusSchemaValidate, statusEnabled := subresourcesSchemaValidator["status"] - - var statusSpec *apiextensions.CustomResourceSubresourceStatus - if statusEnabled { - statusSpec = &apiextensions.CustomResourceSubresourceStatus{} - } - - var scaleSpec *apiextensions.CustomResourceSubresourceScale - // TODO(sttts): implement scale subresource - - strategy := customresource.NewStrategy( - typer, - namespaceScoped, - kind, - path.ValidatePathSegmentName, - schemaValidator, - statusSchemaValidate, - map[string]*structuralschema.Structural{resource.Version: structuralSchema}, - statusSpec, - scaleSpec, - ) - - storage, statusStorage := registry.NewStorage( - ctx, - resource, - apiExportIdentityHash, - kind, - listKind, - strategy, - nil, - tableConvertor, - nil, - func(ctx context.Context) (kcpdynamic.ClusterInterface, error) { return clusterClient, nil }, - nil, - wrapper, - ) - - // we want to expose some but not all the allowed endpoints, so filter by exposing just the funcs we need - subresourceStorages = make(map[string]rest.Storage) - if statusEnabled { - subresourceStorages["status"] = &struct { - registry.FactoryFunc - registry.DestroyerFunc - - registry.GetterFunc - registry.UpdaterFunc - // patch is implicit as we have get + update - - registry.TableConvertorFunc - registry.CategoriesProviderFunc - registry.ResetFieldsStrategyFunc - }{ - FactoryFunc: statusStorage.FactoryFunc, - DestroyerFunc: statusStorage.DestroyerFunc, - - GetterFunc: statusStorage.GetterFunc, - UpdaterFunc: statusStorage.UpdaterFunc, - - TableConvertorFunc: statusStorage.TableConvertorFunc, - CategoriesProviderFunc: statusStorage.CategoriesProviderFunc, - ResetFieldsStrategyFunc: statusStorage.ResetFieldsStrategyFunc, - } - } - - // TODO(sttts): add scale subresource - - return &struct { - registry.FactoryFunc - registry.ListFactoryFunc - registry.DestroyerFunc - - registry.GetterFunc - registry.ListerFunc - registry.UpdaterFunc - registry.WatcherFunc - - registry.TableConvertorFunc - registry.CategoriesProviderFunc - registry.ResetFieldsStrategyFunc - }{ - FactoryFunc: storage.FactoryFunc, - ListFactoryFunc: storage.ListFactoryFunc, - DestroyerFunc: storage.DestroyerFunc, - - GetterFunc: storage.GetterFunc, - ListerFunc: storage.ListerFunc, - UpdaterFunc: storage.UpdaterFunc, - WatcherFunc: storage.WatcherFunc, - - TableConvertorFunc: storage.TableConvertorFunc, - CategoriesProviderFunc: storage.CategoriesProviderFunc, - ResetFieldsStrategyFunc: storage.ResetFieldsStrategyFunc, - }, subresourceStorages - } -} - -// NewUpSyncerRestProvider returns a forwarding storage build function, with an optional storage wrapper e.g. to add label based filtering. 
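The anonymous structs returned by these providers are how the removed storage filters verbs: by embedding only the `registry.*Func` types for the endpoints it wants (get, list, update, watch for the syncer; additionally create and graceful delete for the upsyncer), the composite value satisfies only the corresponding `rest.Storage` interfaces. A toy demonstration of the same Go mechanism, independent of the kcp registry types:

```go
package main

import "fmt"

type Getter interface{ Get(name string) string }
type Deleter interface{ Delete(name string) }

// GetterFunc adapts a plain function to the Getter interface,
// in the style of the registry.*Func adapters above.
type GetterFunc func(name string) string

func (f GetterFunc) Get(name string) string { return f(name) }

// storage embeds only GetterFunc, so it satisfies Getter but not Deleter:
// verbs that are not embedded are simply not served.
type storage struct{ GetterFunc }

func main() {
	var s interface{} = &storage{GetterFunc: func(n string) string { return "obj/" + n }}

	if g, ok := s.(Getter); ok {
		fmt.Println(g.Get("foo")) // obj/foo
	}
	_, canDelete := s.(Deleter)
	fmt.Println("delete exposed:", canDelete) // false
}
```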
-func NewUpSyncerRestProvider(ctx context.Context, clusterClient kcpdynamic.ClusterInterface, apiExportIdentityHash string, wrapper registry.StorageWrapper) apiserver.RestProviderFunc { - return func(resource schema.GroupVersionResource, kind schema.GroupVersionKind, listKind schema.GroupVersionKind, typer runtime.ObjectTyper, tableConvertor rest.TableConvertor, namespaceScoped bool, schemaValidator *validate.SchemaValidator, subresourcesSchemaValidator map[string]*validate.SchemaValidator, structuralSchema *structuralschema.Structural) (mainStorage rest.Storage, subresourceStorages map[string]rest.Storage) { - statusSchemaValidate, statusEnabled := subresourcesSchemaValidator["status"] - - var statusSpec *apiextensions.CustomResourceSubresourceStatus - if statusEnabled { - statusSpec = &apiextensions.CustomResourceSubresourceStatus{} - } - - strategy := customresource.NewStrategy( - typer, - namespaceScoped, - kind, - path.ValidatePathSegmentName, - schemaValidator, - statusSchemaValidate, - map[string]*structuralschema.Structural{resource.Version: structuralSchema}, - statusSpec, - nil, - ) - - storage, statusStorage := registry.NewStorage( - ctx, - resource, - apiExportIdentityHash, - kind, - listKind, - strategy, - nil, - tableConvertor, - nil, - func(ctx context.Context) (kcpdynamic.ClusterInterface, error) { return clusterClient, nil }, - nil, - wrapper, - ) - - // we want to expose some but not all the allowed endpoints, so filter by exposing just the funcs we need - subresourceStorages = make(map[string]rest.Storage) - if statusEnabled { - subresourceStorages["status"] = &struct { - registry.FactoryFunc - registry.DestroyerFunc - - registry.GetterFunc - registry.UpdaterFunc - // patch is implicit as we have get + update - - registry.TableConvertorFunc - registry.CategoriesProviderFunc - registry.ResetFieldsStrategyFunc - }{ - FactoryFunc: statusStorage.FactoryFunc, - DestroyerFunc: statusStorage.DestroyerFunc, - - GetterFunc: statusStorage.GetterFunc, - UpdaterFunc: statusStorage.UpdaterFunc, - - TableConvertorFunc: statusStorage.TableConvertorFunc, - CategoriesProviderFunc: statusStorage.CategoriesProviderFunc, - ResetFieldsStrategyFunc: statusStorage.ResetFieldsStrategyFunc, - } - } - - return &struct { - registry.FactoryFunc - registry.ListFactoryFunc - registry.DestroyerFunc - - registry.GetterFunc - registry.ListerFunc - registry.CreaterFunc - registry.UpdaterFunc - registry.WatcherFunc - registry.GracefulDeleterFunc - - registry.TableConvertorFunc - registry.CategoriesProviderFunc - registry.ResetFieldsStrategyFunc - }{ - FactoryFunc: storage.FactoryFunc, - ListFactoryFunc: storage.ListFactoryFunc, - DestroyerFunc: storage.DestroyerFunc, - - GetterFunc: storage.GetterFunc, - ListerFunc: storage.ListerFunc, - CreaterFunc: storage.CreaterFunc, - UpdaterFunc: storage.UpdaterFunc, - WatcherFunc: storage.WatcherFunc, - GracefulDeleterFunc: storage.GracefulDeleterFunc, - - TableConvertorFunc: storage.TableConvertorFunc, - CategoriesProviderFunc: storage.CategoriesProviderFunc, - ResetFieldsStrategyFunc: storage.ResetFieldsStrategyFunc, - }, subresourceStorages - } -} diff --git a/tmc/pkg/virtual/syncer/builder/template.go b/tmc/pkg/virtual/syncer/builder/template.go deleted file mode 100644 index c5fd86fec59..00000000000 --- a/tmc/pkg/virtual/syncer/builder/template.go +++ /dev/null @@ -1,290 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package builder - -import ( - "context" - "errors" - "fmt" - "strings" - - kcpcache "github.com/kcp-dev/apimachinery/v2/pkg/cache" - kcpdynamic "github.com/kcp-dev/client-go/dynamic" - kcpkubernetesclientset "github.com/kcp-dev/client-go/kubernetes" - "github.com/kcp-dev/logicalcluster/v3" - - "k8s.io/apimachinery/pkg/labels" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apiserver/pkg/authorization/authorizer" - genericapirequest "k8s.io/apiserver/pkg/endpoints/request" - genericapiserver "k8s.io/apiserver/pkg/server" - "k8s.io/client-go/tools/cache" - "k8s.io/klog/v2" - - "github.com/kcp-dev/kcp/pkg/authorization/delegated" - "github.com/kcp-dev/kcp/pkg/virtual/framework" - virtualworkspacesdynamic "github.com/kcp-dev/kcp/pkg/virtual/framework/dynamic" - "github.com/kcp-dev/kcp/pkg/virtual/framework/dynamic/apidefinition" - "github.com/kcp-dev/kcp/pkg/virtual/framework/dynamic/apiserver" - dynamiccontext "github.com/kcp-dev/kcp/pkg/virtual/framework/dynamic/context" - "github.com/kcp-dev/kcp/pkg/virtual/framework/forwardingregistry" - "github.com/kcp-dev/kcp/pkg/virtual/framework/transforming" - apisv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" - kcpinformers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions" - syncercontext "github.com/kcp-dev/kcp/tmc/pkg/virtual/syncer/context" - "github.com/kcp-dev/kcp/tmc/pkg/virtual/syncer/controllers/apireconciler" -) - -type templateProvider struct { - kubeClusterClient kcpkubernetesclientset.ClusterInterface - dynamicClusterClient kcpdynamic.ClusterInterface - cachedKCPInformers kcpinformers.SharedInformerFactory - rootPathPrefix string -} - -type templateParameters struct { - virtualWorkspaceName string - - filteredResourceState workloadv1alpha1.ResourceState - - restProviderBuilder BuildRestProviderFunc - allowedAPIFilter apireconciler.AllowedAPIfilterFunc - transformer transforming.ResourceTransformer - storageWrapperBuilder func(labels.Requirements) forwardingregistry.StorageWrapper -} - -func (p *templateProvider) newTemplate(parameters templateParameters) *template { - return &template{ - templateProvider: *p, - templateParameters: parameters, - readyCh: make(chan struct{}), - } -} - -type template struct { - templateProvider - templateParameters - - readyCh chan struct{} -} - -func (t *template) resolveRootPath(urlPath string, requestContext context.Context) (accepted bool, prefixToStrip string, completedContext context.Context) { - select { - case <-t.readyCh: - default: - return - } - - rootPathPrefix := t.rootPathPrefix + t.virtualWorkspaceName + "/" - completedContext = requestContext - if !strings.HasPrefix(urlPath, rootPathPrefix) { - return - } - withoutRootPathPrefix := strings.TrimPrefix(urlPath, rootPathPrefix) - - // Incoming requests to this virtual workspace will look like: - // /services/(up)syncer/root:org:ws///clusters/*/api/v1/configmaps - // └───────────────────────┐ - // Where the withoutRootPathPrefix starts here: ┘ - parts := strings.SplitN(withoutRootPathPrefix, "/", 4) - if len(parts) < 3 || parts[0] == "" || 
parts[1] == "" || parts[2] == "" { - return - } - path := logicalcluster.NewPath(parts[0]) - syncTargetName := parts[1] - syncTargetUID := parts[2] - - clusterName, ok := path.Name() - if !ok { - return - } - - apiDomainKey := dynamiccontext.APIDomainKey(kcpcache.ToClusterAwareKey(clusterName.String(), "", syncTargetName)) - - // In order to avoid conflicts with reusing deleted synctarget names, let's make sure that the synctarget name and synctarget UID match, if not, - // that likely means that a syncer is running with a stale synctarget that got deleted. - syncTarget, err := t.cachedKCPInformers.Workload().V1alpha1().SyncTargets().Cluster(clusterName).Lister().Get(syncTargetName) - if err != nil { - utilruntime.HandleError(fmt.Errorf("failed to get synctarget %s|%s: %w", path, syncTargetName, err)) - return - } - if string(syncTarget.UID) != syncTargetUID { - utilruntime.HandleError(fmt.Errorf("sync target UID mismatch: %s != %s", syncTarget.UID, syncTargetUID)) - return - } - - realPath := "/" - if len(parts) > 3 { - realPath += parts[3] - } - - // /services/(up)syncer/root:org:ws///clusters/*/api/v1/configmaps - // ┌────────────────────────────────────────────────────┘ - // We are now here: ┘ - // Now, we parse out the logical cluster. - if !strings.HasPrefix(realPath, "/clusters/") { - return // don't accept - } - - withoutClustersPrefix := strings.TrimPrefix(realPath, "/clusters/") - parts = strings.SplitN(withoutClustersPrefix, "/", 2) - reqPath := logicalcluster.NewPath(parts[0]) - realPath = "/" - if len(parts) > 1 { - realPath += parts[1] - } - var cluster genericapirequest.Cluster - if reqPath == logicalcluster.Wildcard { - cluster.Wildcard = true - } else { - reqClusterName, ok := reqPath.Name() - if !ok { - return - } - cluster.Name = reqClusterName - } - - syncTargetKey := workloadv1alpha1.ToSyncTargetKey(clusterName, syncTargetName) - completedContext = genericapirequest.WithCluster(requestContext, cluster) - completedContext = syncercontext.WithSyncTargetKey(completedContext, syncTargetKey) - completedContext = dynamiccontext.WithAPIDomainKey(completedContext, apiDomainKey) - prefixToStrip = strings.TrimSuffix(urlPath, realPath) - accepted = true - return -} - -func (t *template) ready() error { - select { - case <-t.readyCh: - return nil - default: - return errors.New("syncer virtual workspace controllers are not started") - } -} - -func (t *template) authorize(ctx context.Context, a authorizer.Attributes) (authorizer.Decision, string, error) { - syncTargetKey := dynamiccontext.APIDomainKeyFrom(ctx) - negotiationWorkspaceName, _, syncTargetName, err := kcpcache.SplitMetaClusterNamespaceKey(string(syncTargetKey)) - if err != nil { - return authorizer.DecisionNoOpinion, "", err - } - - authz, err := delegated.NewDelegatedAuthorizer(negotiationWorkspaceName, t.kubeClusterClient, delegated.Options{}) - if err != nil { - return authorizer.DecisionNoOpinion, "Error", err - } - SARAttributes := authorizer.AttributesRecord{ - User: a.GetUser(), - Verb: "sync", - Name: syncTargetName, - APIGroup: workloadv1alpha1.SchemeGroupVersion.Group, - APIVersion: workloadv1alpha1.SchemeGroupVersion.Version, - Resource: "synctargets", - ResourceRequest: true, - } - return authz.Authorize(ctx, SARAttributes) -} - -func (t *template) bootstrapManagement(mainConfig genericapiserver.CompletedConfig) (apidefinition.APIDefinitionSetGetter, error) { - apiReconciler, err := apireconciler.NewAPIReconciler( - t.virtualWorkspaceName, - t.cachedKCPInformers.Workload().V1alpha1().SyncTargets(), - 
t.cachedKCPInformers.Apis().V1alpha1().APIResourceSchemas(), - t.cachedKCPInformers.Apis().V1alpha1().APIExports(), - func(syncTargetClusterName logicalcluster.Name, syncTargetName string, apiResourceSchema *apisv1alpha1.APIResourceSchema, version string, apiExportIdentityHash string) (apidefinition.APIDefinition, error) { - syncTargetKey := workloadv1alpha1.ToSyncTargetKey(syncTargetClusterName, syncTargetName) - requirements, selectable := labels.SelectorFromSet(map[string]string{ - workloadv1alpha1.ClusterResourceStateLabelPrefix + syncTargetKey: string(t.filteredResourceState), - }).Requirements() - if !selectable { - return nil, fmt.Errorf("unable to build requirements for synctargetkey %s and resource state %s", syncTargetKey, t.filteredResourceState) - } - storageWrapper := t.storageWrapperBuilder(requirements) - transformingClient := t.dynamicClusterClient - if t.transformer != nil { - transformingClient = transforming.WithResourceTransformer(t.dynamicClusterClient, t.transformer) - } - ctx, cancelFn := context.WithCancel(context.Background()) - storageBuilder := t.restProviderBuilder(ctx, transformingClient, apiExportIdentityHash, storageWrapper) - def, err := apiserver.CreateServingInfoFor(mainConfig, apiResourceSchema, version, storageBuilder) - if err != nil { - cancelFn() - return nil, err - } - return &apiDefinitionWithCancel{ - APIDefinition: def, - cancelFn: cancelFn, - }, nil - }, - t.allowedAPIFilter, - ) - if err != nil { - return nil, err - } - - if err := mainConfig.AddPostStartHook(apireconciler.ControllerName+t.virtualWorkspaceName, func(hookContext genericapiserver.PostStartHookContext) error { - defer close(t.readyCh) - - for name, informer := range map[string]cache.SharedIndexInformer{ - "synctargets": t.cachedKCPInformers.Workload().V1alpha1().SyncTargets().Informer(), - "apiresourceschemas": t.cachedKCPInformers.Apis().V1alpha1().APIResourceSchemas().Informer(), - "apiexports": t.cachedKCPInformers.Apis().V1alpha1().APIExports().Informer(), - } { - if !cache.WaitForNamedCacheSync(name, hookContext.StopCh, informer.HasSynced) { - klog.Background().Error(nil, "informer not synced") - return nil - } - } - - go apiReconciler.Start(goContext(hookContext)) - return nil - }); err != nil { - return nil, err - } - - return apiReconciler, nil -} - -func (t *template) buildVirtualWorkspace() *virtualworkspacesdynamic.DynamicVirtualWorkspace { - return &virtualworkspacesdynamic.DynamicVirtualWorkspace{ - RootPathResolver: framework.RootPathResolverFunc(t.resolveRootPath), - Authorizer: authorizer.AuthorizerFunc(t.authorize), - ReadyChecker: framework.ReadyFunc(t.ready), - BootstrapAPISetManagement: t.bootstrapManagement, - } -} - -// apiDefinitionWithCancel calls the cancelFn on tear-down. -type apiDefinitionWithCancel struct { - apidefinition.APIDefinition - cancelFn func() -} - -func (d *apiDefinitionWithCancel) TearDown() { - d.cancelFn() - d.APIDefinition.TearDown() -} - -func goContext(parent genericapiserver.PostStartHookContext) context.Context { - ctx, cancel := context.WithCancel(context.Background()) - go func(done <-chan struct{}) { - <-done - cancel() - }(parent.StopCh) - return ctx -} diff --git a/tmc/pkg/virtual/syncer/context/keys.go b/tmc/pkg/virtual/syncer/context/keys.go deleted file mode 100644 index d9214b9be91..00000000000 --- a/tmc/pkg/virtual/syncer/context/keys.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Copyright 2022 The KCP Authors. 
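Before moving on to the deleted context-keys file, it is worth restating what `resolveRootPath` above actually parses: URLs of the form `/services/(up)syncer/<workspace-path>/<sync-target-name>/<sync-target-uid>/clusters/<cluster>/...`. A dependency-free sketch of that splitting logic, with illustrative names:

```go
package main

import (
	"fmt"
	"strings"
)

// parseSyncerPath mimics the path grammar handled by the removed resolveRootPath.
func parseSyncerPath(urlPath, rootPathPrefix string) (workspace, target, uid, rest string, ok bool) {
	if !strings.HasPrefix(urlPath, rootPathPrefix) {
		return
	}
	parts := strings.SplitN(strings.TrimPrefix(urlPath, rootPathPrefix), "/", 4)
	if len(parts) < 3 || parts[0] == "" || parts[1] == "" || parts[2] == "" {
		return
	}
	workspace, target, uid = parts[0], parts[1], parts[2]
	rest = "/"
	if len(parts) > 3 {
		rest += parts[3] // e.g. "clusters/*/api/v1/configmaps"
	}
	return workspace, target, uid, rest, true
}

func main() {
	w, t, u, rest, ok := parseSyncerPath(
		"/services/syncer/root:org:ws/my-target/1234-uid/clusters/*/api/v1/configmaps",
		"/services/syncer/",
	)
	fmt.Println(ok, w, t, u, rest)
	// true root:org:ws my-target 1234-uid /clusters/*/api/v1/configmaps
}
```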
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package context
-
-import (
-	"context"
-	"errors"
-)
-
-// syncTargetNameContextKeyType is the type of the key for the request context value
-// that will carry the key of the SyncTarget to be synced with.
-type syncTargetNameContextKeyType string
-
-// syncTargetKeyContextKey is the key for the request context value
-// that will carry the key of the SyncTarget to be synced with.
-const syncTargetKeyContextKey syncTargetNameContextKeyType = "SyncerVirtualWorkspaceSyncTargetKey"
-
-// WithSyncTargetKey adds a SyncTarget key to the context.
-func WithSyncTargetKey(ctx context.Context, syncTargetKey string) context.Context {
-	return context.WithValue(ctx, syncTargetKeyContextKey, syncTargetKey)
-}
-
-// SyncTargetKeyFrom retrieves the SyncTarget key from the context, if any.
-func SyncTargetKeyFrom(ctx context.Context) (string, error) {
-	stk, hasSyncTargetKey := ctx.Value(syncTargetKeyContextKey).(string)
-	if !hasSyncTargetKey {
-		return "", errors.New("context must contain a valid non-empty SyncTarget key")
-	}
-	return stk, nil
-}
diff --git a/tmc/pkg/virtual/syncer/controllers/apireconciler/syncer_apireconciler_controller.go b/tmc/pkg/virtual/syncer/controllers/apireconciler/syncer_apireconciler_controller.go
deleted file mode 100644
index a1fb3235944..00000000000
--- a/tmc/pkg/virtual/syncer/controllers/apireconciler/syncer_apireconciler_controller.go
+++ /dev/null
@@ -1,286 +0,0 @@
-/*
-Copyright 2022 The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/ - -package apireconciler - -import ( - "context" - "fmt" - "sync" - "time" - - "github.com/go-logr/logr" - kcpcache "github.com/kcp-dev/apimachinery/v2/pkg/cache" - "github.com/kcp-dev/logicalcluster/v3" - - "k8s.io/apimachinery/pkg/api/equality" - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/util/workqueue" - "k8s.io/klog/v2" - - "github.com/kcp-dev/kcp/pkg/indexers" - "github.com/kcp-dev/kcp/pkg/logging" - "github.com/kcp-dev/kcp/pkg/virtual/framework/dynamic/apidefinition" - dynamiccontext "github.com/kcp-dev/kcp/pkg/virtual/framework/dynamic/context" - apisv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" - apisv1alpha1informers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/apis/v1alpha1" - workloadv1alpha1informers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions/workload/v1alpha1" - apisv1alpha1listers "github.com/kcp-dev/kcp/sdk/client/listers/apis/v1alpha1" - workloadv1alpha1listers "github.com/kcp-dev/kcp/sdk/client/listers/workload/v1alpha1" -) - -const ( - ControllerName = "kcp-virtual-syncer-api-reconciler-" - IndexSyncTargetsByExport = ControllerName + "ByExport" - IndexAPIExportsByAPIResourceSchema = ControllerName + "ByAPIResourceSchema" -) - -type CreateAPIDefinitionFunc func(syncTargetWorkspace logicalcluster.Name, syncTargetName string, apiResourceSchema *apisv1alpha1.APIResourceSchema, version string, identityHash string) (apidefinition.APIDefinition, error) -type AllowedAPIfilterFunc func(apiGroupResource schema.GroupResource) bool - -func NewAPIReconciler( - virtualWorkspaceName string, - syncTargetInformer workloadv1alpha1informers.SyncTargetClusterInformer, - apiResourceSchemaInformer apisv1alpha1informers.APIResourceSchemaClusterInformer, - apiExportInformer apisv1alpha1informers.APIExportClusterInformer, - createAPIDefinition CreateAPIDefinitionFunc, - allowedAPIfilter AllowedAPIfilterFunc, -) (*APIReconciler, error) { - queue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), ControllerName+virtualWorkspaceName) - - c := &APIReconciler{ - virtualWorkspaceName: virtualWorkspaceName, - - syncTargetLister: syncTargetInformer.Lister(), - syncTargetIndexer: syncTargetInformer.Informer().GetIndexer(), - - apiResourceSchemaLister: apiResourceSchemaInformer.Lister(), - - apiExportLister: apiExportInformer.Lister(), - apiExportIndexer: apiExportInformer.Informer().GetIndexer(), - - queue: queue, - - createAPIDefinition: createAPIDefinition, - allowedAPIfilter: allowedAPIfilter, - - apiSets: map[dynamiccontext.APIDomainKey]apidefinition.APIDefinitionSet{}, - } - - logger := logging.WithReconciler(klog.Background(), ControllerName+virtualWorkspaceName) - - indexers.AddIfNotPresentOrDie(apiExportInformer.Informer().GetIndexer(), cache.Indexers{ - indexers.ByLogicalClusterPathAndName: indexers.IndexByLogicalClusterPathAndName, - }) - - _, _ = syncTargetInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { c.enqueueSyncTarget(obj, logger, "") }, - UpdateFunc: func(old, obj interface{}) { - oldCluster := old.(*workloadv1alpha1.SyncTarget) - newCluster := obj.(*workloadv1alpha1.SyncTarget) - - // only enqueue when syncedResource is changed. 
- if !equality.Semantic.DeepEqual(oldCluster.Status.SyncedResources, newCluster.Status.SyncedResources) { - c.enqueueSyncTarget(obj, logger, "") - } - }, - DeleteFunc: func(obj interface{}) { c.enqueueSyncTarget(obj, logger, "") }, - }) - - _, _ = apiResourceSchemaInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { c.enqueueAPIResourceSchema(obj, logger) }, - DeleteFunc: func(obj interface{}) { c.enqueueAPIResourceSchema(obj, logger) }, - }) - - _, _ = apiExportInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { c.enqueueAPIExport(obj, logger, "") }, - UpdateFunc: func(_, obj interface{}) { c.enqueueAPIExport(obj, logger, "") }, - DeleteFunc: func(obj interface{}) { c.enqueueAPIExport(obj, logger, "") }, - }) - - return c, nil -} - -// APIReconciler is a controller watching APIExports, APIResourceSchemas and SyncTargets, and updates the -// API definitions driving the virtual workspace. -type APIReconciler struct { - virtualWorkspaceName string - - syncTargetLister workloadv1alpha1listers.SyncTargetClusterLister - syncTargetIndexer cache.Indexer - - apiResourceSchemaLister apisv1alpha1listers.APIResourceSchemaClusterLister - - apiExportLister apisv1alpha1listers.APIExportClusterLister - apiExportIndexer cache.Indexer - - queue workqueue.RateLimitingInterface - - createAPIDefinition CreateAPIDefinitionFunc - allowedAPIfilter AllowedAPIfilterFunc - - mutex sync.RWMutex // protects the map, not the values! - apiSets map[dynamiccontext.APIDomainKey]apidefinition.APIDefinitionSet -} - -func (c *APIReconciler) enqueueSyncTarget(obj interface{}, logger logr.Logger, logSuffix string) { - key, err := kcpcache.DeletionHandlingMetaClusterNamespaceKeyFunc(obj) - if err != nil { - runtime.HandleError(err) - return - } - - logging.WithQueueKey(logger, key).V(2).Info(fmt.Sprintf("queueing SyncTarget%s", logSuffix)) - c.queue.Add(key) -} - -func (c *APIReconciler) enqueueAPIExport(obj interface{}, logger logr.Logger, logSuffix string) { - key, err := kcpcache.DeletionHandlingMetaClusterNamespaceKeyFunc(obj) - if err != nil { - runtime.HandleError(err) - return - } - - syncTargets, err := indexers.ByIndex[*workloadv1alpha1.SyncTarget](c.syncTargetIndexer, IndexSyncTargetsByExport, key) - if err != nil { - runtime.HandleError(err) - return - } - - for _, syncTarget := range syncTargets { - logger := logging.WithObject(logger, syncTarget) - c.enqueueSyncTarget(syncTarget, logger, " because of APIExport") - } -} - -// enqueueAPIResourceSchema maps an APIResourceSchema to APIExports for enqueuing. 
-func (c *APIReconciler) enqueueAPIResourceSchema(obj interface{}, logger logr.Logger) { - key, err := kcpcache.DeletionHandlingMetaClusterNamespaceKeyFunc(obj) - if err != nil { - runtime.HandleError(err) - return - } - - apiExports, err := indexers.ByIndex[*apisv1alpha1.APIExport](c.apiExportIndexer, IndexAPIExportsByAPIResourceSchema, key) - if err != nil { - runtime.HandleError(err) - return - } - - for _, apiExport := range apiExports { - logger := logging.WithObject(logger, apiExport) - c.enqueueAPIExport(apiExport, logger, " because of APIResourceSchema") - } -} - -func (c *APIReconciler) startWorker(ctx context.Context) { - for c.processNextWorkItem(ctx) { - } -} - -func (c *APIReconciler) Start(ctx context.Context) { - defer runtime.HandleCrash() - defer c.queue.ShutDown() - - logger := logging.WithReconciler(klog.FromContext(ctx), ControllerName+c.virtualWorkspaceName) - ctx = klog.NewContext(ctx, logger) - logger.Info("Starting controller") - defer logger.Info("Shutting down controller") - - go wait.Until(func() { c.startWorker(ctx) }, time.Second, ctx.Done()) - - // stop all watches if the controller is stopped - defer func() { - c.mutex.Lock() - defer c.mutex.Unlock() - for _, sets := range c.apiSets { - for _, v := range sets { - v.TearDown() - } - } - }() - - <-ctx.Done() -} - -func (c *APIReconciler) ShutDown() { - c.queue.ShutDown() -} - -func (c *APIReconciler) processNextWorkItem(ctx context.Context) bool { - // Wait until there is a new item in the working queue - k, quit := c.queue.Get() - if quit { - return false - } - key := k.(string) - - // No matter what, tell the queue we're done with this key, to unblock - // other workers. - defer c.queue.Done(key) - - if err := c.process(ctx, key); err != nil { - runtime.HandleError(fmt.Errorf("%s: failed to sync %q, err: %w", ControllerName+c.virtualWorkspaceName, key, err)) - c.queue.AddRateLimited(key) - return true - } - - c.queue.Forget(key) - return true -} - -func (c *APIReconciler) process(ctx context.Context, key string) error { - apiDomainKey := dynamiccontext.APIDomainKey(key) - - logger := logging.WithQueueKey(klog.FromContext(ctx), key) - ctx = klog.NewContext(ctx, logger) - - clusterName, _, syncTargetName, err := kcpcache.SplitMetaClusterNamespaceKey(key) - if err != nil { - runtime.HandleError(err) - return nil - } - syncTarget, err := c.syncTargetLister.Cluster(clusterName).Get(syncTargetName) - if apierrors.IsNotFound(err) { - c.removeAPIDefinitionSet(apiDomainKey) - return nil - } - if err != nil { - return err - } - - return c.reconcile(ctx, apiDomainKey, syncTarget) -} - -func (c *APIReconciler) GetAPIDefinitionSet(_ context.Context, key dynamiccontext.APIDomainKey) (apidefinition.APIDefinitionSet, bool, error) { - c.mutex.RLock() - defer c.mutex.RUnlock() - - apiSet, ok := c.apiSets[key] - return apiSet, ok, nil -} - -func (c *APIReconciler) removeAPIDefinitionSet(key dynamiccontext.APIDomainKey) { - c.mutex.Lock() - defer c.mutex.Unlock() - - delete(c.apiSets, key) -} diff --git a/tmc/pkg/virtual/syncer/controllers/apireconciler/syncer_apireconciler_indexes.go b/tmc/pkg/virtual/syncer/controllers/apireconciler/syncer_apireconciler_indexes.go deleted file mode 100644 index 4fd229f6d91..00000000000 --- a/tmc/pkg/virtual/syncer/controllers/apireconciler/syncer_apireconciler_indexes.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
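`GetAPIDefinitionSet` and `removeAPIDefinitionSet` above show the reconciler's concurrency contract: the RWMutex "protects the map, not the values", and the reconcile loop swaps whole `APIDefinitionSet` values in and out rather than mutating them. The same pattern in a self-contained generic form (illustrative, not the kcp API):

```go
package main

import (
	"fmt"
	"sync"
)

// setRegistry guards the map itself; values are replaced wholesale, never
// mutated in place, matching the removed APIReconciler's contract.
type setRegistry[K comparable, V any] struct {
	mu   sync.RWMutex
	sets map[K]V
}

func (r *setRegistry[K, V]) Get(k K) (V, bool) {
	r.mu.RLock()
	defer r.mu.RUnlock()
	v, ok := r.sets[k]
	return v, ok
}

func (r *setRegistry[K, V]) Replace(k K, v V) {
	r.mu.Lock()
	defer r.mu.Unlock()
	if r.sets == nil {
		r.sets = map[K]V{}
	}
	r.sets[k] = v
}

func (r *setRegistry[K, V]) Remove(k K) {
	r.mu.Lock()
	defer r.mu.Unlock()
	delete(r.sets, k)
}

func main() {
	var r setRegistry[string, []string]
	r.Replace("root:org:ws/target", []string{"configmaps.v1.core"})
	v, ok := r.Get("root:org:ws/target")
	fmt.Println(v, ok) // [configmaps.v1.core] true
}
```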
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package apireconciler - -import ( - "github.com/kcp-dev/logicalcluster/v3" - - apisv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" - "github.com/kcp-dev/kcp/sdk/client" -) - -// IndexAPIExportsByAPIResourceSchemas is an index function that maps an APIExport to its spec.latestResourceSchemas. -func IndexAPIExportsByAPIResourceSchemas(obj interface{}) ([]string, error) { - apiExport := obj.(*apisv1alpha1.APIExport) - - ret := make([]string, len(apiExport.Spec.LatestResourceSchemas)) - for i := range apiExport.Spec.LatestResourceSchemas { - ret[i] = client.ToClusterAwareKey(logicalcluster.From(apiExport).Path(), apiExport.Spec.LatestResourceSchemas[i]) - } - - return ret, nil -} - -func IndexSyncTargetsByExports(obj interface{}) ([]string, error) { - syncTarget := obj.(*workloadv1alpha1.SyncTarget) - - clusterName := logicalcluster.From(syncTarget) - keys := make([]string, 0, len(syncTarget.Spec.SupportedAPIExports)) - for _, export := range syncTarget.Spec.SupportedAPIExports { - path := export.Path - if path == "" { - path = clusterName.String() - } - keys = append(keys, client.ToClusterAwareKey(logicalcluster.NewPath(path), export.Export)) - } - - return keys, nil -} diff --git a/tmc/pkg/virtual/syncer/controllers/apireconciler/syncer_apireconciler_reconcile.go b/tmc/pkg/virtual/syncer/controllers/apireconciler/syncer_apireconciler_reconcile.go deleted file mode 100644 index 8b271826ea0..00000000000 --- a/tmc/pkg/virtual/syncer/controllers/apireconciler/syncer_apireconciler_reconcile.go +++ /dev/null @@ -1,232 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
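`IndexSyncTargetsByExports` above defaults an empty export path to the SyncTarget's own logical cluster before building a cluster-aware key. A dependency-free restatement of that defaulting rule (the real code delegates key construction to `client.ToClusterAwareKey`; the `path|name` key shape below is purely illustrative):

```go
package main

import "fmt"

type exportRef struct{ Path, Export string }

// exportKeys mirrors the defaulting in IndexSyncTargetsByExports:
// an empty Path means "the SyncTarget's own logical cluster".
func exportKeys(clusterName string, exports []exportRef) []string {
	keys := make([]string, 0, len(exports))
	for _, e := range exports {
		path := e.Path
		if path == "" {
			path = clusterName
		}
		keys = append(keys, path+"|"+e.Export) // illustrative key shape
	}
	return keys
}

func main() {
	fmt.Println(exportKeys("root:org:ws", []exportRef{
		{Export: "kubernetes"},                       // defaults to root:org:ws
		{Path: "root:compute", Export: "kubernetes"}, // explicit path wins
	}))
	// [root:org:ws|kubernetes root:compute|kubernetes]
}
```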
-*/ - -package apireconciler - -import ( - "context" - "fmt" - - "github.com/kcp-dev/logicalcluster/v3" - - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/klog/v2" - - "github.com/kcp-dev/kcp/pkg/indexers" - "github.com/kcp-dev/kcp/pkg/logging" - "github.com/kcp-dev/kcp/pkg/virtual/framework/dynamic/apidefinition" - dynamiccontext "github.com/kcp-dev/kcp/pkg/virtual/framework/dynamic/context" - apisv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" - syncerbuiltin "github.com/kcp-dev/kcp/tmc/pkg/virtual/syncer/schemas/builtin" -) - -func (c *APIReconciler) reconcile(ctx context.Context, apiDomainKey dynamiccontext.APIDomainKey, syncTarget *workloadv1alpha1.SyncTarget) error { - c.mutex.RLock() - oldSet := c.apiSets[apiDomainKey] - c.mutex.RUnlock() - - logger := klog.FromContext(ctx) - - // collect APIResourceSchemas by syncTarget. - apiResourceSchemas, schemaIdentites, err := c.getAllAcceptedResourceSchemas(ctx, syncTarget) - if err != nil { - return err - } - - // add built-in apiResourceSchema - for _, apiResourceSchema := range syncerbuiltin.SyncerSchemas { - shallow := *apiResourceSchema - if shallow.Annotations == nil { - shallow.Annotations = make(map[string]string) - } - shallow.Annotations[logicalcluster.AnnotationKey] = logicalcluster.From(syncTarget).String() - apiResourceSchemas[schema.GroupResource{ - Group: apiResourceSchema.Spec.Group, - Resource: apiResourceSchema.Spec.Names.Plural, - }] = &shallow - } - - // reconcile APIs for APIResourceSchemas - newSet := apidefinition.APIDefinitionSet{} - newGVRs := []string{} - preservedGVR := []string{} - for gr, apiResourceSchema := range apiResourceSchemas { - if c.allowedAPIfilter != nil && !c.allowedAPIfilter(gr) { - continue - } - - for _, version := range apiResourceSchema.Spec.Versions { - if !version.Served { - continue - } - - gvr := schema.GroupVersionResource{ - Group: gr.Group, - Version: version.Name, - Resource: gr.Resource, - } - - oldDef, found := oldSet[gvr] - if found { - oldDef := oldDef.(apiResourceSchemaApiDefinition) - if oldDef.UID != apiResourceSchema.UID { - logging.WithObject(logger, apiResourceSchema).V(4).Info("APIResourceSchema UID has changed:", "oldUID", oldDef.UID, "newUID", apiResourceSchema.UID) - } - if oldDef.IdentityHash != schemaIdentites[gr] { - logging.WithObject(logger, apiResourceSchema).V(4).Info("APIResourceSchema identity hash has changed", "oldIdentityHash", oldDef.IdentityHash, "newIdentityHash", schemaIdentites[gr]) - } - if oldDef.UID == apiResourceSchema.UID && oldDef.IdentityHash == schemaIdentites[gr] { - // this is the same schema and identity as before. no need to update. 
- newSet[gvr] = oldDef - preservedGVR = append(preservedGVR, gvrString(gvr)) - continue - } - } - - apiDefinition, err := c.createAPIDefinition(logicalcluster.From(syncTarget), syncTarget.Name, apiResourceSchema, version.Name, schemaIdentites[gr]) - if err != nil { - logger.WithValues("gvr", gvr).Error(err, "failed to create API definition") - continue - } - - newSet[gvr] = apiResourceSchemaApiDefinition{ - APIDefinition: apiDefinition, - UID: apiResourceSchema.UID, - IdentityHash: schemaIdentites[gr], - } - newGVRs = append(newGVRs, gvrString(gvr)) - } - } - - // cleanup old definitions - removedGVRs := []string{} - for gvr, oldDef := range oldSet { - if _, found := newSet[gvr]; !found || oldDef != newSet[gvr] { - removedGVRs = append(removedGVRs, gvrString(gvr)) - oldDef.TearDown() - } - } - - logging.WithObject(logger, syncTarget).WithValues("APIDomainKey", apiDomainKey).V(2).Info("Updating APIs for SyncTarget and APIDomainKey", "newGVRs", newGVRs, "preservedGVRs", preservedGVR, "removedGVRs", removedGVRs) - - c.mutex.Lock() - defer c.mutex.Unlock() - c.apiSets[apiDomainKey] = newSet - - return nil -} - -type apiResourceSchemaApiDefinition struct { - apidefinition.APIDefinition - - UID types.UID - IdentityHash string -} - -func gvrString(gvr schema.GroupVersionResource) string { - group := gvr.Group - if group == "" { - group = "core" - } - return fmt.Sprintf("%s.%s.%s", gvr.Resource, gvr.Version, group) -} - -// getAllAcceptedResourceSchemas return all resourceSchemas from APIExports defined in this syncTarget filtered by the status.syncedResource -// of syncTarget such that only resources with accepted state is returned, together with their identityHash. -func (c *APIReconciler) getAllAcceptedResourceSchemas(ctx context.Context, syncTarget *workloadv1alpha1.SyncTarget) (map[schema.GroupResource]*apisv1alpha1.APIResourceSchema, map[schema.GroupResource]string, error) { - apiResourceSchemas := map[schema.GroupResource]*apisv1alpha1.APIResourceSchema{} - - identityHashByGroupResource := map[schema.GroupResource]string{} - - logger := klog.FromContext(ctx) - logger.V(4).Info("getting identity hashes for compatible APIs", "count", len(syncTarget.Status.SyncedResources)) - - // get all identityHash for compatible APIs - for _, syncedResource := range syncTarget.Status.SyncedResources { - logger := logger.WithValues( - "group", syncedResource.Group, - "resource", syncedResource.Resource, - "identity", syncedResource.IdentityHash, - ) - if syncedResource.State == workloadv1alpha1.ResourceSchemaAcceptedState { - logger.V(4).Info("including synced resource because it is accepted") - identityHashByGroupResource[schema.GroupResource{ - Group: syncedResource.Group, - Resource: syncedResource.Resource, - }] = syncedResource.IdentityHash - } else { - logger.V(4).Info("excluding synced resource because it is unaccepted") - } - } - - logger.V(4).Info("processing supported APIExports", "count", len(syncTarget.Spec.SupportedAPIExports)) - var errs []error - for _, exportRef := range syncTarget.Spec.SupportedAPIExports { - logger.V(4).Info("looking at export", "path", exportRef.Path, "name", exportRef.Export) - - path := logicalcluster.NewPath(exportRef.Path) - if path.Empty() { - logger.V(4).Info("falling back to sync target's logical cluster for path") - path = logicalcluster.From(syncTarget).Path() - } - - logger := logger.WithValues("path", path, "name", exportRef.Export) - logger.V(4).Info("getting APIExport") - apiExport, err := 
indexers.ByPathAndName[*apisv1alpha1.APIExport](apisv1alpha1.Resource("apiexports"), c.apiExportIndexer, path, exportRef.Export) - if err != nil { - logger.V(4).Error(err, "error getting APIExport") - errs = append(errs, err) - continue - } - - logger.V(4).Info("checking APIExport's schemas", "count", len(apiExport.Spec.LatestResourceSchemas)) - for _, schemaName := range apiExport.Spec.LatestResourceSchemas { - logger := logger.WithValues("schema", schemaName) - logger.V(4).Info("getting APIResourceSchema") - apiResourceSchema, err := c.apiResourceSchemaLister.Cluster(logicalcluster.From(apiExport)).Get(schemaName) - if apierrors.IsNotFound(err) { - logger.V(4).Info("APIResourceSchema not found") - continue - } - if err != nil { - logger.V(4).Error(err, "error getting APIResourceSchema") - errs = append(errs, err) - continue - } - - gr := schema.GroupResource{ - Group: apiResourceSchema.Spec.Group, - Resource: apiResourceSchema.Spec.Names.Plural, - } - - logger = logger.WithValues("group", gr.Group, "resource", gr.Resource) - - // if identityHash does not exist, it is not a compatible API. - if _, ok := identityHashByGroupResource[gr]; ok { - logger.V(4).Info("identity found, including resource") - apiResourceSchemas[gr] = apiResourceSchema - } else { - logger.V(4).Info("identity not found, excluding resource") - } - } - } - - return apiResourceSchemas, identityHashByGroupResource, errors.NewAggregate(errs) -} diff --git a/tmc/pkg/virtual/syncer/doc.go b/tmc/pkg/virtual/syncer/doc.go deleted file mode 100644 index 673d29255c5..00000000000 --- a/tmc/pkg/virtual/syncer/doc.go +++ /dev/null @@ -1,35 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package syncer and its sub-packages provide the Syncer Virtual Workspace. -// -// It exposes an APIserver URL for each SyncTarget hosting a syncer agent, -// with REST endpoints for APIs that have been imported from this SyncTarget and published. -// -// It combines and integrates: -// -// - a controller (APIReconciler) that watches for available APIResourceImports and updates the list of installed APIs -// for the corresponding SyncTarget (in the ./controllers package) -// -// - a DynamicVirtualWorkspace instantiation that exposes and serve installed APIs on the right sync-target-dedicated path -// through CRD-like handlers (in the ../framework/dynamic package) -// -// - a REST storage implementation, named ForwardingREST, that can dynamically serve resources by delegating to -// a KCP workspace-aware client-go dynamic client (in the ../framework/forwardingregistry package) -// -// The builder package is the place where all these components are combined together, especially in the -// BuildVirtualWorkspace() function. -package syncer diff --git a/tmc/pkg/virtual/syncer/options/options.go b/tmc/pkg/virtual/syncer/options/options.go deleted file mode 100644 index 30361f1c0de..00000000000 --- a/tmc/pkg/virtual/syncer/options/options.go +++ /dev/null @@ -1,69 +0,0 @@ -/* -Copyright 2022 The KCP Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package options - -import ( - kcpdynamic "github.com/kcp-dev/client-go/dynamic" - kcpkubernetesclientset "github.com/kcp-dev/client-go/kubernetes" - "github.com/spf13/pflag" - - "k8s.io/client-go/rest" - - "github.com/kcp-dev/kcp/pkg/virtual/framework/rootapiserver" - kcpinformers "github.com/kcp-dev/kcp/sdk/client/informers/externalversions" - "github.com/kcp-dev/kcp/tmc/pkg/virtual/syncer/builder" -) - -type Syncer struct{} - -func New() *Syncer { - return &Syncer{} -} - -func (o *Syncer) AddFlags(flags *pflag.FlagSet, prefix string) { - if o == nil { - return - } -} - -func (o *Syncer) Validate(flagPrefix string) []error { - if o == nil { - return nil - } - errs := []error{} - - return errs -} - -func (o *Syncer) NewVirtualWorkspaces( - rootPathPrefix string, - shardExternalURL func() string, - config *rest.Config, - cachedKCPInformers kcpinformers.SharedInformerFactory, -) (workspaces []rootapiserver.NamedVirtualWorkspace, err error) { - config = rest.AddUserAgent(rest.CopyConfig(config), "syncer-virtual-workspace") - kubeClusterClient, err := kcpkubernetesclientset.NewForConfig(config) - if err != nil { - return nil, err - } - dynamicClusterClient, err := kcpdynamic.NewForConfig(config) - if err != nil { - return nil, err - } - - return builder.BuildVirtualWorkspace(rootPathPrefix, shardExternalURL, kubeClusterClient, dynamicClusterClient, cachedKCPInformers), nil -} diff --git a/tmc/pkg/virtual/syncer/schemas/builtin/builtin.go b/tmc/pkg/virtual/syncer/schemas/builtin/builtin.go deleted file mode 100644 index cbabeea4e12..00000000000 --- a/tmc/pkg/virtual/syncer/schemas/builtin/builtin.go +++ /dev/null @@ -1,100 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package builtin - -import ( - corev1 "k8s.io/api/core/v1" - apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/kube-openapi/pkg/common" - "k8s.io/kubernetes/pkg/api/legacyscheme" - "k8s.io/kubernetes/pkg/apis/core/install/genericcontrolplane" - generatedopenapi "k8s.io/kubernetes/pkg/generated/openapi" - - "github.com/kcp-dev/kcp/pkg/virtual/framework/internalapis" - apisv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1" -) - -// syncerSchemas contains a list of internal APIs that should be exposed for the -// syncer of any SyncTarget. 
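// [Editor's illustrative aside, not part of the deleted sources.] The map
// declared below is keyed by group and plural resource name, as the init
// function that follows shows. A lookup of one of the four built-in core/v1
// schemas would look like this (a sketch; the variable names are invented):
//
//	cmSchema := SyncerSchemas[apisv1alpha1.GroupResource{Group: "", Resource: "configmaps"}]
//	fmt.Println(cmSchema.Spec.Names.Kind) // "ConfigMap"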
-var SyncerSchemas map[apisv1alpha1.GroupResource]*apisv1alpha1.APIResourceSchema - -func init() { - genericcontrolplane.Install(legacyscheme.Scheme) - schemes := []*runtime.Scheme{legacyscheme.Scheme} - openAPIDefinitionsGetters := []common.GetOpenAPIDefinitions{generatedopenapi.GetOpenAPIDefinitions} - - apis, err := internalapis.CreateAPIResourceSchemas(schemes, openAPIDefinitionsGetters, syncerInternalAPIs...) - if err != nil { - panic(err) - } - - SyncerSchemas = make(map[apisv1alpha1.GroupResource]*apisv1alpha1.APIResourceSchema, len(apis)) - for _, api := range apis { - SyncerSchemas[apisv1alpha1.GroupResource{ - Group: api.Spec.Group, - Resource: api.Spec.Names.Plural, - }] = api - } -} - -// syncerInternalAPIs provides a list of built-in APIs that are available for -// all workspaces accessed via the syncer virtual workspace. -var syncerInternalAPIs = []internalapis.InternalAPI{ - { - Names: apiextensionsv1.CustomResourceDefinitionNames{ - Plural: "namespaces", - Singular: "namespace", - Kind: "Namespace", - }, - GroupVersion: schema.GroupVersion{Group: "", Version: "v1"}, - Instance: &corev1.Namespace{}, - ResourceScope: apiextensionsv1.ClusterScoped, - HasStatus: true, - }, - { - Names: apiextensionsv1.CustomResourceDefinitionNames{ - Plural: "configmaps", - Singular: "configmap", - Kind: "ConfigMap", - }, - GroupVersion: schema.GroupVersion{Group: "", Version: "v1"}, - Instance: &corev1.ConfigMap{}, - ResourceScope: apiextensionsv1.NamespaceScoped, - }, - { - Names: apiextensionsv1.CustomResourceDefinitionNames{ - Plural: "secrets", - Singular: "secret", - Kind: "Secret", - }, - GroupVersion: schema.GroupVersion{Group: "", Version: "v1"}, - Instance: &corev1.Secret{}, - ResourceScope: apiextensionsv1.NamespaceScoped, - }, - { - Names: apiextensionsv1.CustomResourceDefinitionNames{ - Plural: "serviceaccounts", - Singular: "serviceaccount", - Kind: "ServiceAccount", - }, - GroupVersion: schema.GroupVersion{Group: "", Version: "v1"}, - Instance: &corev1.ServiceAccount{}, - ResourceScope: apiextensionsv1.NamespaceScoped, - }, -} diff --git a/tmc/pkg/virtual/syncer/schemas/builtin/builtin_test.go b/tmc/pkg/virtual/syncer/schemas/builtin/builtin_test.go deleted file mode 100644 index 5988441fbcd..00000000000 --- a/tmc/pkg/virtual/syncer/schemas/builtin/builtin_test.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package builtin - -import ( - "testing" - - "github.com/stretchr/testify/require" - - _ "k8s.io/kubernetes/pkg/apis/core/install" -) - -func TestInit(t *testing.T) { - require.Equal(t, len(syncerInternalAPIs), len(SyncerSchemas)) -} diff --git a/tmc/pkg/virtual/syncer/transformations/defaultsummarizing.go b/tmc/pkg/virtual/syncer/transformations/defaultsummarizing.go deleted file mode 100644 index c9757f9f948..00000000000 --- a/tmc/pkg/virtual/syncer/transformations/defaultsummarizing.go +++ /dev/null @@ -1,136 +0,0 @@ -/* -Copyright 2022 The KCP Authors. 
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package transformations
-
-import (
-	"encoding/json"
-	"strings"
-
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
-	"k8s.io/apimachinery/pkg/runtime/schema"
-
-	"github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1"
-)
-
-var _ SummarizingRules = (*DefaultSummarizingRules)(nil)
-var _ SummarizingRulesProvider = (*DefaultSummarizingRules)(nil)
-
-// DefaultSummarizingRules provides a default minimal implementation of [SummarizingRules].
-// It only adds a status field, which for now is always promoted (see comments below in the code).
-type DefaultSummarizingRules struct{}
-
-type field struct {
-	FieldPath         string `json:"fieldPath"`
-	PromoteToUpstream bool   `json:"PromoteToUpstream,omitempty"`
-}
-
-var _ FieldToSummarize = field{}
-
-// Path implements [Field.Path].
-func (f field) Path() string {
-	return f.FieldPath
-}
-
-// pathElements returns the dot-separated elements of the field path.
-func (f field) pathElements() []string {
-	return strings.Split(f.FieldPath, ".")
-}
-
-// Set implements [Field.Set].
-func (f field) Set(resource *unstructured.Unstructured, value interface{}) error {
-	return unstructured.SetNestedField(resource.UnstructuredContent(), value, f.pathElements()...)
-}
-
-// Get implements [Field.Get].
-func (f field) Get(resource *unstructured.Unstructured) (interface{}, bool, error) {
-	return unstructured.NestedFieldNoCopy(resource.UnstructuredContent(), f.pathElements()...)
-}
-
-// Delete implements [Field.Delete].
-func (f field) Delete(resource *unstructured.Unstructured) {
-	unstructured.RemoveNestedField(resource.UnstructuredContent(), f.pathElements()...)
-}
-
-// CanPromoteToUpstream implements [Field.CanPromoteToUpstream].
-func (f field) CanPromoteToUpstream() bool {
-	return f.PromoteToUpstream
-}
-
-// IsStatus implements [Field.IsStatus].
-func (f field) IsStatus() bool { - elements := f.pathElements() - return len(elements) == 1 && elements[0] == "status" -} - -type fields []field - -var _ SummarizingRules = (fields)(nil) - -func (fs fields) FieldsToSummarize(gvr schema.GroupVersionResource) []FieldToSummarize { - result := make([]FieldToSummarize, 0, len(fs)) - for _, f := range fs { - result = append(result, FieldToSummarize(f)) - } - return result -} - -func (s *DefaultSummarizingRules) SummarizingRulesFor(resource metav1.Object) (SummarizingRules, error) { - if encoded := resource.GetAnnotations()[v1alpha1.ExperimentalSummarizingRulesAnnotation]; encoded != "" { - var decoded []field - if err := json.Unmarshal([]byte(encoded), &decoded); err != nil { - return nil, err - } - return fields(decoded), nil - } - return s, nil -} - -func (s *DefaultSummarizingRules) FieldsToSummarize(gvr schema.GroupVersionResource) []FieldToSummarize { - fields := []FieldToSummarize{ - field{ - FieldPath: "status", - PromoteToUpstream: s.canPromoteStatusToUpstream(gvr), - }, - } - - // TODO(davidfestal): In the future, we would add some well-known fields of some standard types - // like Service clusterIP: - // - // if gvr == corev1.GenericControlPlaneSchemeGroupVersion.WithResource("services") { - // fields = append(fields, field{ - // path: "spec.clusterIP", - // canPromoteToUpstream: false, - // }) - // } - - return fields -} - -func (s *DefaultSummarizingRules) canPromoteStatusToUpstream(gvr schema.GroupVersionResource) bool { - switch gvr { - // TODO(davidfestal): In the future, ingresses and services would have default coordination controllers, - // we would never promote their status to the upstream resource, since it is inherently related to - // SyncTarget infrastructure details. - // - // case networkingv1.SchemeGroupVersion.WithResource("ingresses"), - // corev1.GenericControlPlaneSchemeGroupVersion.WithResource("services"): - // return false - default: - return true - } -} diff --git a/tmc/pkg/virtual/syncer/transformations/helpers.go b/tmc/pkg/virtual/syncer/transformations/helpers.go deleted file mode 100644 index 8d63bc5ba6e..00000000000 --- a/tmc/pkg/virtual/syncer/transformations/helpers.go +++ /dev/null @@ -1,74 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package transformations - -import ( - "encoding/json" - "strings" - - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - - "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" -) - -// getSyncerViewFields builds a map whose keys are the summarizing field paths, -// and values are the overriding values of the corresponding fields for this SyncTarget. -// This map is built from the value of the diff.syncer.internal.kcp.io/ annotation. 
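// [Editor's illustrative aside, not part of the deleted sources.] Using the
// annotation value format exercised by the tests at the end of this patch, an
// upstream resource overridden for the hypothetical SyncTarget key
// "syncTargetKey" would carry:
//
//	metadata:
//	  annotations:
//	    diff.syncer.internal.kcp.io/syncTargetKey: '{"status":{"statusField":"new"}}'
//
// and getSyncerViewFields (below) would decode that JSON value into
// map[string]interface{}{"status": map[string]interface{}{"statusField": "new"}}.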
-func getSyncerViewFields(upstreamResource *unstructured.Unstructured, syncTargetKey string) (map[string]interface{}, error) {
-	annotations := upstreamResource.GetAnnotations()
-	var syncerViewAnnotationValue string
-	var syncerViewAnnotationFound bool
-	for name, value := range annotations {
-		if strings.HasPrefix(name, v1alpha1.InternalSyncerViewAnnotationPrefix) {
-			if syncTargetKey == strings.TrimPrefix(name, v1alpha1.InternalSyncerViewAnnotationPrefix) {
-				syncerViewAnnotationValue = value
-				syncerViewAnnotationFound = true
-				break
-			}
-		}
-	}
-
-	if !syncerViewAnnotationFound {
-		return nil, nil
-	}
-
-	result := make(map[string]interface{}, 1)
-	if err := json.Unmarshal([]byte(syncerViewAnnotationValue), &result); err != nil {
-		return nil, err
-	}
-	return result, nil
-}
-
-// setSyncerViewFields marshals into the diff.syncer.internal.kcp.io/ annotation
-// a map whose keys are summarizing field keys, and values are the overridden values of the corresponding
-// fields for this SyncTarget.
-func setSyncerViewFields(kcpResource *unstructured.Unstructured, syncTargetKey string, syncerViewFieldValues map[string]interface{}) error {
-	annotations := kcpResource.GetAnnotations()
-
-	annotationValue, err := json.Marshal(syncerViewFieldValues)
-	if err != nil {
-		return err
-	}
-
-	if annotations == nil {
-		annotations = make(map[string]string, 1)
-	}
-
-	annotations[v1alpha1.InternalSyncerViewAnnotationPrefix+syncTargetKey] = string(annotationValue)
-	kcpResource.SetAnnotations(annotations)
-	return nil
-}
diff --git a/tmc/pkg/virtual/syncer/transformations/specdiff.go b/tmc/pkg/virtual/syncer/transformations/specdiff.go
deleted file mode 100644
index 20e1eabac7c..00000000000
--- a/tmc/pkg/virtual/syncer/transformations/specdiff.go
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
-Copyright 2022 The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/ - -package transformations - -import ( - "encoding/json" - - jsonpatch "github.com/evanphx/json-patch" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" - - "github.com/kcp-dev/kcp/sdk/apis/workload/helpers" - "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" -) - -var _ Transformation = (*SpecDiffTransformation)(nil) -var _ TransformationProvider = (*SpecDiffTransformation)(nil) - -type SpecDiffTransformation struct{} - -func (t *SpecDiffTransformation) TransformationFor(resource metav1.Object) (Transformation, error) { - return t, nil -} - -func (*SpecDiffTransformation) ToSyncerView(syncTargetKey string, gvr schema.GroupVersionResource, newUpstreamResource *unstructured.Unstructured, overridenSyncerViewFields map[string]interface{}, requestedSyncing map[string]helpers.SyncIntent) (newSyncerViewResource *unstructured.Unstructured, err error) { - specDiffPatch := newUpstreamResource.GetAnnotations()[v1alpha1.ClusterSpecDiffAnnotationPrefix+syncTargetKey] - - if specDiffPatch == "" { - return newUpstreamResource, nil - } - - upstreamSpec, specExists, err := unstructured.NestedFieldCopy(newUpstreamResource.UnstructuredContent(), "spec") - if err != nil { - return nil, err - } - if !specExists { - return newUpstreamResource, nil - } - - // TODO(jmprusi): Surface those errors to the user. - patch, err := jsonpatch.DecodePatch([]byte(specDiffPatch)) - if err != nil { - return nil, err - } - upstreamSpecJSON, err := json.Marshal(upstreamSpec) - if err != nil { - return nil, err - } - patchedUpstreamSpecJSON, err := patch.Apply(upstreamSpecJSON) - if err != nil { - return nil, err - } - var newSpec map[string]interface{} - if err := json.Unmarshal(patchedUpstreamSpecJSON, &newSpec); err != nil { - return nil, err - } - if err := unstructured.SetNestedMap(newUpstreamResource.UnstructuredContent(), newSpec, "spec"); err != nil { - return nil, err - } - return newUpstreamResource, nil -} diff --git a/tmc/pkg/virtual/syncer/transformations/transformer.go b/tmc/pkg/virtual/syncer/transformations/transformer.go deleted file mode 100644 index 718bdee76d3..00000000000 --- a/tmc/pkg/virtual/syncer/transformations/transformer.go +++ /dev/null @@ -1,535 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package transformations - -import ( - "context" - "errors" - "fmt" - "net/url" - "strings" - - kcpcache "github.com/kcp-dev/apimachinery/v2/pkg/cache" - "github.com/kcp-dev/logicalcluster/v3" - - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/dynamic" - "k8s.io/klog/v2" - - . 
"github.com/kcp-dev/kcp/pkg/logging" - "github.com/kcp-dev/kcp/pkg/syncer/shared" - dynamiccontext "github.com/kcp-dev/kcp/pkg/virtual/framework/dynamic/context" - "github.com/kcp-dev/kcp/pkg/virtual/framework/transforming" - "github.com/kcp-dev/kcp/sdk/apis/workload/helpers" - "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" - . "github.com/kcp-dev/kcp/tmc/pkg/logging" - syncercontext "github.com/kcp-dev/kcp/tmc/pkg/virtual/syncer/context" -) - -const ( - promotedToUpstream = "##promoted##" - - errorMessage = "error during transformation" - startingMessage = "starting transformation" -) - -var _ transforming.ResourceTransformer = (*SyncerResourceTransformer)(nil) - -// SyncerResourceTransformer manages both the transformation of resources exposed to a Syncer -// when syncing to downstream, and the management of fields updated by the Syncer -// when syncing back to upstream. -type SyncerResourceTransformer struct { - ShardExternalURL string - - TransformationProvider - SummarizingRulesProvider -} - -// TransformationFor implements [TransformationProvider.TransformationFor]. -func (srt SyncerResourceTransformer) TransformationFor(resource metav1.Object) (Transformation, error) { - if srt.TransformationProvider == nil { - return nil, nil - } - return srt.TransformationProvider.TransformationFor(resource) -} - -// SummarizingRulesFor implements [SummarizingRulesProvider.SummarizingRulesFor]. -func (srt SyncerResourceTransformer) SummarizingRulesFor(resource metav1.Object) (SummarizingRules, error) { - if srt.SummarizingRulesProvider == nil { - return &DefaultSummarizingRules{}, nil - } - return srt.SummarizingRulesProvider.SummarizingRulesFor(resource) -} - -// BeforeWrite implements [transforming.ResourceTransformer.BeforeWrite]. -// It will be called when the Syncer updates a given resource through the -// Syncer virtual workspace. -// It performs the appropriate cleanup of the resource if the Syncer doesn't own the resource anymore -// (syncer finalizer was removed). -// In all other cases, it applies every summarized fields updated by the Syncer -// to the Syncer View annotation, possibly promoting it to te upstream resource. -func (srt *SyncerResourceTransformer) BeforeWrite(client dynamic.ResourceInterface, ctx context.Context, gvr schema.GroupVersionResource, syncerViewResource *unstructured.Unstructured, subresources ...string) (*unstructured.Unstructured, error) { - apiDomainKey := dynamiccontext.APIDomainKeyFrom(ctx) - _, _, syncTargetName, err := kcpcache.SplitMetaClusterNamespaceKey(string(apiDomainKey)) - if err != nil { - return nil, err - } - logger := klog.FromContext(ctx).WithName("syncer-transformer").V(5). - WithValues("step", "before", "gvr", gvr.String(), "subresources", subresources, "apiDomainKey", apiDomainKey, SyncTargetName, syncTargetName) - logger = logger.WithValues(FromPrefix("syncerView", syncerViewResource)...) 
-
-	syncTargetKey, err := syncercontext.SyncTargetKeyFrom(ctx)
-	if err != nil {
-		logger.Error(err, errorMessage)
-		return nil, err
-	}
-	logger = logger.WithValues(SyncTargetKey, syncTargetKey)
-
-	syncerFinalizerName := shared.SyncerFinalizerNamePrefix + syncTargetKey
-
-	syncerViewHasSyncerFinalizer := sets.New[string](syncerViewResource.GetFinalizers()...).Has(syncerFinalizerName)
-	syncerViewDeletionTimestamp := syncerViewResource.GetDeletionTimestamp()
-	syncerViewResourceVersion := syncerViewResource.GetResourceVersion()
-
-	logger = logger.WithValues(
-		"syncerView.HasSyncerFinalizer", syncerViewHasSyncerFinalizer,
-		"syncerView.ResourceVersion", syncerViewResourceVersion,
-	)
-	if syncerViewDeletionTimestamp != nil {
-		logger = logger.WithValues(
-			"syncerView.deletionTimestamp", syncerViewDeletionTimestamp,
-		)
-	} else {
-		logger = logger.WithValues(
-			"syncerView.deletionTimestamp", "",
-		)
-	}
-
-	logger.Info(startingMessage)
-
-	removedFromSyncer := syncerViewDeletionTimestamp != nil && !syncerViewHasSyncerFinalizer && len(subresources) == 0
-
-	existingUpstreamResource, err := client.Get(ctx, syncerViewResource.GetName(), metav1.GetOptions{ResourceVersion: syncerViewResourceVersion})
-	if err != nil {
-		return nil, err
-	}
-
-	logger = logger.WithValues(FromPrefix("upstreamResource", existingUpstreamResource)...)
-
-	if existingUpstreamResource.GetResourceVersion() != syncerViewResource.GetResourceVersion() {
-		logger.Info("upstream resource has the wrong resource version: return conflict error to the syncer", "upstreamViewResourceVersion", existingUpstreamResource.GetResourceVersion())
-		return nil, kerrors.NewConflict(gvr.GroupResource(), existingUpstreamResource.GetName(), fmt.Errorf("the resource has been modified in the meantime"))
-	}
-
-	var fieldsToSummarize []FieldToSummarize
-	if summarizingRules, err := srt.SummarizingRulesFor(existingUpstreamResource); err != nil {
-		logger.Error(err, errorMessage)
-		return nil, kerrors.NewInternalError(fmt.Errorf("unable to get summarizing rules from object upstream resource %s|%s/%s for SyncTarget %s: %w", logicalcluster.From(existingUpstreamResource), existingUpstreamResource.GetNamespace(), existingUpstreamResource.GetName(), syncTargetKey, err))
-	} else if summarizingRules != nil {
-		fieldsToSummarize = summarizingRules.FieldsToSummarize(gvr)
-	}
-
-	if removedFromSyncer {
-		logger.Info("resource has been removed from the syncer")
-		existingSyncing, err := helpers.GetSyncIntents(existingUpstreamResource)
-		if err != nil {
-			logger.Error(err, errorMessage)
-			return nil, kerrors.NewInternalError(err)
-		}
-		delete(existingSyncing, syncTargetKey)
-		if len(existingSyncing) == 1 {
-			var singleSyncTarget string
-			for key := range existingSyncing {
-				singleSyncTarget = key
-			}
-
-			if existingSyncing[singleSyncTarget].ResourceState == "Sync" &&
-				sets.New[string](existingUpstreamResource.GetFinalizers()...).Has(shared.SyncerFinalizerNamePrefix+singleSyncTarget) {
-				// If removing the current SyncTarget leaves only one SyncTarget,
-				// and the remaining syncTarget has the Sync label,
-				// then let's promote syncer view overriding field values of this remaining SyncTarget
-				// for both the Status and Spec (as needed)
-
-				logger.Info("after removing the resource from the syncer, it will be scheduled on only 1 syncTarget => manage promotion")
-				statusPromoted := false
-				promoted := false
-
-				existingSyncerViewFields, err := getSyncerViewFields(existingUpstreamResource, singleSyncTarget)
-				if err != nil {
-					logger.Error(err, errorMessage)
-					return nil, kerrors.NewInternalError(fmt.Errorf("unable to get syncer view fields from upstream resource %s|%s/%s for SyncTarget %s: %w", logicalcluster.From(existingUpstreamResource), existingUpstreamResource.GetNamespace(), existingUpstreamResource.GetName(), singleSyncTarget, err))
-				}
-				if existingSyncerViewFields != nil {
-					for _, field := range fieldsToSummarize {
-						logger := logger.WithValues("field", field.Path())
-						if !field.CanPromoteToUpstream() {
-							continue
-						}
-						logger.Info("promoting field to the upstream resource")
-						if syncerViewFieldValue := existingSyncerViewFields[field.Path()]; syncerViewFieldValue != promotedToUpstream {
-							err := field.Set(existingUpstreamResource, syncerViewFieldValue)
-							if err != nil {
-								logger.Error(err, errorMessage)
-								return nil, kerrors.NewInternalError(fmt.Errorf("unable to set promoted syncer view field %s on upstream resource %s|%s/%s for SyncTarget %s: %w", field.Path(), logicalcluster.From(existingUpstreamResource), existingUpstreamResource.GetNamespace(), existingUpstreamResource.GetName(), singleSyncTarget, err))
-							}
-							existingSyncerViewFields[field.Path()] = promotedToUpstream
-							promoted = true
-							if field.IsStatus() {
-								statusPromoted = true
-							}
-						}
-					}
-					if promoted {
-						if err := setSyncerViewFields(existingUpstreamResource, singleSyncTarget, existingSyncerViewFields); err != nil {
-							logger.Error(err, errorMessage)
-							return nil, kerrors.NewInternalError(fmt.Errorf("unable to set syncer view fields for upstream resource %s|%s/%s for SyncTarget %s: %w", logicalcluster.From(existingUpstreamResource), existingUpstreamResource.GetNamespace(), existingUpstreamResource.GetName(), singleSyncTarget, err))
-						}
-					}
-				}
-
-				if statusPromoted {
-					logger.Info("updating the status of the upstream resource after field promotion, before the normal update")
-					existingUpstreamResource, err = client.UpdateStatus(ctx, existingUpstreamResource, metav1.UpdateOptions{})
-					if err != nil {
-						logger.Error(err, errorMessage)
-						return nil, err
-					}
-				}
-			}
-		}
-
-		logger.Info("removing the syncTarget-related labels, annotation and finalizers")
-		if labels := existingUpstreamResource.GetLabels(); labels != nil {
-			delete(labels, v1alpha1.ClusterResourceStateLabelPrefix+syncTargetKey)
-			existingUpstreamResource.SetLabels(labels)
-		}
-
-		finalizers := sets.New[string](existingUpstreamResource.GetFinalizers()...)
-		if finalizers.Has(syncerFinalizerName) {
-			finalizers.Delete(syncerFinalizerName)
-			existingUpstreamResource.SetFinalizers(sets.List[string](finalizers))
-		}
-
-		if annotations := existingUpstreamResource.GetAnnotations(); annotations != nil {
-			delete(annotations, v1alpha1.InternalSyncerViewAnnotationPrefix+syncTargetKey)
-			delete(annotations, v1alpha1.InternalClusterDeletionTimestampAnnotationPrefix+syncTargetKey)
-			delete(annotations, v1alpha1.ClusterSpecDiffAnnotationPrefix)
-			existingUpstreamResource.SetAnnotations(annotations)
-		}
-
-		return existingUpstreamResource, nil
-	}
-
-	logger.Info("checking the requested syncing")
-	newSyncing, err := helpers.GetSyncIntents(existingUpstreamResource)
-	if err != nil {
-		logger.Error(err, errorMessage)
-		return nil, kerrors.NewInternalError(err)
-	}
-	if syncTargetSyncing, exists := newSyncing[syncTargetKey]; !exists || syncTargetSyncing.ResourceState != "Sync" {
-		logger.Error(errors.New("the Syncer tried to write resource though it is not assigned to it"), errorMessage)
-		return nil, kerrors.NewInternalError(fmt.Errorf("tried to write resource %s(%s/%s) though it is not assigned to the current SyncTarget",
-			strings.Join(append([]string{gvr.Resource}, subresources...), "/"),
-			existingUpstreamResource.GetNamespace(),
-			existingUpstreamResource.GetName()))
-	}
-
-	if !syncerViewHasSyncerFinalizer {
-		logger.Error(errors.New("the Syncer tried to write resource though it is not owning it (syncer finalizer doesn't exist)"), errorMessage)
-		return nil, kerrors.NewInternalError(fmt.Errorf("tried to write resource %s(%s/%s) though it is not owning it (syncer finalizer doesn't exist)",
-			strings.Join(append([]string{gvr.Resource}, subresources...), "/"),
-			existingUpstreamResource.GetNamespace(),
-			existingUpstreamResource.GetName()))
-	}
-
-	upstreamObjectSyncerFinalizers := sets.New[string]()
-	for _, finalizer := range existingUpstreamResource.GetFinalizers() {
-		if strings.HasPrefix(finalizer, shared.SyncerFinalizerNamePrefix) {
-			upstreamObjectSyncerFinalizers.Insert(finalizer)
-		}
-	}
-
-	if len(newSyncing) > 1 && upstreamObjectSyncerFinalizers.Len() == 1 {
-		// There's more than one SyncTarget State label.
-		// If there is only one SyncTarget that currently owns the resource (syncer finalizer is present on the upstream resource),
-		// then we should unpromote the syncer view field that would have been promoted to the upstream resource.
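// [Editor's illustrative aside, not part of the deleted sources.] Promotion is
// tracked with the "##promoted##" marker declared at the top of this file:
// once a field is promoted, the per-SyncTarget diff annotation stores the
// marker and the real value lives on the upstream object itself, e.g. (value
// taken from the tests at the end of this patch):
//
//	diff.syncer.internal.kcp.io/syncTargetKey: '{"status":"##promoted##"}'
//
// The unpromotion step below reverses that: it copies the field value back
// from the upstream object into the annotation of the single currently-owning
// SyncTarget, so that a second syncer can take co-ownership without observing
// the first syncer's values as its own.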
- - logger.Info("resource scheduled on several syncTargets, but only one SyncTarget currently owns the resource => manage field unpromoting before the resource is owned by the second syncer") - singleOwningSyncTarget := strings.TrimPrefix(upstreamObjectSyncerFinalizers.UnsortedList()[0], shared.SyncerFinalizerNamePrefix) - - existingSyncerViewFields, err := getSyncerViewFields(existingUpstreamResource, singleOwningSyncTarget) - if err != nil { - logger.Error(err, errorMessage) - return nil, kerrors.NewInternalError(fmt.Errorf("unable to get syncer view fields from upstream resource %s|%s/%s for SyncTarget %s: %w", logicalcluster.From(existingUpstreamResource), existingUpstreamResource.GetNamespace(), existingUpstreamResource.GetName(), singleOwningSyncTarget, err)) - } - unpromoted := false - for _, field := range fieldsToSummarize { - logger := logger.WithValues("field", field.Path()) - - if !field.CanPromoteToUpstream() { - continue - } - if existingSyncerViewFields[field.Path()] == promotedToUpstream { - logger.Info("unpromoting field from the upstream resource") - promotedUpstreamFieldValue, exists, err := field.Get(existingUpstreamResource) - if err != nil { - logger.Error(err, errorMessage) - return nil, kerrors.NewInternalError(fmt.Errorf("unable to get promoted syncer view field %s from upstream resource %s|%s/%s for SyncTarget %s: %w", field.Path(), logicalcluster.From(existingUpstreamResource), existingUpstreamResource.GetNamespace(), existingUpstreamResource.GetName(), singleOwningSyncTarget, err)) - } - if !exists { - logger.Info("WARNING: this should not happen: the presence of a promoted syncer view field in upstream should match the presence of a promotion marker in the annotation on upstream resource") - continue - } - - existingSyncerViewFields[field.Path()] = promotedUpstreamFieldValue - unpromoted = true - } - } - if unpromoted { - if err := setSyncerViewFields(existingUpstreamResource, singleOwningSyncTarget, existingSyncerViewFields); err != nil { - logger.Error(err, errorMessage) - return nil, kerrors.NewInternalError(fmt.Errorf("unable to set syncer view fields on upstream resource %s|%s/%s for SyncTarget %s: %w", logicalcluster.From(existingUpstreamResource), existingUpstreamResource.GetNamespace(), existingUpstreamResource.GetName(), singleOwningSyncTarget, err)) - } - } - } - - logger.Info("update the syncer view fields annotation in the upstream resource based on the syncer view") - existingSyncerViewFields, err := getSyncerViewFields(existingUpstreamResource, syncTargetKey) - if err != nil { - logger.Error(err, errorMessage) - return nil, kerrors.NewInternalError(fmt.Errorf("unable to get syncer view fields from upstream resource %s|%s/%s for SyncTarget %s: %w", logicalcluster.From(existingUpstreamResource), existingUpstreamResource.GetNamespace(), existingUpstreamResource.GetName(), syncTargetKey, err)) - } - - syncerViewFields := make(map[string]interface{}) - - statusSubresource := len(subresources) == 1 && subresources[0] == "status" - for _, field := range fieldsToSummarize { - logger := logger.WithValues("field", field.Path()) - - if field.IsStatus() && !statusSubresource || !field.IsStatus() && statusSubresource { - if existingValue, exists := existingSyncerViewFields[field.Path()]; exists { - logger.Info("keeping the previous syncer view field value") - syncerViewFields[field.Path()] = existingValue - } - continue - } - - logger.Info("getting field on syncerView") - syncerViewFieldValue, exists, err := field.Get(syncerViewResource) - if err != nil { - 
logger.Error(err, errorMessage) - return nil, kerrors.NewInternalError(fmt.Errorf("unable to get syncer view field %s from downstream resource %s/%s coming from SyncTarget %s: %w", field.Path(), syncerViewResource.GetNamespace(), syncerViewResource.GetName(), syncTargetKey, err)) - } - if !exists { - logger.Info("field does not exist on syncerView") - continue - } - - if field.CanPromoteToUpstream() { - if len(newSyncing) == 1 { - logger.Info("resource is scheduled on a single syncTarget => promote the field") - // There is only one SyncTarget, so let's promote the field value to the upstream resource - if err := field.Set(existingUpstreamResource, syncerViewFieldValue); err != nil { - logger.Error(err, errorMessage) - return nil, kerrors.NewInternalError(fmt.Errorf("unable to promote syncer view field %s from downstream resource %s/%s coming from SyncTarget %s: %w", field.Path(), syncerViewResource.GetNamespace(), syncerViewResource.GetName(), syncTargetKey, err)) - } - // Only add a promotion marker for the field in the syncer view fields annotation - syncerViewFieldValue = promotedToUpstream - } - } - - logger.Info("setting the syncer view field value", "value", syncerViewFieldValue) - // Now simply add the field in the syncer view fields annotation - syncerViewFields[field.Path()] = syncerViewFieldValue - } - - logger.Info("setting the updated syncer view fields annotation on the upstream resource") - if err := setSyncerViewFields(existingUpstreamResource, syncTargetKey, syncerViewFields); err != nil { - logger.Error(err, errorMessage) - return nil, kerrors.NewInternalError(fmt.Errorf("unable to set syncer view fields on upstream resource %s|%s/%s for SyncTarget %s: %w", logicalcluster.From(existingUpstreamResource), existingUpstreamResource.GetNamespace(), existingUpstreamResource.GetName(), syncTargetKey, err)) - } - - if syncerViewHasSyncerFinalizer && !upstreamObjectSyncerFinalizers.Has(syncerFinalizerName) { - logger.Info("adding the syncer finalizer to the upstream resource") - s := sets.New[string](append(existingUpstreamResource.GetFinalizers(), syncerFinalizerName)...) - existingUpstreamResource.SetFinalizers(sets.List[string](s)) - } - - logger.Info("resource transformed") - return existingUpstreamResource, nil -} - -// AfterRead implements [transforming.ResourceTransformer.AfterRead]. -// It will be called when an upstream resource is read from the Syncer Virtual Workspace -// for a given SyncTarget (typically by the Syncer). -// It transforms the upstream resource according to the provided Transformation, -// and applies on top of the transformed resource every summarized fields previously updated -// by the Syncer. -func (srt *SyncerResourceTransformer) AfterRead(_ dynamic.ResourceInterface, ctx context.Context, gvr schema.GroupVersionResource, upstreamResource *unstructured.Unstructured, eventType *watch.EventType, subresources ...string) (*unstructured.Unstructured, error) { - apiDomainKey := dynamiccontext.APIDomainKeyFrom(ctx) - logger := klog.FromContext(ctx).WithName("syncer-transformer").V(5). - WithValues("step", "after", "groupVersionResource", gvr.String(), "subresources", subresources, "apiDomainKey", apiDomainKey) - if eventType != nil { - logger = logger.WithValues("eventType", *eventType) - } else { - logger = logger.WithValues("eventType", "") - } - logger = logger.WithValues(FromPrefix("upstreamResource", upstreamResource)...) 
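// [Editor's illustrative aside, not part of the deleted sources.] The read
// path below proceeds in four steps:
//
//  1. deep-copy the upstream object and strip finalizers, the deletion
//     timestamp and the per-SyncTarget diff annotations;
//  2. hand the cleaned copy to the pluggable Transformation (ToSyncerView);
//  3. re-apply the summarized fields recorded in the diff annotation,
//     resolving "##promoted##" markers against the upstream object;
//  4. propagate the syncer finalizer and the per-SyncTarget deletion
//     timestamp, and stamp the workspace URL annotation.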
- - syncTargetKey, err := syncercontext.SyncTargetKeyFrom(ctx) - if err != nil { - logger.Error(err, errorMessage) - return nil, err - } - logger = logger.WithValues(SyncTargetKey, syncTargetKey) - - logger.Info(startingMessage) - - syncerFinalizerName := shared.SyncerFinalizerNamePrefix + syncTargetKey - - syncing, err := helpers.GetSyncIntents(upstreamResource) - if err != nil { - logger.Error(err, errorMessage) - return nil, kerrors.NewInternalError(err) - } - - syncerViewFields, err := getSyncerViewFields(upstreamResource, syncTargetKey) - if err != nil { - logger.Error(err, errorMessage) - return nil, kerrors.NewInternalError(fmt.Errorf("unable to get syncer view fields from upstream resource %s|%s/%s for SyncTarget %s: %w", logicalcluster.From(upstreamResource), upstreamResource.GetNamespace(), upstreamResource.GetName(), syncTargetKey, err)) - } - - cleanedUpstreamResource := upstreamResource.DeepCopy() - cleanedUpstreamResource.SetFinalizers(nil) - cleanedUpstreamResource.SetDeletionTimestamp(nil) - - logger.Info("cleaning the upstream resource before calling the transformation") - - // Remove the syncer view diff annotation from the syncer view resource - annotations := cleanedUpstreamResource.GetAnnotations() - for name := range annotations { - if strings.HasPrefix(name, v1alpha1.InternalSyncerViewAnnotationPrefix) { - delete(annotations, name) - } - } - cleanedUpstreamResource.SetAnnotations(annotations) - - transformedSyncerViewResource := cleanedUpstreamResource - if transformation, err := srt.TransformationFor(upstreamResource); err != nil { - logger.Error(err, errorMessage) - return nil, kerrors.NewInternalError(fmt.Errorf("unable to get transformation from object upstream resource %s|%s/%s for SyncTarget %s: %w", logicalcluster.From(upstreamResource), upstreamResource.GetNamespace(), upstreamResource.GetName(), syncTargetKey, err)) - } else if transformation != nil { - logger.Info("calling the syncer transformation") - transformedSyncerViewResource, err = transformation.ToSyncerView(syncTargetKey, gvr, cleanedUpstreamResource, syncerViewFields, syncing) - if err != nil { - return nil, err - } - } - - if summarizingRules, err := srt.SummarizingRulesFor(upstreamResource); err != nil { - logger.Error(err, errorMessage) - return nil, kerrors.NewInternalError(fmt.Errorf("unable to get summarizing rules from object upstream resource %s|%s/%s for SyncTarget %s: %w", logicalcluster.From(upstreamResource), upstreamResource.GetNamespace(), upstreamResource.GetName(), syncTargetKey, err)) - } else if summarizingRules != nil { - logger.Info("applying summarizing rules") - - for _, field := range summarizingRules.FieldsToSummarize(gvr) { - logger := logger.WithValues("field", field.Path()) - existingSyncerViewValue, syncerViewValueExists := syncerViewFields[field.Path()] - - if field.CanPromoteToUpstream() { - if !syncerViewValueExists { - // Fields that can be promoted to the upstream resource (like status) are ALWAYS owned by the syncer. - // So if the value of the field is not explicitly overridden in the syncer view annotation, - // let's remove the field from the syncer view. 
- logger.Info("field can be promoted, but is not found in the syncer view fields annotation => delete the field from the transformed syncer view resource") - field.Delete(transformedSyncerViewResource) - continue - } - if existingSyncerViewValue == promotedToUpstream { - logger.Info("syncer view field is promoted => get the field value from the upstream resource") - promotedValueExists := false - existingSyncerViewValue, promotedValueExists, err = field.Get(upstreamResource) - if err != nil { - logger.Error(err, errorMessage) - return nil, kerrors.NewInternalError(fmt.Errorf("unable to get promoted syncer view field %s on resource %s|%s/%s: %w", field.Path(), logicalcluster.From(upstreamResource), upstreamResource.GetNamespace(), upstreamResource.GetName(), err)) - } - if !promotedValueExists { - logger.Error(errors.New("promoted syncer view field does not exist: this should never happen"), errorMessage) - logger.Info("dropping invalid field") - field.Delete(transformedSyncerViewResource) - continue - } - } - logger.Info("overriding field with syncer view field value", "value", existingSyncerViewValue) - if err := field.Set(transformedSyncerViewResource, existingSyncerViewValue); err != nil { - logger.Error(err, errorMessage) - return nil, kerrors.NewInternalError(fmt.Errorf("unable to override field %s on resource %s|%s/%s: %w", field.Path(), logicalcluster.From(upstreamResource), upstreamResource.GetNamespace(), upstreamResource.GetName(), err)) - } - continue - } - - // Field cannot be promoted to upstream and could possibly be owned by both the syncer and KCP - // In this case let's keep the upstream value of the field if no overriding value was provided by the syncer - if syncerViewValueExists { - // TODO(davidfestal): in the future, when we also summarize some Spec fields (like Ingress class), - // we might want to check the existence of the field in the transformedSyncerView, and decide which of both - // the transformed value and the previously-overriding value should be kept. - logger.Info("overriding field with syncer view field value", "value", existingSyncerViewValue) - if err := field.Set(transformedSyncerViewResource, existingSyncerViewValue); err != nil { - logger.Error(err, errorMessage) - return nil, kerrors.NewInternalError(fmt.Errorf("unable to override field %s on resource %s|%s/%s: %w", field.Path(), logicalcluster.From(transformedSyncerViewResource), transformedSyncerViewResource.GetNamespace(), transformedSyncerViewResource.GetName(), err)) - } - } - } - } - - upstreamResourceFinalizers := sets.New[string](upstreamResource.GetFinalizers()...) - syncerViewFinalizers := sets.New[string](transformedSyncerViewResource.GetFinalizers()...) - if upstreamResourceFinalizers.Has(syncerFinalizerName) { - logger.Info("propagating the syncer finalizer from the upstream resource to the transformed syncer view") - syncerViewFinalizers.Insert(syncerFinalizerName) - transformedSyncerViewResource.SetFinalizers(sets.List[string](syncerViewFinalizers)) - } - - if deletionTimestamp := syncing[syncTargetKey].DeletionTimestamp; deletionTimestamp != nil && syncing[syncTargetKey].Finalizers == "" { - // Only propagate the deletionTimestamp if no soft finalizers exist for this SyncTarget. 
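// [Editor's illustrative aside, not part of the deleted sources; the timestamp
// is invented, the annotation format comes from the tests below.] An upstream
// annotation such as
//
//	deletion.internal.workload.kcp.io/syncTargetKey: "2022-01-01T00:00:00Z"
//
// surfaces on the syncer view as the real metadata.deletionTimestamp, but only
// while no per-SyncTarget soft finalizers remain.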
-		logger.Info("propagating the deletionTimestamp annotation as the real deletionTimestamp of the transformed syncer view")
-		transformedSyncerViewResource.SetDeletionTimestamp(deletionTimestamp)
-	}
-
-	workspaceURL, err := url.JoinPath(srt.ShardExternalURL, logicalcluster.From(upstreamResource).Path().RequestPath())
-	if err != nil {
-		logger.Error(err, errorMessage)
-		return nil, kerrors.NewInternalError(fmt.Errorf("unable to build workspace URL for resource %s|%s/%s: %w", logicalcluster.From(transformedSyncerViewResource), transformedSyncerViewResource.GetNamespace(), transformedSyncerViewResource.GetName(), err))
-	}
-	if annotations := transformedSyncerViewResource.GetAnnotations(); annotations != nil {
-		annotations[v1alpha1.InternalWorkspaceURLAnnotationKey] = workspaceURL
-		transformedSyncerViewResource.SetAnnotations(annotations)
-	} else {
-		transformedSyncerViewResource.SetAnnotations(map[string]string{
-			v1alpha1.InternalWorkspaceURLAnnotationKey: workspaceURL,
-		})
-	}
-
-	logger.Info("resource transformed")
-	return transformedSyncerViewResource, nil
-}
diff --git a/tmc/pkg/virtual/syncer/transformations/transformer_test.go b/tmc/pkg/virtual/syncer/transformations/transformer_test.go
deleted file mode 100644
index 7d33d895865..00000000000
--- a/tmc/pkg/virtual/syncer/transformations/transformer_test.go
+++ /dev/null
@@ -1,1009 +0,0 @@
-/*
-Copyright 2022 The KCP Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/ - -package transformations - -import ( - "context" - "strings" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - kcpdynamic "github.com/kcp-dev/client-go/dynamic" - "github.com/kcp-dev/logicalcluster/v3" - "github.com/stretchr/testify/require" - - "k8s.io/apimachinery/pkg/api/equality" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/dynamic/fake" - clienttesting "k8s.io/client-go/testing" - - dynamiccontext "github.com/kcp-dev/kcp/pkg/virtual/framework/dynamic/context" - "github.com/kcp-dev/kcp/pkg/virtual/framework/transforming" - "github.com/kcp-dev/kcp/sdk/apis/workload/helpers" - "github.com/kcp-dev/kcp/sdk/client" - syncercontext "github.com/kcp-dev/kcp/tmc/pkg/virtual/syncer/context" -) - -type mockedClusterClient struct { - client *fake.FakeDynamicClient - lclusterRecorder func(lcluster string) -} - -func (c *mockedClusterClient) Resource(resource schema.GroupVersionResource) kcpdynamic.ResourceClusterInterface { - return &mockedResourceClusterClient{ - resourceClient: resourceClient{ - client: c.client, - resource: resource, - resourceInterface: c.client.Resource(resource), - lclusterRecorder: c.lclusterRecorder, - }, - } -} - -func (c *mockedClusterClient) Cluster(cluster logicalcluster.Path) dynamic.Interface { - return &dynamicClient{ - client: c.client, - lcluster: cluster, - lclusterRecorder: c.lclusterRecorder, - } -} - -type mockedResourceClusterClient struct { - resourceClient -} - -func (c *mockedResourceClusterClient) Cluster(lcluster logicalcluster.Path) dynamic.NamespaceableResourceInterface { - return &namespaceableResourceClient{ - resourceClient: resourceClient{ - resourceInterface: c.client.Resource(c.resource), - client: c.client, - resource: c.resource, - lcluster: lcluster, - lclusterRecorder: c.lclusterRecorder, - }, - } -} - -func (c *mockedResourceClusterClient) List(ctx context.Context, opts metav1.ListOptions) (*unstructured.UnstructuredList, error) { - return c.resourceClient.List(ctx, opts) -} - -func (c *mockedResourceClusterClient) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - return c.resourceClient.Watch(ctx, opts) -} - -type dynamicClient struct { - client *fake.FakeDynamicClient - lcluster logicalcluster.Path - lclusterRecorder func(lcluster string) -} - -func (c *dynamicClient) ClusterName() string { return c.lcluster.String() } - -func (c *dynamicClient) Resource(resource schema.GroupVersionResource) dynamic.NamespaceableResourceInterface { - return &namespaceableResourceClient{ - resourceClient: resourceClient{ - resourceInterface: c.client.Resource(resource), - client: c.client, - lcluster: c.lcluster, - resource: resource, - lclusterRecorder: c.lclusterRecorder, - }, - } -} - -type namespaceableResourceClient struct { - resourceClient -} - -func (c *namespaceableResourceClient) ClusterName() string { return c.lcluster.String() } - -func (c *namespaceableResourceClient) Namespace(namespace string) dynamic.ResourceInterface { - return &resourceClient{ - resourceInterface: c.client.Resource(c.resource).Namespace(namespace), - client: c.client, - lcluster: c.lcluster, - resource: c.resource, - namespace: namespace, - lclusterRecorder: c.lclusterRecorder, - } -} - -type 
resourceClient struct { - resourceInterface dynamic.ResourceInterface - client *fake.FakeDynamicClient - lcluster logicalcluster.Path - resource schema.GroupVersionResource - namespace string - lclusterRecorder func(lcluster string) -} - -func (c *resourceClient) ClusterName() string { return c.lcluster.String() } - -func (c *resourceClient) RawDelete(ctx context.Context, name string, opts metav1.DeleteOptions, subresources ...string) ([]byte, int, error) { - obj, _ := c.client.Tracker().Get(c.resource, c.namespace, name) - err := c.Delete(ctx, name, opts, subresources...) - bytes, _ := runtime.Encode(unstructured.UnstructuredJSONScheme, obj) - return bytes, 0, err -} - -func (c *resourceClient) RawDeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOptions metav1.ListOptions) ([]byte, int, error) { - gvk := c.resource.GroupVersion().WithKind(strings.TrimRight(strings.Title(c.resource.Resource), "s")) //nolint:staticcheck - objs, _ := c.client.Tracker().List(c.resource, gvk, c.namespace) - list := objs.(*unstructured.UnstructuredList) - list.SetGroupVersionKind(schema.GroupVersionKind{Group: gvk.Group, Version: gvk.Version, Kind: gvk.Kind + "List"}) - list.SetResourceVersion("") - err := c.DeleteCollection(ctx, opts, listOptions) - bytes, _ := runtime.Encode(unstructured.UnstructuredJSONScheme, list) - return bytes, 0, err -} - -func (c *resourceClient) Create(ctx context.Context, obj *unstructured.Unstructured, options metav1.CreateOptions, subresources ...string) (*unstructured.Unstructured, error) { - c.lclusterRecorder(c.lcluster.String()) - return c.resourceInterface.Create(ctx, obj, options, subresources...) -} -func (c *resourceClient) Update(ctx context.Context, obj *unstructured.Unstructured, options metav1.UpdateOptions, subresources ...string) (*unstructured.Unstructured, error) { - c.lclusterRecorder(c.lcluster.String()) - return c.resourceInterface.Update(ctx, obj, options, subresources...) -} -func (c *resourceClient) UpdateStatus(ctx context.Context, obj *unstructured.Unstructured, options metav1.UpdateOptions) (*unstructured.Unstructured, error) { - c.lclusterRecorder(c.lcluster.String()) - return c.resourceInterface.UpdateStatus(ctx, obj, options) -} -func (c *resourceClient) Delete(ctx context.Context, name string, options metav1.DeleteOptions, subresources ...string) error { - c.lclusterRecorder(c.lcluster.String()) - return c.resourceInterface.Delete(ctx, name, options, subresources...) -} -func (c *resourceClient) DeleteCollection(ctx context.Context, options metav1.DeleteOptions, listOptions metav1.ListOptions) error { - c.lclusterRecorder(c.lcluster.String()) - return c.resourceInterface.DeleteCollection(ctx, options, listOptions) -} -func (c *resourceClient) Get(ctx context.Context, name string, options metav1.GetOptions, subresources ...string) (*unstructured.Unstructured, error) { - c.lclusterRecorder(c.lcluster.String()) - return c.resourceInterface.Get(ctx, name, options, subresources...) 
-} -func (c *resourceClient) List(ctx context.Context, opts metav1.ListOptions) (*unstructured.UnstructuredList, error) { - c.lclusterRecorder(c.lcluster.String()) - return c.resourceInterface.List(ctx, opts) -} -func (c *resourceClient) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { - c.lclusterRecorder(c.lcluster.String()) - return c.resourceInterface.Watch(ctx, opts) -} -func (c *resourceClient) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, options metav1.PatchOptions, subresources ...string) (*unstructured.Unstructured, error) { - c.lclusterRecorder(c.lcluster.String()) - return c.resourceInterface.Patch(ctx, name, pt, data, options, subresources...) -} -func (c *resourceClient) Apply(ctx context.Context, name string, obj *unstructured.Unstructured, options metav1.ApplyOptions, subresources ...string) (*unstructured.Unstructured, error) { - c.lclusterRecorder(c.lcluster.String()) - return c.resourceInterface.Apply(ctx, name, obj, options, subresources...) -} -func (c *resourceClient) ApplyStatus(ctx context.Context, name string, obj *unstructured.Unstructured, options metav1.ApplyOptions) (*unstructured.Unstructured, error) { - c.lclusterRecorder(c.lcluster.String()) - return c.resourceInterface.ApplyStatus(ctx, name, obj, options) -} - -func gvr(group, version, resource string) schema.GroupVersionResource { - return schema.GroupVersionResource{ - Group: group, - Version: version, - Resource: resource, - } -} - -type resourceBuilder func() *unstructured.Unstructured - -func (rb resourceBuilder) clusterName(clusterName string) resourceBuilder { - return rb.annotation(logicalcluster.AnnotationKey, clusterName) -} - -func (rb resourceBuilder) annotation(key, value string) resourceBuilder { - return func() *unstructured.Unstructured { - r := rb.annotations()() - r.Object["metadata"].(map[string]interface{})["annotations"].(map[string]interface{})[key] = value - return r - } -} - -func (rb resourceBuilder) annotations() resourceBuilder { - return func() *unstructured.Unstructured { - r := rb() - metadata := r.Object["metadata"].(map[string]interface{}) - if _, exists := metadata["annotations"]; !exists { - metadata["annotations"] = map[string]interface{}{} - } - return r - } -} - -func (rb resourceBuilder) resourceVersion(rv string) resourceBuilder { - return func() *unstructured.Unstructured { - r := rb() - r.SetResourceVersion(rv) - return r - } -} - -func (rb resourceBuilder) deletionTimestamp(timestamp *metav1.Time) resourceBuilder { - return func() *unstructured.Unstructured { - r := rb() - r.SetDeletionTimestamp(timestamp) - return r - } -} - -func (rb resourceBuilder) label(key, value string) resourceBuilder { - return func() *unstructured.Unstructured { - r := rb.labels()() - r.Object["metadata"].(map[string]interface{})["labels"].(map[string]interface{})[key] = value - return r - } -} - -func (rb resourceBuilder) labels() resourceBuilder { - return func() *unstructured.Unstructured { - r := rb() - metadata := r.Object["metadata"].(map[string]interface{}) - if _, exists := metadata["labels"]; !exists { - metadata["labels"] = map[string]interface{}{} - } - return r - } -} - -func (rb resourceBuilder) finalizer(finalizer string) resourceBuilder { - return func() *unstructured.Unstructured { - r := rb() - finalizers := r.GetFinalizers() - finalizers = append(finalizers, finalizer) - r.SetFinalizers(finalizers) - return r - } -} - -func (rb resourceBuilder) finalizers() resourceBuilder { - return func() 
*unstructured.Unstructured { - r := rb() - finalizers := r.GetFinalizers() - if finalizers == nil { - r.SetFinalizers([]string{}) - } - return r - } -} - -func (rb resourceBuilder) field(name string, value interface{}) resourceBuilder { - return func() *unstructured.Unstructured { - r := rb() - r.Object[name] = value - return r - } -} - -func resource(apiVersion, kind, name string) resourceBuilder { - return func() *unstructured.Unstructured { - return &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": apiVersion, - "kind": kind, - "metadata": map[string]interface{}{ - "name": name, - }, - }, - } - } -} - -type mockedTransformation struct { - transform func(resource *unstructured.Unstructured) (*unstructured.Unstructured, error) -} - -func (mt *mockedTransformation) ToSyncerView(syncTargetKey string, gvr schema.GroupVersionResource, upstreamResource *unstructured.Unstructured, overridenSyncerViewFields map[string]interface{}, requestedSyncing map[string]helpers.SyncIntent) (newSyncerViewResource *unstructured.Unstructured, err error) { - return mt.transform(upstreamResource) -} - -func (mt *mockedTransformation) TransformationFor(resource metav1.Object) (Transformation, error) { - return mt, nil -} - -type mockedSummarizingRules struct { - fields []field -} - -func (msr *mockedSummarizingRules) FieldsToSummarize(gvr schema.GroupVersionResource) []FieldToSummarize { - result := make([]FieldToSummarize, 0, len(msr.fields)) - for _, f := range msr.fields { - result = append(result, FieldToSummarize(f)) - } - return result -} - -func (msr *mockedSummarizingRules) SummarizingRulesFor(resource metav1.Object) (SummarizingRules, error) { - return msr, nil -} - -var deletionTimestamp = metav1.Now() - -func TestSyncerResourceTransformer(t *testing.T) { - testCases := []struct { - name string - gvr schema.GroupVersionResource - availableResources []runtime.Object - action func(ctx context.Context, transformingClient dynamic.NamespaceableResourceInterface) (result interface{}, err error) - synctargetKey string - transform func(resource *unstructured.Unstructured) (*unstructured.Unstructured, error) - fieldsToSummarize []field - expectedResult interface{} - checkResult func(t *testing.T, watchTester *watch.FakeWatcher, result interface{}) - expectedError string - expectedClientActions []clienttesting.Action - }{ - { - name: "get - transform - summarize - success", - gvr: gvr("group", "version", "resources"), - synctargetKey: "syncTargetKey", - availableResources: []runtime.Object{ - resource("group/version", "Resource", "aThing").clusterName("onecluster"). - annotation("diff.syncer.internal.kcp.io/syncTargetKey", `{"status":{"statusField":"new"}}`)(), - }, - action: func(ctx context.Context, transformingClient dynamic.NamespaceableResourceInterface) (result interface{}, err error) { - return transformingClient.Get(ctx, "aThing", metav1.GetOptions{}) - }, - transform: func(resource *unstructured.Unstructured) (*unstructured.Unstructured, error) { - result := resource.DeepCopy() - _ = unstructured.SetNestedField(result.Object, "value", "added") - return result, nil - }, - fieldsToSummarize: []field{{FieldPath: "status"}}, - expectedClientActions: []clienttesting.Action{ - clienttesting.GetActionImpl{ - ActionImpl: clienttesting.ActionImpl{ - Verb: "get", - Resource: gvr("group", "version", "resources"), - }, - Name: "aThing", - }, - }, - expectedResult: resource("group/version", "Resource", "aThing"). 
- annotation("internal.workload.kcp.io/workspace-url", "https://localhost/clusters/onecluster"). - annotation("kcp.io/cluster", "onecluster"). - field("added", "value"). - field("status", map[string]interface{}{"statusField": "new"})(), - }, - { - name: "get - deletion requested - success", - gvr: gvr("group", "version", "resources"), - synctargetKey: "syncTargetKey", - availableResources: []runtime.Object{ - resource("group/version", "Resource", "aThing").clusterName("onecluster"). - label("state.workload.kcp.io/syncTargetKey", "Sync"). - annotation("deletion.internal.workload.kcp.io/syncTargetKey", deletionTimestamp.Format(time.RFC3339))(), - }, - action: func(ctx context.Context, transformingClient dynamic.NamespaceableResourceInterface) (result interface{}, err error) { - return transformingClient.Get(ctx, "aThing", metav1.GetOptions{}) - }, - fieldsToSummarize: []field{{FieldPath: "status"}}, - expectedClientActions: []clienttesting.Action{ - clienttesting.GetActionImpl{ - ActionImpl: clienttesting.ActionImpl{ - Verb: "get", - Resource: gvr("group", "version", "resources"), - }, - Name: "aThing", - }, - }, - expectedResult: resource("group/version", "Resource", "aThing"). - label("state.workload.kcp.io/syncTargetKey", "Sync"). - annotation("deletion.internal.workload.kcp.io/syncTargetKey", deletionTimestamp.Format(time.RFC3339)). - annotation("internal.workload.kcp.io/workspace-url", "https://localhost/clusters/onecluster"). - annotation("kcp.io/cluster", "onecluster"). - deletionTimestamp(&deletionTimestamp)(), - }, - { - name: "update status - no promote - success", - gvr: gvr("group", "version", "resources"), - synctargetKey: "syncTargetKey", - availableResources: []runtime.Object{ - resource("group/version", "Resource", "aThing").clusterName("onecluster"). - label("state.workload.kcp.io/syncTargetKey", "Sync"). - annotation("diff.syncer.internal.kcp.io/syncTargetKey", `{"spec.field":"alreadyupdated"}`)(), - }, - action: func(ctx context.Context, transformingClient dynamic.NamespaceableResourceInterface) (result interface{}, err error) { - return transformingClient.UpdateStatus(ctx, resource("group/version", "Resource", "aThing"). - finalizer("workload.kcp.io/syncer-syncTargetKey"). - field("status", map[string]interface{}{"statusField": "updated"})(), metav1.UpdateOptions{}) - }, - transform: func(resource *unstructured.Unstructured) (*unstructured.Unstructured, error) { - result := resource.DeepCopy() - _ = unstructured.SetNestedField(result.Object, "value", "added") - return result, nil - }, - fieldsToSummarize: []field{{FieldPath: "status"}, {FieldPath: "spec.field"}}, - expectedClientActions: []clienttesting.Action{ - clienttesting.GetActionImpl{ - ActionImpl: clienttesting.ActionImpl{ - Verb: "get", - Resource: gvr("group", "version", "resources"), - }, - Name: "aThing", - }, - clienttesting.UpdateActionImpl{ - ActionImpl: clienttesting.ActionImpl{ - Verb: "update", - Resource: gvr("group", "version", "resources"), - Subresource: "status", - }, - Object: resource("group/version", "Resource", "aThing"). - finalizer("workload.kcp.io/syncer-syncTargetKey"). - label("state.workload.kcp.io/syncTargetKey", "Sync"). - annotation("kcp.io/cluster", "onecluster"). - annotation("diff.syncer.internal.kcp.io/syncTargetKey", `{"spec.field":"alreadyupdated","status":{"statusField":"updated"}}`)(), - }, - }, - expectedResult: resource("group/version", "Resource", "aThing"). - finalizer("workload.kcp.io/syncer-syncTargetKey"). - label("state.workload.kcp.io/syncTargetKey", "Sync"). 
- annotation("internal.workload.kcp.io/workspace-url", "https://localhost/clusters/onecluster"). - annotation("kcp.io/cluster", "onecluster"). - field("status", map[string]interface{}{"statusField": "updated"}). - field("added", "value"). - field("spec", map[string]interface{}{"field": "alreadyupdated"})(), - }, - { - name: "update status - fail not owning", - gvr: gvr("group", "version", "resources"), - synctargetKey: "syncTargetKey", - availableResources: []runtime.Object{ - resource("group/version", "Resource", "aThing").clusterName("onecluster"). - label("state.workload.kcp.io/syncTargetKey", "Sync"). - annotation("diff.syncer.internal.kcp.io/syncTargetKey", `{"spec.field":"alreadyupdated"}`)(), - }, - action: func(ctx context.Context, transformingClient dynamic.NamespaceableResourceInterface) (result interface{}, err error) { - return transformingClient.UpdateStatus(ctx, resource("group/version", "Resource", "aThing"). - field("status", map[string]interface{}{"statusField": "updated"})(), metav1.UpdateOptions{}) - }, - transform: func(resource *unstructured.Unstructured) (*unstructured.Unstructured, error) { - result := resource.DeepCopy() - _ = unstructured.SetNestedField(result.Object, "value", "added") - return result, nil - }, - fieldsToSummarize: []field{{FieldPath: "status"}, {FieldPath: "spec.field"}}, - expectedClientActions: []clienttesting.Action{ - clienttesting.GetActionImpl{ - ActionImpl: clienttesting.ActionImpl{ - Verb: "get", - Resource: gvr("group", "version", "resources"), - }, - Name: "aThing", - }, - }, - expectedError: `Internal error occurred: tried to write resource resources/status(/aThing) though it is not owning it (syncer finalizer doesn't exist)`, - }, - { - name: "update status - promote - success", - gvr: gvr("group", "version", "resources"), - synctargetKey: "syncTargetKey", - availableResources: []runtime.Object{ - resource("group/version", "Resource", "aThing").clusterName("onecluster"). - label("state.workload.kcp.io/syncTargetKey", "Sync")(), - }, - action: func(ctx context.Context, transformingClient dynamic.NamespaceableResourceInterface) (result interface{}, err error) { - return transformingClient.UpdateStatus(ctx, resource("group/version", "Resource", "aThing"). - finalizer("workload.kcp.io/syncer-syncTargetKey"). - field("status", map[string]interface{}{"statusField": "updated"})(), metav1.UpdateOptions{}) - }, - transform: func(resource *unstructured.Unstructured) (*unstructured.Unstructured, error) { - result := resource.DeepCopy() - _ = unstructured.SetNestedField(result.Object, "value", "added") - return result, nil - }, - fieldsToSummarize: []field{{FieldPath: "status", PromoteToUpstream: true}}, - expectedClientActions: []clienttesting.Action{ - clienttesting.GetActionImpl{ - ActionImpl: clienttesting.ActionImpl{ - Verb: "get", - Resource: gvr("group", "version", "resources"), - }, - Name: "aThing", - }, - clienttesting.UpdateActionImpl{ - ActionImpl: clienttesting.ActionImpl{ - Verb: "update", - Resource: gvr("group", "version", "resources"), - Subresource: "status", - }, - Object: resource("group/version", "Resource", "aThing"). - finalizer("workload.kcp.io/syncer-syncTargetKey"). - label("state.workload.kcp.io/syncTargetKey", "Sync"). - annotation("kcp.io/cluster", "onecluster"). - annotation("diff.syncer.internal.kcp.io/syncTargetKey", `{"status":"##promoted##"}`). - field("status", map[string]interface{}{"statusField": "updated"})(), - }, - }, - expectedResult: resource("group/version", "Resource", "aThing"). 
- finalizer("workload.kcp.io/syncer-syncTargetKey"). - label("state.workload.kcp.io/syncTargetKey", "Sync"). - annotation("internal.workload.kcp.io/workspace-url", "https://localhost/clusters/onecluster"). - annotation("kcp.io/cluster", "onecluster"). - field("status", map[string]interface{}{"statusField": "updated"}). - field("added", "value")(), - }, - { - name: "update status - unpromote when new synctarget joining - success", - gvr: gvr("group", "version", "resources"), - synctargetKey: "syncTargetKey", - availableResources: []runtime.Object{ - resource("group/version", "Resource", "aThing").clusterName("onecluster"). - finalizer("workload.kcp.io/syncer-syncTargetKey"). - label("state.workload.kcp.io/syncTargetKey", "Sync"). - label("state.workload.kcp.io/syncTargetKey2", ""). - annotation("diff.syncer.internal.kcp.io/syncTargetKey", `{"status":"##promoted##"}`). - field("status", map[string]interface{}{"statusField": "updated"})(), - }, - action: func(ctx context.Context, transformingClient dynamic.NamespaceableResourceInterface) (result interface{}, err error) { - return transformingClient.UpdateStatus(ctx, resource("group/version", "Resource", "aThing"). - finalizer("workload.kcp.io/syncer-syncTargetKey"). - field("status", map[string]interface{}{"statusField": "updated"})(), metav1.UpdateOptions{}) - }, - transform: func(resource *unstructured.Unstructured) (*unstructured.Unstructured, error) { - result := resource.DeepCopy() - _ = unstructured.SetNestedField(result.Object, "value", "added") - return result, nil - }, - fieldsToSummarize: []field{{FieldPath: "status", PromoteToUpstream: true}}, - expectedClientActions: []clienttesting.Action{ - clienttesting.GetActionImpl{ - ActionImpl: clienttesting.ActionImpl{ - Verb: "get", - Resource: gvr("group", "version", "resources"), - }, - Name: "aThing", - }, - clienttesting.UpdateActionImpl{ - ActionImpl: clienttesting.ActionImpl{ - Verb: "update", - Resource: gvr("group", "version", "resources"), - Subresource: "status", - }, - Object: resource("group/version", "Resource", "aThing"). - finalizer("workload.kcp.io/syncer-syncTargetKey"). - label("state.workload.kcp.io/syncTargetKey", "Sync"). - label("state.workload.kcp.io/syncTargetKey2", ""). - annotation("kcp.io/cluster", "onecluster"). - annotation("diff.syncer.internal.kcp.io/syncTargetKey", `{"status":{"statusField":"updated"}}`). - field("status", map[string]interface{}{"statusField": "updated"})(), - }, - }, - expectedResult: resource("group/version", "Resource", "aThing"). - finalizer("workload.kcp.io/syncer-syncTargetKey"). - label("state.workload.kcp.io/syncTargetKey", "Sync"). - label("state.workload.kcp.io/syncTargetKey2", ""). - annotation("internal.workload.kcp.io/workspace-url", "https://localhost/clusters/onecluster"). - annotation("kcp.io/cluster", "onecluster"). - field("status", map[string]interface{}{"statusField": "updated"}). - field("added", "value")(), - }, - { - name: "update - not found", - gvr: schema.GroupVersionResource{Group: "group", Version: "version", Resource: "resources"}, - action: func(ctx context.Context, transformingClient dynamic.NamespaceableResourceInterface) (result interface{}, err error) { - return transformingClient.Update(ctx, resource("group/version", "Resource", "aThing"). 
- label("state.workload.kcp.io/syncTargetKey", "Sync")(), metav1.UpdateOptions{}) - }, - transform: func(resource *unstructured.Unstructured) (*unstructured.Unstructured, error) { - result := resource.DeepCopy() - _ = unstructured.SetNestedField(result.Object, "added", "before") - return result, nil - }, - expectedClientActions: []clienttesting.Action{ - clienttesting.GetActionImpl{ - ActionImpl: clienttesting.ActionImpl{ - Verb: "get", - Resource: gvr("group", "version", "resources"), - }, - Name: "aThing", - }, - }, - expectedError: `resources.group "aThing" not found`, - }, - { - name: "update - conflict", - gvr: schema.GroupVersionResource{Group: "group", Version: "version", Resource: "resources"}, - availableResources: []runtime.Object{ - resource("group/version", "Resource", "aThing").clusterName("onecluster"). - resourceVersion("0001"). - label("state.workload.kcp.io/syncTargetKey", "Sync")(), - }, - action: func(ctx context.Context, transformingClient dynamic.NamespaceableResourceInterface) (result interface{}, err error) { - return transformingClient.Update(ctx, resource("group/version", "Resource", "aThing"). - resourceVersion("0002"). - label("state.workload.kcp.io/syncTargetKey", "Sync")(), metav1.UpdateOptions{}) - }, - transform: func(resource *unstructured.Unstructured) (*unstructured.Unstructured, error) { - result := resource.DeepCopy() - _ = unstructured.SetNestedField(result.Object, "added", "before") - return result, nil - }, - expectedClientActions: []clienttesting.Action{ - clienttesting.GetActionImpl{ - ActionImpl: clienttesting.ActionImpl{ - Verb: "get", - Resource: gvr("group", "version", "resources"), - }, - Name: "aThing", - }, - }, - expectedError: `Operation cannot be fulfilled on resources.group "aThing": the resource has been modified in the meantime`, - }, - { - name: "Remove syncer finalizer - success", - gvr: gvr("group", "version", "resources"), - synctargetKey: "syncTargetKey", - availableResources: []runtime.Object{ - resource("group/version", "Resource", "aThing").clusterName("onecluster"). - finalizer("workload.kcp.io/syncer-syncTargetKey"). - label("state.workload.kcp.io/syncTargetKey", "Sync"). - annotation("deletion.internal.workload.kcp.io/syncTargetKey", deletionTimestamp.Format(time.RFC3339))(), - }, - action: func(ctx context.Context, transformingClient dynamic.NamespaceableResourceInterface) (result interface{}, err error) { - return transformingClient.Update(ctx, resource("group/version", "Resource", "aThing"). - deletionTimestamp(&deletionTimestamp)(), metav1.UpdateOptions{}) - }, - transform: func(resource *unstructured.Unstructured) (*unstructured.Unstructured, error) { - result := resource.DeepCopy() - _ = unstructured.SetNestedField(result.Object, "value", "added") - return result, nil - }, - fieldsToSummarize: []field{{FieldPath: "status"}, {FieldPath: "spec.field"}}, - expectedClientActions: []clienttesting.Action{ - clienttesting.GetActionImpl{ - ActionImpl: clienttesting.ActionImpl{ - Verb: "get", - Resource: gvr("group", "version", "resources"), - }, - Name: "aThing", - }, - clienttesting.UpdateActionImpl{ - ActionImpl: clienttesting.ActionImpl{ - Verb: "update", - Resource: gvr("group", "version", "resources"), - }, - Object: resource("group/version", "Resource", "aThing").finalizers().labels(). - annotation("kcp.io/cluster", "onecluster")(), - }, - }, - expectedResult: resource("group/version", "Resource", "aThing").labels(). - annotation("internal.workload.kcp.io/workspace-url", "https://localhost/clusters/onecluster"). 
- annotation("kcp.io/cluster", "onecluster"). - field("added", "value")(), - }, - { - name: "Remove syncer finalizer - should promote remaining synctarget status - success", - gvr: gvr("group", "version", "resources"), - synctargetKey: "syncTargetKey", - availableResources: []runtime.Object{ - resource("group/version", "Resource", "aThing").clusterName("onecluster"). - finalizer("workload.kcp.io/syncer-syncTargetKey"). - finalizer("workload.kcp.io/syncer-syncTargetKey2"). - label("state.workload.kcp.io/syncTargetKey", "Sync"). - label("state.workload.kcp.io/syncTargetKey2", "Sync"). - annotation("diff.syncer.internal.kcp.io/syncTargetKey", `{"status":{"statusField":"updated"}}`). - annotation("diff.syncer.internal.kcp.io/syncTargetKey2", `{"status":{"statusField":"new"}}`). - annotation("deletion.internal.workload.kcp.io/syncTargetKey", deletionTimestamp.Format(time.RFC3339))(), - }, - action: func(ctx context.Context, transformingClient dynamic.NamespaceableResourceInterface) (result interface{}, err error) { - return transformingClient.Update(ctx, resource("group/version", "Resource", "aThing"). - annotation("deletion.internal.workload.kcp.io/syncTargetKey", deletionTimestamp.Format(time.RFC3339)). - deletionTimestamp(&deletionTimestamp)(), metav1.UpdateOptions{}) - }, - transform: func(resource *unstructured.Unstructured) (*unstructured.Unstructured, error) { - result := resource.DeepCopy() - _ = unstructured.SetNestedField(result.Object, "value", "added") - return result, nil - }, - fieldsToSummarize: []field{{FieldPath: "status", PromoteToUpstream: true}}, - expectedClientActions: []clienttesting.Action{ - clienttesting.GetActionImpl{ - ActionImpl: clienttesting.ActionImpl{ - Verb: "get", - Resource: gvr("group", "version", "resources"), - }, - Name: "aThing", - }, - clienttesting.UpdateActionImpl{ - ActionImpl: clienttesting.ActionImpl{ - Verb: "update", - Resource: gvr("group", "version", "resources"), - Subresource: "status", - }, - Object: resource("group/version", "Resource", "aThing"). - finalizer("workload.kcp.io/syncer-syncTargetKey"). - finalizer("workload.kcp.io/syncer-syncTargetKey2"). - label("state.workload.kcp.io/syncTargetKey", "Sync"). - label("state.workload.kcp.io/syncTargetKey2", "Sync"). - annotation("kcp.io/cluster", "onecluster"). - annotation("deletion.internal.workload.kcp.io/syncTargetKey", deletionTimestamp.Format(time.RFC3339)). - annotation("diff.syncer.internal.kcp.io/syncTargetKey", `{"status":{"statusField":"updated"}}`). - annotation("diff.syncer.internal.kcp.io/syncTargetKey2", `{"status":"##promoted##"}`). - field("status", map[string]any{"statusField": string("new")})(), - }, - clienttesting.UpdateActionImpl{ - ActionImpl: clienttesting.ActionImpl{ - Verb: "update", - Resource: gvr("group", "version", "resources"), - }, - Object: resource("group/version", "Resource", "aThing"). - finalizer("workload.kcp.io/syncer-syncTargetKey2"). - label("state.workload.kcp.io/syncTargetKey2", "Sync"). - annotation("kcp.io/cluster", "onecluster"). - annotation("diff.syncer.internal.kcp.io/syncTargetKey2", `{"status":"##promoted##"}`). - field("status", map[string]any{"statusField": string("new")})(), - }, - }, - expectedResult: resource("group/version", "Resource", "aThing").labels(). - annotation("internal.workload.kcp.io/workspace-url", "https://localhost/clusters/onecluster"). - annotation("kcp.io/cluster", "onecluster"). - label("state.workload.kcp.io/syncTargetKey2", "Sync"). 
- field("added", "value")(), - }, - { - name: "watch - success", - gvr: schema.GroupVersionResource{Group: "group", Version: "version", Resource: "resources"}, - action: func(ctx context.Context, transformingClient dynamic.NamespaceableResourceInterface) (result interface{}, err error) { - return transformingClient.Watch(ctx, metav1.ListOptions{}) - }, - transform: func(resource *unstructured.Unstructured) (*unstructured.Unstructured, error) { - result := resource.DeepCopy() - _ = unstructured.SetNestedField(result.Object, "added "+result.GetName(), "after") - return result, nil - }, - // add fields here to see how they are overridden from the diff annotation - expectedClientActions: []clienttesting.Action{ - clienttesting.WatchActionImpl{ - ActionImpl: clienttesting.ActionImpl{ - Verb: "watch", - Resource: schema.GroupVersionResource{ - Group: "group", Version: "version", Resource: "resources", - }, - }, - WatchRestrictions: watchRestrictionsFromListOptions(metav1.ListOptions{}), - }, - }, - checkResult: func(t *testing.T, watchTester *watch.FakeWatcher, result interface{}) { - t.Helper() - - watcher, ok := result.(watch.Interface) - if !ok { - require.Fail(t, "result of Watch should be a watch.Interface") - } - - watchedError := &metav1.Status{ - Status: "Failure", - Message: "message", - } - checkWatchEvents(t, - watcher, - func() { - watchTester.Add(&unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "group/version", - "kind": "Resource", - "metadata": map[string]interface{}{ - "name": "aThing", - "annotations": map[string]interface{}{ - "kcp.io/cluster": "onecluster", - }, - }, - }, - }) - watchTester.Add(&unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "group/version", - "kind": "Resource", - "metadata": map[string]interface{}{ - "name": "aThingMore", - "annotations": map[string]interface{}{ - "kcp.io/cluster": "onecluster", - }, - }, - }, - }) - watchTester.Modify(&unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "group/version", - "kind": "Resource", - "metadata": map[string]interface{}{ - "name": "aThing", - "annotations": map[string]interface{}{ - "kcp.io/cluster": "onecluster", - }, - }, - }, - }) - watchTester.Delete(&unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "group/version", - "kind": "Resource", - "metadata": map[string]interface{}{ - "name": "aThingMore", - "annotations": map[string]interface{}{ - "kcp.io/cluster": "onecluster", - }, - }, - }, - }) - watchTester.Error(watchedError) - }, - []watch.Event{ - {Type: watch.Added, Object: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "group/version", - "kind": "Resource", - "metadata": map[string]interface{}{ - "name": "aThing", - "annotations": map[string]interface{}{ - "kcp.io/cluster": "onecluster", - "internal.workload.kcp.io/workspace-url": "https://localhost/clusters/onecluster", - }, - }, - "after": "added aThing", - }, - }}, - {Type: watch.Added, Object: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "group/version", - "kind": "Resource", - "metadata": map[string]interface{}{ - "name": "aThingMore", - "annotations": map[string]interface{}{ - "kcp.io/cluster": "onecluster", - "internal.workload.kcp.io/workspace-url": "https://localhost/clusters/onecluster", - }, - }, - "after": "added aThingMore", - }, - }}, - {Type: watch.Modified, Object: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "group/version", - "kind": 
"Resource", - "metadata": map[string]interface{}{ - "name": "aThing", - "annotations": map[string]interface{}{ - "kcp.io/cluster": "onecluster", - "internal.workload.kcp.io/workspace-url": "https://localhost/clusters/onecluster", - }, - }, - "after": "added aThing", - }, - }}, - {Type: watch.Deleted, Object: &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "group/version", - "kind": "Resource", - "metadata": map[string]interface{}{ - "name": "aThingMore", - "annotations": map[string]interface{}{ - "kcp.io/cluster": "onecluster", - "internal.workload.kcp.io/workspace-url": "https://localhost/clusters/onecluster", - }, - }, - "after": "added aThingMore", - }, - }}, - {Type: watch.Error, Object: watchedError}, - }) - }, - }, - } - - for _, test := range testCases { - t.Run(test.name, func(t *testing.T) { - fakeClient := fake.NewSimpleDynamicClient(runtime.NewScheme(), test.availableResources...) - var lclustersRequestedInActions []string - clusterClient := &mockedClusterClient{ - client: fakeClient, - lclusterRecorder: func(lcluster string) { - lclustersRequestedInActions = append(lclustersRequestedInActions, lcluster) - }, - } - - rt := &SyncerResourceTransformer{ - ShardExternalURL: "https://localhost/", - } - if test.transform != nil { - rt.TransformationProvider = &mockedTransformation{ - test.transform, - } - } - if test.fieldsToSummarize != nil { - rt.SummarizingRulesProvider = &mockedSummarizingRules{ - test.fieldsToSummarize, - } - } - - fakeWatcher := watch.NewFake() - defer fakeWatcher.Stop() - fakeClient.PrependWatchReactor("resources", clienttesting.DefaultWatchReactor(fakeWatcher, nil)) - - transformingClient := transforming.WithResourceTransformer(clusterClient, rt) - ctx := syncercontext.WithSyncTargetKey(context.Background(), test.synctargetKey) - ctx = dynamiccontext.WithAPIDomainKey(ctx, dynamiccontext.APIDomainKey(client.ToClusterAwareKey(logicalcluster.NewPath("root:negotiation"), "SyncTargetName"))) - result, err := test.action(ctx, transformingClient.Cluster(logicalcluster.NewPath("")).Resource(test.gvr)) - - if test.expectedError != "" { - require.EqualError(t, err, test.expectedError, "error is wrong") - } else { - require.NoError(t, err, "error is wrong") - } - - if test.checkResult != nil { - test.checkResult(t, fakeWatcher, result) - } else if test.expectedResult != nil { - require.Empty(t, cmp.Diff(test.expectedResult, result, cmpopts.SortSlices(sortUnstructured)), "result is wrong") - } else { - require.Nil(t, result, "result is wrong") - } - require.Empty(t, cmp.Diff(test.expectedClientActions, fakeClient.Actions()), "client actions are wrong") - }) - } -} - -func sortUnstructured(a *unstructured.Unstructured, b *unstructured.Unstructured) bool { - return a.GetName() > b.GetName() -} - -func watchRestrictionsFromListOptions(options metav1.ListOptions) clienttesting.WatchRestrictions { - labelSelector, fieldSelector, _ := clienttesting.ExtractFromListOptions(options) - return clienttesting.WatchRestrictions{ - Labels: labelSelector, - Fields: fieldSelector, - } -} - -func checkWatchEvents(t *testing.T, watcher watch.Interface, addEvents func(), expectedEvents []watch.Event) { - t.Helper() - - watchingStarted := make(chan bool, 1) - go func() { - <-watchingStarted - addEvents() - }() - - watchingStarted <- true - watcherChan := watcher.ResultChan() - var event watch.Event - - for _, expectedEvent := range expectedEvents { - select { - case event = <-watcherChan: - case <-time.After(wait.ForeverTestTimeout): - require.Fail(t, "Watch 
event not received") - } - require.Equal(t, expectedEvent.Type, event.Type, "Event type is wrong") - require.True(t, equality.Semantic.DeepEqual(expectedEvent.Object, event.Object), cmp.Diff(expectedEvent.Object, event.Object)) - } -} diff --git a/tmc/pkg/virtual/syncer/transformations/types.go b/tmc/pkg/virtual/syncer/transformations/types.go deleted file mode 100644 index ec71508086d..00000000000 --- a/tmc/pkg/virtual/syncer/transformations/types.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright 2021 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package transformations - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" - - "github.com/kcp-dev/kcp/sdk/apis/workload/helpers" -) - -// Transformation defines the action of transforming an resource when exposing it -// to the Syncer for a given SyncTarget through the Syncer Virtual Workspace. -// That's why the transformed resource is called the Syncer View. -// -// In addition to the upstream resource to transform, the transformation parameters -// also involve: -// - the overriding values of Syncer View summarized fields (the fields that the -// -// Syncer previously overrode, typically when updating the status, but this could -// also contain specific Spec fields) -// - the requested syncing intents for all SyncTargets. -type Transformation interface { - ToSyncerView(SyncTargetKey string, gvr schema.GroupVersionResource, upstreamResource *unstructured.Unstructured, overridenSyncerViewFields map[string]interface{}, requestedSyncing map[string]helpers.SyncIntent) (newSyncerViewResource *unstructured.Unstructured, err error) -} - -// TransformationProvider provides an appropriate Transformation based on the content of a resource. -type TransformationProvider interface { - TransformationFor(resource metav1.Object) (Transformation, error) -} - -// SummarizingRules defines rules that drive the way some specified fields -// (typically the status, but not limited to it), when updated by the Syncer, -// are managed by the Syncer Virtual Workspace with 2 possible options: -// - either stored in the SyncerView (as overriding field values in an annotation) -// - or promoted to the upstream object itself. -type SummarizingRules interface { - FieldsToSummarize(gvr schema.GroupVersionResource) []FieldToSummarize -} - -// SummarizingRulesProvider provides appropriate SummarizingRules based on the content of a resource. -type SummarizingRulesProvider interface { - SummarizingRulesFor(resource metav1.Object) (SummarizingRules, error) -} - -// FieldToSummarize defines a Field that can be overridden by the Syncer for a given Synctarget, -// as well as the rules according to which it will be managed. -type FieldToSummarize interface { - Field - FieldSummarizingRules -} - -// Field defines a Field that can be overridden by the Syncer for a given Synctarget. -// This is an interface since they would be several ways to identify a field. 
-// First implementation is very limited and doesn't support lists (ex: "status", "spec.ClusterIP"). -// It could also support JsonPath. -type Field interface { - // Set allows setting the value of this field on a resource. - Set(resource *unstructured.Unstructured, value interface{}) error - // Get allows getting the value of this field from a resource. - // The retrieved value should be deep-copied before mutation. - Get(resource *unstructured.Unstructured) (interface{}, bool, error) - // Delete allows deleting this field from a resource - Delete(resource *unstructured.Unstructured) - // Path provides the path of this field, which will be used as the key - // in the map of overriding field values (for a SyncTarget), - // stored on the resource as an annotation. - Path() string -} - -// FieldSummarizingRules defines rules according to which the summarized field -// should be managed. -// More rules might be added in the future. -type FieldSummarizingRules interface { - // IsStatus if the field is part of the status sub-resource. - // This is important, since it drives how the field will be updated - // during the transformation (according to the subresource - // of the Update / Get action). - IsStatus() bool - // CanPromoteToUpstream defines if the field should be promoted to the upstream resource - // (when the resource is scheduled on only one Synctarget of course). - // Promoted fields are always owned by the Syncer. - CanPromoteToUpstream() bool -} diff --git a/tmc/pkg/virtual/syncer/upsyncer/storage_wrapper.go b/tmc/pkg/virtual/syncer/upsyncer/storage_wrapper.go deleted file mode 100644 index bf2c7b93981..00000000000 --- a/tmc/pkg/virtual/syncer/upsyncer/storage_wrapper.go +++ /dev/null @@ -1,77 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package upsyncer - -import ( - "context" - "fmt" - - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apiserver/pkg/registry/rest" - - "github.com/kcp-dev/kcp/pkg/virtual/framework/forwardingregistry" -) - -// WithStaticLabelSelectorAndInWriteCallsCheck returns a StorageWrapper that adds the given label selector to the reading calls -// (Get, List and Watch), but also checks that write calls (Create or Update) are refused with an error if the resource -// would not be matched by the given label selector. 
-func WithStaticLabelSelectorAndInWriteCallsCheck(labelSelector labels.Requirements) forwardingregistry.StorageWrapper { - return forwardingregistry.StorageWrapperFunc( - func(resource schema.GroupResource, storage *forwardingregistry.StoreFuncs) { - delegateCreater := storage.CreaterFunc - storage.CreaterFunc = func(ctx context.Context, obj runtime.Object, createValidation rest.ValidateObjectFunc, options *metav1.CreateOptions) (runtime.Object, error) { - if meta, ok := obj.(metav1.Object); ok { - if !labels.Everything().Add(labelSelector...).Matches(labels.Set(meta.GetLabels())) { - return nil, apierrors.NewBadRequest(fmt.Sprintf("label selector %q does not match labels %v", labelSelector, meta.GetLabels())) - } - } - return delegateCreater.Create(ctx, obj, createValidation, options) - } - - delegateUpdater := storage.UpdaterFunc - storage.UpdaterFunc = func(ctx context.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc, forceAllowCreate bool, options *metav1.UpdateOptions) (runtime.Object, bool, error) { - // Note, we have to pass in a non-nil value for oldObj. Ideally it would be the zero value of the - // appropriate type (e.g a built-in type such as corev1.Namespace, or Unstructured for a custom resource). - // Unfortunately we don't know what the appropriate type is here, so we're using Unstructured. The - // transformers called by UpdatedObject should only be acting on things that satisfy the ObjectMeta - // interface, so this should be ok. - obj, err := objInfo.UpdatedObject(ctx, &unstructured.Unstructured{}) - if apierrors.IsNotFound(err) { - return delegateUpdater.Update(ctx, name, objInfo, createValidation, updateValidation, forceAllowCreate, options) - } - if err != nil { - return nil, false, err - } - - if meta, ok := obj.(metav1.Object); ok { - if !labels.Everything().Add(labelSelector...).Matches(labels.Set(meta.GetLabels())) { - return nil, false, apierrors.NewBadRequest(fmt.Sprintf("label selector %q does not match labels %v", labelSelector, meta.GetLabels())) - } - } - return delegateUpdater.Update(ctx, name, objInfo, createValidation, updateValidation, forceAllowCreate, options) - } - - staticStorage := forwardingregistry.WithStaticLabelSelector(labelSelector) - staticStorage.Decorate(resource, storage) - }, - ) -} diff --git a/tmc/pkg/virtual/syncer/upsyncer/transformer.go b/tmc/pkg/virtual/syncer/upsyncer/transformer.go deleted file mode 100644 index 2f9eee3970b..00000000000 --- a/tmc/pkg/virtual/syncer/upsyncer/transformer.go +++ /dev/null @@ -1,96 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package upsyncer - -import ( - "context" - "encoding/json" - "fmt" - "strings" - - jsonpatch "github.com/evanphx/json-patch" - - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/dynamic" - - syncercontext "github.com/kcp-dev/kcp/tmc/pkg/virtual/syncer/context" -) - -// UpsyncDiffAnnotationPrefix is an internal annotation used on downstream resources to specify a transformation -// that should be applied during the Upsyncing of the resource to upstream. -// Format of the annotation is JSONPatch. -const UpsyncDiffAnnotationPrefix = "internal.workload.kcp.io/upsyncdiff" - -// UpsyncerResourceTransformer defines a very simple transformer which transforms the resource by applying a -// the JSON patch found in the `internal.workload.kcp.io/upsyncdiff` annotation. -type UpsyncerResourceTransformer struct{} - -func (rt *UpsyncerResourceTransformer) AfterRead(client dynamic.ResourceInterface, ctx context.Context, gvr schema.GroupVersionResource, upstreamResource *unstructured.Unstructured, eventType *watch.EventType, subresources ...string) (*unstructured.Unstructured, error) { - return upstreamResource, nil -} - -func (rt *UpsyncerResourceTransformer) BeforeWrite(client dynamic.ResourceInterface, ctx context.Context, gvr schema.GroupVersionResource, syncerViewResource *unstructured.Unstructured, subresources ...string) (*unstructured.Unstructured, error) { - syncTargetKey, err := syncercontext.SyncTargetKeyFrom(ctx) - if err != nil { - return nil, err - } - - diffPatch := syncerViewResource.GetAnnotations()[UpsyncDiffAnnotationPrefix+syncTargetKey] - if diffPatch == "" { - return syncerViewResource, nil - } - - // TODO(jmprusi): hacky way to validate the patch, we should rethink this. Also we should allow some - // modifications to annotations and labels, but not *all* labels. - lowerPatch := strings.ToLower(diffPatch) - if strings.Contains(lowerPatch, "/metadata") || strings.Contains(lowerPatch, "/apiversion") || strings.Contains(lowerPatch, "/kind") { - return nil, fmt.Errorf("metadata, apiversion or kind cannot be modified by a transformation") - } - - // TODO(jmprusi): Surface those errors to the user. - patch, err := jsonpatch.DecodePatch([]byte(diffPatch)) - if err != nil { - return nil, err - } - - upstreamResource := syncerViewResource.DeepCopy() - if err != nil { - return nil, err - } - upstreamResourceJSON, err := json.Marshal(upstreamResource) - if err != nil { - return nil, err - } - - // Apply the patch to the copy of the upstream resource. - patchedUpstreamResourceJSON, err := patch.Apply(upstreamResourceJSON) - if err != nil { - return nil, err - } - var newResource *unstructured.Unstructured - if err := json.Unmarshal(patchedUpstreamResourceJSON, &newResource); err != nil { - return nil, err - } - - // Remove the diff annotation. 
- annotations := newResource.GetAnnotations()
- delete(annotations, UpsyncDiffAnnotationPrefix+syncTargetKey)
- newResource.SetAnnotations(annotations)
- return newResource, nil
-}

From fd3ed63ad71aa4a5dd8ab7f0f900de694c9e3a11 Mon Sep 17 00:00:00 2001
From: Christoph Mewes
Date: Wed, 24 May 2023 12:30:20 +0200
Subject: [PATCH 02/15] add note regarding the code removal, remove non-goals
 from readme

---
 README.md | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index 419f91dd76c..8deab3bc01c 100644
--- a/README.md
+++ b/README.md
@@ -7,15 +7,13 @@ kcp is a Kubernetes-like control plane focusing on:
 - A **control plane** for many independent, **isolated** “clusters” known as **workspaces**
 - Enabling API service providers to **offer APIs centrally** using **multi-tenant operators**
 - Easy **API consumption** for users in their workspaces
-- Flexible **scheduling** of workloads to physical clusters
-- **Transparent movement** of workloads among compatible physical clusters
-- **Advanced deployment strategies** for scenarios such as affinity/anti-affinity, geographic replication, cross-cloud
-  replication, etc.
 
 kcp can be a building block for SaaS service providers who need a **massively multi-tenant platform** to offer services
 to a large number of fully isolated tenants using Kubernetes-native APIs. The goal is to be useful to cloud providers
 as well as enterprise IT departments offering APIs within their company.
 
+**NB:** In May 2023, the kcp project was restructured and components related to workload scheduling (e.g. the syncer) and the transparent multi-cluster (TMC) code were removed due to lack of interest/maintainers.
+Please refer to the [`main-pre-tmc-removal` branch](https://github.com/kcp-dev/kcp/tree/main-pre-tmc-removal) if you are interested in the related code.
 ## Documentation
 
 Please visit [docs.kcp.io/kcp](https://docs.kcp.io/kcp) for our documentation.
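For context on the upsync transformation removed in the first patch above: UpsyncerResourceTransformer.BeforeWrite applied the JSON patch stored in the per-SyncTarget `internal.workload.kcp.io/upsyncdiff<syncTargetKey>` annotation to the resource, then stripped that annotation before the write reached upstream. Below is a minimal, self-contained sketch of that mechanism using the same github.com/evanphx/json-patch library; the helper name applyUpsyncDiff and the demo object are illustrative, not part of the removed code, and the removed implementation's guard rejecting patches that touch metadata, apiVersion, or kind is omitted here for brevity.

package main

import (
	"encoding/json"
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// Illustrative stand-in for the removed UpsyncDiffAnnotationPrefix constant.
const upsyncDiffAnnotationPrefix = "internal.workload.kcp.io/upsyncdiff"

// applyUpsyncDiff applies the JSON patch stored in the per-SyncTarget upsync
// diff annotation to the resource and removes the consumed annotation
// afterwards, mirroring the removed BeforeWrite logic in spirit.
func applyUpsyncDiff(obj *unstructured.Unstructured, syncTargetKey string) (*unstructured.Unstructured, error) {
	diff := obj.GetAnnotations()[upsyncDiffAnnotationPrefix+syncTargetKey]
	if diff == "" {
		// No diff annotation for this SyncTarget: pass the resource through unchanged.
		return obj, nil
	}

	patch, err := jsonpatch.DecodePatch([]byte(diff))
	if err != nil {
		return nil, err
	}

	raw, err := json.Marshal(obj)
	if err != nil {
		return nil, err
	}

	// Apply the patch to the serialized form of the resource.
	patched, err := patch.Apply(raw)
	if err != nil {
		return nil, err
	}

	result := &unstructured.Unstructured{}
	if err := json.Unmarshal(patched, result); err != nil {
		return nil, err
	}

	// Drop the consumed diff annotation so it is not persisted upstream.
	annotations := result.GetAnnotations()
	delete(annotations, upsyncDiffAnnotationPrefix+syncTargetKey)
	result.SetAnnotations(annotations)
	return result, nil
}

func main() {
	// Hypothetical downstream resource carrying an upsync diff for "target1".
	obj := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "ConfigMap",
		"metadata": map[string]interface{}{
			"name": "example",
			"annotations": map[string]interface{}{
				upsyncDiffAnnotationPrefix + "target1": `[{"op":"replace","path":"/data","value":{"k":"v"}}]`,
			},
		},
		"data": map[string]interface{}{},
	}}

	out, err := applyUpsyncDiff(obj, "target1")
	fmt.Println(out, err)
}
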
From d6338d677b54af28c954653f02eac124ee4716ec Mon Sep 17 00:00:00 2001 From: Christoph Mewes Date: Wed, 24 May 2023 14:28:52 +0200 Subject: [PATCH 03/15] remove e2e tests --- .../apiresourceschema_cowboys.yaml | 49 - .../locationworkspace/local_apiexport_test.go | 181 -- .../multiple_apiexports_test.go | 258 -- .../locationworkspace/rootcompute_test.go | 245 -- .../locationworkspace/synctarget_test.go | 247 -- .../reconciler/namespace/controller_test.go | 375 --- .../scheduling/api_compatibility_test.go | 89 - .../reconciler/scheduling/controller_test.go | 298 --- .../scheduling/multi_placements_test.go | 214 -- .../scheduling/placement_scheduler_test.go | 300 --- .../scheduling/upsynced_scheduling_test.go | 203 -- .../virtual/syncer/virtualworkspace_test.go | 2161 ----------------- 12 files changed, 4620 deletions(-) delete mode 100644 test/e2e/reconciler/locationworkspace/apiresourceschema_cowboys.yaml delete mode 100644 test/e2e/reconciler/locationworkspace/local_apiexport_test.go delete mode 100644 test/e2e/reconciler/locationworkspace/multiple_apiexports_test.go delete mode 100644 test/e2e/reconciler/locationworkspace/rootcompute_test.go delete mode 100644 test/e2e/reconciler/locationworkspace/synctarget_test.go delete mode 100644 test/e2e/reconciler/namespace/controller_test.go delete mode 100644 test/e2e/reconciler/scheduling/api_compatibility_test.go delete mode 100644 test/e2e/reconciler/scheduling/controller_test.go delete mode 100644 test/e2e/reconciler/scheduling/multi_placements_test.go delete mode 100644 test/e2e/reconciler/scheduling/placement_scheduler_test.go delete mode 100644 test/e2e/reconciler/scheduling/upsynced_scheduling_test.go delete mode 100644 test/e2e/virtual/syncer/virtualworkspace_test.go diff --git a/test/e2e/reconciler/locationworkspace/apiresourceschema_cowboys.yaml b/test/e2e/reconciler/locationworkspace/apiresourceschema_cowboys.yaml deleted file mode 100644 index d4629820a18..00000000000 --- a/test/e2e/reconciler/locationworkspace/apiresourceschema_cowboys.yaml +++ /dev/null @@ -1,49 +0,0 @@ -apiVersion: apis.kcp.io/v1alpha1 -kind: APIResourceSchema -metadata: - name: today.cowboys.wildwest.dev -spec: - group: wildwest.dev - names: - kind: Cowboy - listKind: CowboyList - plural: cowboys - singular: cowboy - scope: Namespaced - versions: - - name: v1alpha1 - schema: - description: Cowboy is part of the wild west - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: CowboySpec holds the desired state of the Cowboy. - properties: - intent: - type: string - # this is to ensure it will not be compatible in share environment - other-itent: - type: string - type: object - status: - description: CowboyStatus communicates the observed state of the Cowboy. 
- properties: - result: - type: string - type: object - type: object - served: true - storage: true - subresources: - status: {} diff --git a/test/e2e/reconciler/locationworkspace/local_apiexport_test.go b/test/e2e/reconciler/locationworkspace/local_apiexport_test.go deleted file mode 100644 index 7a901dd3b9f..00000000000 --- a/test/e2e/reconciler/locationworkspace/local_apiexport_test.go +++ /dev/null @@ -1,181 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package locationworkspace - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - kcpdiscovery "github.com/kcp-dev/client-go/discovery" - kcpkubernetesclientset "github.com/kcp-dev/client-go/kubernetes" - "github.com/kcp-dev/logicalcluster/v3" - "github.com/stretchr/testify/require" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/clientcmd" - "sigs.k8s.io/yaml" - - "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/util/conditions" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" - kcpclientset "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster" - "github.com/kcp-dev/kcp/test/e2e/framework" -) - -// TestSyncTargetLocalExport is to test that user import kubernetes API in synctarget workspace and use it, -// instead of using global kubernetes APIExport. -// TODO(qiujian16) This might be removed when we do not support local export later. 
-func TestSyncTargetLocalExport(t *testing.T) { - t.Parallel() - framework.Suite(t, "transparent-multi-cluster") - - ctx, cancelFunc := context.WithCancel(context.Background()) - t.Cleanup(cancelFunc) - - source := framework.SharedKcpServer(t) - - orgPath, _ := framework.NewOrganizationFixture(t, source, framework.TODO_WithoutMultiShardSupport()) - computePath, computeWorkspace := framework.NewWorkspaceFixture(t, source, orgPath, framework.WithName("compute"), framework.TODO_WithoutMultiShardSupport()) - computeClusterName := logicalcluster.Name(computeWorkspace.Spec.Cluster) - - kcpClients, err := kcpclientset.NewForConfig(source.BaseConfig(t)) - require.NoError(t, err, "failed to construct kcp cluster client for server") - kubeClusterClient, err := kcpkubernetesclientset.NewForConfig(source.BaseConfig(t)) - require.NoError(t, err) - - syncTargetName := "synctarget" - t.Logf("Creating a SyncTarget and syncer in %s", computePath) - syncTarget := framework.NewSyncerFixture(t, source, computePath, - framework.WithAPIExports(""), - framework.WithExtraResources("services"), - framework.WithSyncTargetName(syncTargetName), - framework.WithSyncedUserWorkspaces(computeWorkspace), - ).CreateSyncTargetAndApplyToDownstream(t).StartSyncer(t) - - framework.Eventually(t, func() (bool, string) { - syncTarget, err := kcpClients.Cluster(computePath).WorkloadV1alpha1().SyncTargets().Get(ctx, syncTargetName, metav1.GetOptions{}) - require.NoError(t, err) - - if len(syncTarget.Status.SyncedResources) != 1 { - return false, fmt.Sprintf("expected 1 synced resources (services), got %v\n\n%s", syncTarget.Status.SyncedResources, toYAML(t, syncTarget)) - } - - if syncTarget.Status.SyncedResources[0].Resource != "services" || - syncTarget.Status.SyncedResources[0].State != workloadv1alpha1.ResourceSchemaAcceptedState { - return false, fmt.Sprintf("expected services resource, got %v\n\n%s", syncTarget.Status.SyncedResources, toYAML(t, syncTarget)) - } - - return true, "" - }, wait.ForeverTestTimeout, time.Millisecond*100) - - // create virtual workspace rest configs - rawConfig, err := source.RawConfig() - require.NoError(t, err) - virtualWorkspaceRawConfig := rawConfig.DeepCopy() - virtualWorkspaceRawConfig.Clusters["syncvervw"] = rawConfig.Clusters["base"].DeepCopy() - virtualWorkspaceRawConfig.Clusters["syncvervw"].Server = rawConfig.Clusters["base"].Server + "/services/syncer/" + computeClusterName.String() + "/" + syncTargetName + "/" + syncTarget.SyncerConfig.SyncTargetUID - virtualWorkspaceRawConfig.Contexts["syncvervw"] = rawConfig.Contexts["base"].DeepCopy() - virtualWorkspaceRawConfig.Contexts["syncvervw"].Cluster = "syncvervw" - virtualWorkspaceConfig, err := clientcmd.NewNonInteractiveClientConfig(*virtualWorkspaceRawConfig, "syncvervw", nil, nil).ClientConfig() - require.NoError(t, err) - virtualWorkspaceConfig = rest.AddUserAgent(rest.CopyConfig(virtualWorkspaceConfig), t.Name()) - - virtualWorkspaceiscoverClusterClient, err := kcpdiscovery.NewForConfig(virtualWorkspaceConfig) - require.NoError(t, err) - - t.Logf("Wait for service API from synctarget workspace to be served in synctarget virtual workspace.") - require.Eventually(t, func() bool { - _, existingAPIResourceLists, err := virtualWorkspaceiscoverClusterClient.ServerGroupsAndResources() - if err != nil { - return false - } - // requiredAPIResourceList includes all core APIs plus services API - return len(cmp.Diff([]*metav1.APIResourceList{ - requiredAPIResourceListWithService(computeClusterName, computeClusterName)}, 
sortAPIResourceList(existingAPIResourceLists))) == 0 - }, wait.ForeverTestTimeout, time.Millisecond*100) - - t.Logf("Synctarget should be authorized to access downstream clusters") - framework.EventuallyCondition(t, func() (conditions.Getter, error) { - return kcpClients.Cluster(computePath).WorkloadV1alpha1().SyncTargets().Get(ctx, syncTargetName, metav1.GetOptions{}) - }, framework.Is(workloadv1alpha1.SyncerAuthorized)) - - t.Logf("Bind to location workspace") - framework.NewBindCompute(t, computePath, source, - framework.WithAPIExportsWorkloadBindOption(workloadv1alpha1.ImportedAPISExportName), - ).Bind(t) - - t.Logf("Wait for being able to list Services in the user workspace") - require.Eventually(t, func() bool { - _, err := kubeClusterClient.Cluster(computePath).CoreV1().Services("default").List(ctx, metav1.ListOptions{}) - if errors.IsNotFound(err) { - t.Logf("service err %v", err) - return false - } else if err != nil { - t.Logf("service err %v", err) - return false - } - return true - }, wait.ForeverTestTimeout, time.Millisecond*100) - - t.Logf("Create a service in the user workspace") - _, err = kubeClusterClient.Cluster(computePath).CoreV1().Services("default").Create(ctx, &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "first", - Labels: map[string]string{ - "test.workload.kcp.io": syncTargetName, - }, - }, - Spec: corev1.ServiceSpec{ - Ports: []corev1.ServicePort{ - { - Port: 80, - Protocol: corev1.ProtocolTCP, - }, - }, - }, - }, metav1.CreateOptions{}) - require.NoError(t, err) - - t.Logf("Wait for the service to be synced to the downstream cluster") - framework.Eventually(t, func() (bool, string) { - downstreamServices, err := syncTarget.DownstreamKubeClient.CoreV1().Services("").List(ctx, metav1.ListOptions{ - LabelSelector: "test.workload.kcp.io=" + syncTargetName, - }) - - if err != nil { - return false, fmt.Sprintf("Failed to list service: %v", err) - } - - if len(downstreamServices.Items) < 1 { - return false, "service is not synced" - } - return true, "" - }, wait.ForeverTestTimeout, time.Millisecond*100) -} - -func toYAML(t *testing.T, obj interface{}) string { - t.Helper() - data, err := yaml.Marshal(obj) - require.NoError(t, err) - return string(data) -} diff --git a/test/e2e/reconciler/locationworkspace/multiple_apiexports_test.go b/test/e2e/reconciler/locationworkspace/multiple_apiexports_test.go deleted file mode 100644 index 16e6b7a2eb6..00000000000 --- a/test/e2e/reconciler/locationworkspace/multiple_apiexports_test.go +++ /dev/null @@ -1,258 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package locationworkspace - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - kcpdiscovery "github.com/kcp-dev/client-go/discovery" - kcpdynamic "github.com/kcp-dev/client-go/dynamic" - "github.com/kcp-dev/logicalcluster/v3" - "github.com/stretchr/testify/require" - - rbacv1 "k8s.io/api/rbac/v1" - apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/apiserver/pkg/endpoints/discovery" - "k8s.io/client-go/discovery/cached/memory" - "k8s.io/client-go/rest" - "k8s.io/client-go/restmapper" - "k8s.io/client-go/tools/clientcmd" - - "github.com/kcp-dev/kcp/config/helpers" - kube124 "github.com/kcp-dev/kcp/config/rootcompute/kube-1.24" - apisv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1" - "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/util/conditions" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" - kcpclientset "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster" - kubefixtures "github.com/kcp-dev/kcp/test/e2e/fixtures/kube" - "github.com/kcp-dev/kcp/test/e2e/framework" -) - -func TestMultipleExports(t *testing.T) { - t.Parallel() - framework.Suite(t, "transparent-multi-cluster") - - ctx, cancelFunc := context.WithCancel(context.Background()) - t.Cleanup(cancelFunc) - - source := framework.SharedKcpServer(t) - - orgPath, _ := framework.NewOrganizationFixture(t, source, framework.TODO_WithoutMultiShardSupport()) - computePath, computeWorkspace := framework.NewWorkspaceFixture(t, source, orgPath, framework.TODO_WithoutMultiShardSupport()) - computeClusterName := logicalcluster.Name(computeWorkspace.Spec.Cluster) - - kcpClients, err := kcpclientset.NewForConfig(source.BaseConfig(t)) - require.NoError(t, err, "failed to construct kcp cluster client for server") - - dynamicClients, err := kcpdynamic.NewForConfig(source.BaseConfig(t)) - require.NoError(t, err, "failed to construct dynamic cluster client for server") - - serviceSchemaPath, serviceSchemaWorkspace := framework.NewWorkspaceFixture(t, source, orgPath, framework.TODO_WithoutMultiShardSupport()) - serviceSchemaClusterName := logicalcluster.Name(serviceSchemaWorkspace.Spec.Cluster) - - t.Logf("Install service APIResourceSchema into service schema workspace %q", serviceSchemaPath) - mapper := restmapper.NewDeferredDiscoveryRESTMapper(memory.NewMemCacheClient(kcpClients.Cluster(serviceSchemaPath).Discovery())) - err = helpers.CreateResourceFromFS(ctx, dynamicClients.Cluster(serviceSchemaPath), mapper, sets.New[string]("root-compute-workspace"), "apiresourceschema_services.yaml", kube124.KubeComputeFS) - require.NoError(t, err) - t.Logf("Create an APIExport for it") - serviceAPIExport := &apisv1alpha1.APIExport{ - ObjectMeta: metav1.ObjectMeta{ - Name: "services", - }, - Spec: apisv1alpha1.APIExportSpec{ - LatestResourceSchemas: []string{"v124.services.core"}, - }, - } - _, err = kcpClients.Cluster(serviceSchemaPath).ApisV1alpha1().APIExports().Create(ctx, serviceAPIExport, metav1.CreateOptions{}) - require.NoError(t, err) - - ingressSchemaPath, ingressSchemaWorkspace := framework.NewWorkspaceFixture(t, source, orgPath, framework.TODO_WithoutMultiShardSupport()) - t.Logf("Install ingress APIResourceSchema into ingress schema workspace %q", ingressSchemaPath) - mapper = 
restmapper.NewDeferredDiscoveryRESTMapper(memory.NewMemCacheClient(kcpClients.Cluster(ingressSchemaPath).Discovery())) - err = helpers.CreateResourceFromFS(ctx, dynamicClients.Cluster(ingressSchemaPath), mapper, sets.New[string]("root-compute-workspace"), "apiresourceschema_ingresses.networking.k8s.io.yaml", kube124.KubeComputeFS) - require.NoError(t, err) - t.Logf("Create an APIExport for it") - ingressAPIExport := &apisv1alpha1.APIExport{ - ObjectMeta: metav1.ObjectMeta{ - Name: "ingresses", - }, - Spec: apisv1alpha1.APIExportSpec{ - LatestResourceSchemas: []string{"v124.ingresses.networking.k8s.io"}, - }, - } - _, err = kcpClients.Cluster(ingressSchemaPath).ApisV1alpha1().APIExports().Create(ctx, ingressAPIExport, metav1.CreateOptions{}) - require.NoError(t, err) - - syncTargetName := "synctarget" - t.Logf("Creating a SyncTarget and syncer in %s", computePath) - syncTarget := framework.NewSyncerFixture(t, source, computePath, - framework.WithAPIExports(fmt.Sprintf("%s:%s", serviceSchemaPath.String(), serviceAPIExport.Name)), - framework.WithSyncTargetName(syncTargetName), - framework.WithDownstreamPreparation(func(config *rest.Config, isFakePCluster bool) { - if !isFakePCluster { - // Only need to install services - return - } - sinkCrdClient, err := apiextensionsclientset.NewForConfig(config) - require.NoError(t, err, "failed to create apiextensions client") - t.Logf("Installing test CRDs into sink cluster...") - kubefixtures.Create(t, sinkCrdClient.ApiextensionsV1().CustomResourceDefinitions(), - metav1.GroupResource{Group: "networking.k8s.io", Resource: "ingresses"}, - ) - require.NoError(t, err) - }), - ).CreateSyncTargetAndApplyToDownstream(t).StartSyncer(t) - - t.Logf("syncTarget should have one resource to sync") - require.Eventually(t, func() bool { - syncTarget, err := kcpClients.Cluster(computePath).WorkloadV1alpha1().SyncTargets().Get(ctx, syncTargetName, metav1.GetOptions{}) - if err != nil { - return false - } - - if len(syncTarget.Status.SyncedResources) != 1 { - return false - } - - if syncTarget.Status.SyncedResources[0].Resource != "services" || - syncTarget.Status.SyncedResources[0].State != workloadv1alpha1.ResourceSchemaAcceptedState { - return false - } - - return true - }, wait.ForeverTestTimeout, time.Millisecond*100) - - t.Logf("Synctarget should be authorized to access downstream clusters") - framework.EventuallyCondition(t, func() (conditions.Getter, error) { - return kcpClients.Cluster(computePath).WorkloadV1alpha1().SyncTargets().Get(ctx, syncTargetName, metav1.GetOptions{}) - }, framework.Is(workloadv1alpha1.SyncerAuthorized)) - - t.Logf("Patch synctarget with new export") - patchData := fmt.Sprintf( - `{"spec":{"supportedAPIExports":[{"path":%q,"export":"services"},{"path":%q,"export":"ingresses"}]}}`, serviceSchemaPath.String(), ingressSchemaPath.String()) - _, err = kcpClients.Cluster(computePath).WorkloadV1alpha1().SyncTargets().Patch(ctx, syncTargetName, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}) - require.NoError(t, err) - - require.Eventually(t, func() bool { - syncTarget, err := kcpClients.Cluster(computePath).WorkloadV1alpha1().SyncTargets().Get(ctx, syncTargetName, metav1.GetOptions{}) - if err != nil { - return false - } - - if len(syncTarget.Status.SyncedResources) != 2 { - return false - } - - if syncTarget.Status.SyncedResources[1].Resource != "services" || - syncTarget.Status.SyncedResources[1].State != workloadv1alpha1.ResourceSchemaAcceptedState { - return false - } - - if syncTarget.Status.SyncedResources[0].Resource != 
"ingresses" || - syncTarget.Status.SyncedResources[0].State != workloadv1alpha1.ResourceSchemaAcceptedState { - return false - } - - return true - }, wait.ForeverTestTimeout, time.Millisecond*100) - - t.Logf("Synctarget should not be authorized to access downstream clusters") - framework.EventuallyCondition(t, func() (conditions.Getter, error) { - return kcpClients.Cluster(computePath).WorkloadV1alpha1().SyncTargets().Get(ctx, syncTargetName, metav1.GetOptions{}) - }, framework.IsNot(workloadv1alpha1.SyncerAuthorized)) - - t.Logf("Update clusterole so syncer can start to sync") - downstreamKubeClient := syncTarget.DownstreamKubeClient - require.Eventually(t, func() bool { - clusterRole, err := downstreamKubeClient.RbacV1().ClusterRoles().Get(ctx, syncTarget.SyncerID, metav1.GetOptions{}) - if err != nil { - return false - } - - clusterRole.Rules = append(clusterRole.Rules, rbacv1.PolicyRule{ - APIGroups: []string{"networking.k8s.io"}, - Resources: []string{"ingresses"}, - Verbs: []string{"*"}, - }) - - _, err = downstreamKubeClient.RbacV1().ClusterRoles().Update(ctx, clusterRole, metav1.UpdateOptions{}) - return err == nil - }, wait.ForeverTestTimeout, time.Millisecond*100) - - t.Logf("Synctarget should be authorized to access downstream clusters") - framework.EventuallyCondition(t, func() (conditions.Getter, error) { - return kcpClients.Cluster(computePath).WorkloadV1alpha1().SyncTargets().Get(ctx, syncTargetName, metav1.GetOptions{}) - }, framework.Is(workloadv1alpha1.SyncerAuthorized)) - - // create virtual workspace rest configs - rawConfig, err := source.RawConfig() - require.NoError(t, err) - virtualWorkspaceRawConfig := rawConfig.DeepCopy() - virtualWorkspaceRawConfig.Clusters["syncvervw"] = rawConfig.Clusters["base"].DeepCopy() - virtualWorkspaceRawConfig.Clusters["syncvervw"].Server = rawConfig.Clusters["base"].Server + "/services/syncer/" + computeClusterName.String() + "/" + syncTargetName + "/" + syncTarget.SyncerConfig.SyncTargetUID - virtualWorkspaceRawConfig.Contexts["syncvervw"] = rawConfig.Contexts["base"].DeepCopy() - virtualWorkspaceRawConfig.Contexts["syncvervw"].Cluster = "syncvervw" - virtualWorkspaceConfig, err := clientcmd.NewNonInteractiveClientConfig(*virtualWorkspaceRawConfig, "syncvervw", nil, nil).ClientConfig() - require.NoError(t, err) - virtualWorkspaceConfig = rest.AddUserAgent(rest.CopyConfig(virtualWorkspaceConfig), t.Name()) - - virtualWorkspaceiscoverClusterClient, err := kcpdiscovery.NewForConfig(virtualWorkspaceConfig) - require.NoError(t, err) - framework.Eventually(t, func() (bool, string) { - _, existingAPIResourceLists, err := virtualWorkspaceiscoverClusterClient.ServerGroupsAndResources() - if err != nil { - return false, err.Error() - } - requiredIngressAPIResourceList := &metav1.APIResourceList{ - TypeMeta: metav1.TypeMeta{ - Kind: "APIResourceList", - APIVersion: "v1", - }, - GroupVersion: "networking.k8s.io/v1", - APIResources: []metav1.APIResource{ - { - Kind: "Ingress", - Name: "ingresses", - SingularName: "ingress", - ShortNames: []string{"ing"}, - Namespaced: true, - Verbs: metav1.Verbs{"get", "list", "patch", "update", "watch"}, - StorageVersionHash: discovery.StorageVersionHash(logicalcluster.Name(ingressSchemaWorkspace.Spec.Cluster), "networking.k8s.io", "v1", "Ingress"), - }, - { - Kind: "Ingress", - Name: "ingresses/status", - SingularName: "", - Namespaced: true, - Verbs: metav1.Verbs{"get", "patch", "update"}, - StorageVersionHash: "", - }, - }, - } - - diff := cmp.Diff([]*metav1.APIResourceList{requiredIngressAPIResourceList, 
requiredAPIResourceListWithService(computeClusterName, serviceSchemaClusterName)}, sortAPIResourceList(existingAPIResourceLists)) - return len(diff) == 0, diff - }, wait.ForeverTestTimeout, time.Millisecond*100) -} diff --git a/test/e2e/reconciler/locationworkspace/rootcompute_test.go b/test/e2e/reconciler/locationworkspace/rootcompute_test.go deleted file mode 100644 index e9dd4bbf3ed..00000000000 --- a/test/e2e/reconciler/locationworkspace/rootcompute_test.go +++ /dev/null @@ -1,245 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package locationworkspace - -import ( - "context" - "fmt" - "testing" - "time" - - kcpkubernetesclientset "github.com/kcp-dev/client-go/kubernetes" - "github.com/stretchr/testify/require" - - appsv1 "k8s.io/api/apps/v1" - v1 "k8s.io/api/autoscaling/v1" - corev1 "k8s.io/api/core/v1" - apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/wait" - discocache "k8s.io/client-go/discovery/cached" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/rest" - "k8s.io/client-go/restmapper" - "k8s.io/client-go/scale" - - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" - kcpclientset "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster" - kubefixtures "github.com/kcp-dev/kcp/test/e2e/fixtures/kube" - "github.com/kcp-dev/kcp/test/e2e/framework" -) - -func TestRootComputeWorkspace(t *testing.T) { - t.Parallel() - framework.Suite(t, "transparent-multi-cluster") - - ctx, cancelFunc := context.WithCancel(context.Background()) - t.Cleanup(cancelFunc) - - source := framework.SharedKcpServer(t) - - orgPath, _ := framework.NewOrganizationFixture(t, source, framework.TODO_WithoutMultiShardSupport()) - computePath, _ := framework.NewWorkspaceFixture(t, source, orgPath, framework.TODO_WithoutMultiShardSupport()) - consumerPath, consumerWorkspace := framework.NewWorkspaceFixture(t, source, orgPath, framework.TODO_WithoutMultiShardSupport()) - - kcpClients, err := kcpclientset.NewForConfig(source.BaseConfig(t)) - require.NoError(t, err, "failed to construct kcp cluster client for server") - kubeClusterClient, err := kcpkubernetesclientset.NewForConfig(source.BaseConfig(t)) - require.NoError(t, err) - - syncTargetName := "synctarget" - t.Logf("Creating a SyncTarget and syncer in %s", computePath) - syncerFixture := framework.NewSyncerFixture(t, source, computePath, - framework.WithSyncTargetName(syncTargetName), - framework.WithSyncedUserWorkspaces(consumerWorkspace), - framework.WithDownstreamPreparation(func(config *rest.Config, isFakePCluster bool) { - if !isFakePCluster { - // Only need to install services - return - } - sinkCrdClient, err := apiextensionsclientset.NewForConfig(config) - require.NoError(t, err, "failed to create apiextensions client") - t.Logf("Installing test CRDs into sink cluster...") - kubefixtures.Create(t, 
sinkCrdClient.ApiextensionsV1().CustomResourceDefinitions(), - metav1.GroupResource{Group: "networking.k8s.io", Resource: "ingresses"}, - ) - require.NoError(t, err) - }), - ).CreateSyncTargetAndApplyToDownstream(t).StartSyncer(t) - - require.Eventually(t, func() bool { - syncTarget, err := kcpClients.Cluster(computePath).WorkloadV1alpha1().SyncTargets().Get(ctx, syncTargetName, metav1.GetOptions{}) - if err != nil { - return false - } - - if len(syncTarget.Status.SyncedResources) != 5 { - return false - } - - if syncTarget.Status.SyncedResources[0].Resource != "services" || - syncTarget.Status.SyncedResources[0].State != workloadv1alpha1.ResourceSchemaAcceptedState { - return false - } - if syncTarget.Status.SyncedResources[1].Resource != "pods" || - syncTarget.Status.SyncedResources[1].State != workloadv1alpha1.ResourceSchemaAcceptedState { - return false - } - if syncTarget.Status.SyncedResources[2].Resource != "ingresses" || - syncTarget.Status.SyncedResources[2].State != workloadv1alpha1.ResourceSchemaAcceptedState { - return false - } - if syncTarget.Status.SyncedResources[3].Resource != "endpoints" || - syncTarget.Status.SyncedResources[3].State != workloadv1alpha1.ResourceSchemaAcceptedState { - return false - } - if syncTarget.Status.SyncedResources[4].Resource != "deployments" || - syncTarget.Status.SyncedResources[4].State != workloadv1alpha1.ResourceSchemaAcceptedState { - return false - } - - return true - }, wait.ForeverTestTimeout, time.Millisecond*100) - - t.Logf("Bind to location workspace") - framework.NewBindCompute(t, consumerPath, source, - framework.WithAPIExportsWorkloadBindOption("root:compute:kubernetes"), - framework.WithLocationWorkspaceWorkloadBindOption(computePath), - ).Bind(t) - - t.Logf("Wait for being able to list Services in the user workspace") - require.Eventually(t, func() bool { - _, err := kubeClusterClient.Cluster(consumerPath).CoreV1().Services("default").List(ctx, metav1.ListOptions{}) - if errors.IsNotFound(err) { - t.Logf("service err %v", err) - return false - } else if err != nil { - t.Logf("service err %v", err) - return false - } - return true - }, wait.ForeverTestTimeout, time.Millisecond*100) - - t.Logf("Create a service in the user workspace") - _, err = kubeClusterClient.Cluster(consumerPath).CoreV1().Services("default").Create(ctx, &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "first", - Labels: map[string]string{ - "test.workload.kcp.io": syncTargetName, - }, - }, - Spec: corev1.ServiceSpec{ - Ports: []corev1.ServicePort{ - { - Port: 80, - Protocol: corev1.ProtocolTCP, - }, - }, - }, - }, metav1.CreateOptions{}) - require.NoError(t, err) - - t.Logf("Wait for the service to be synced to the downstream cluster") - framework.Eventually(t, func() (bool, string) { - downstreamServices, err := syncerFixture.DownstreamKubeClient.CoreV1().Services("").List(ctx, metav1.ListOptions{ - LabelSelector: "test.workload.kcp.io=" + syncTargetName, - }) - - if err != nil { - return false, fmt.Sprintf("Failed to list service: %v", err) - } - - if len(downstreamServices.Items) < 1 { - return false, "service is not synced" - } - return true, "" - }, wait.ForeverTestTimeout, time.Millisecond*100) - - t.Logf("Wait for being able to list Deployments in the user workspace") - framework.Eventually(t, func() (bool, string) { - _, err := kubeClusterClient.Cluster(consumerPath).AppsV1().Deployments("default").List(ctx, metav1.ListOptions{}) - if err != nil { - t.Logf("deployment err %v", err) - return false, err.Error() - } - return true, "" - }, 
wait.ForeverTestTimeout, time.Millisecond*100) - - var replicas int32 = 1 - t.Logf("Create a deployment in the user workspace") - _, err = kubeClusterClient.Cluster(consumerPath).AppsV1().Deployments("default").Create(ctx, &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "first", - Labels: map[string]string{ - "test.workload.kcp.io": syncTargetName, - }, - }, - Spec: appsv1.DeploymentSpec{ - Replicas: &replicas, - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": "myapp"}, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"app": "myapp"}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "name", - Image: "image", - }, - }, - }, - }, - }, - }, metav1.CreateOptions{}) - require.NoError(t, err) - - t.Logf("Wait for the deployment to be synced to the downstream cluster") - framework.Eventually(t, func() (bool, string) { - downstreamDeployments, err := syncerFixture.DownstreamKubeClient.AppsV1().Deployments("").List(ctx, metav1.ListOptions{ - LabelSelector: "test.workload.kcp.io=" + syncTargetName, - }) - - if err != nil { - return false, fmt.Sprintf("Failed to list deployment: %v", err) - } - - if len(downstreamDeployments.Items) < 1 { - return false, "deployment is not synced" - } - return true, "" - }, wait.ForeverTestTimeout, time.Millisecond*100) - - t.Logf("Scale the deployment in the upstream consumer workspace") - discoverClient := kubeClusterClient.Cluster(consumerPath).Discovery() - cachedDiscovery := discocache.NewMemCacheClient(discoverClient) - restMapper := restmapper.NewDeferredDiscoveryRESTMapper(cachedDiscovery) - scaleKindResolver := scale.NewDiscoveryScaleKindResolver(discoverClient) - scaleClient := scale.New(kubeClusterClient.Cluster(consumerPath).AppsV1().RESTClient(), restMapper, dynamic.LegacyAPIPathResolverFunc, scaleKindResolver) - _, err = scaleClient.Scales("default").Update(ctx, appsv1.SchemeGroupVersion.WithResource("deployments").GroupResource(), &v1.Scale{ - ObjectMeta: metav1.ObjectMeta{ - Name: "first", - }, - Spec: v1.ScaleSpec{ - Replicas: 2, - }, - }, metav1.UpdateOptions{}) - require.NoError(t, err, "deployment should support the scale subresource") -} diff --git a/test/e2e/reconciler/locationworkspace/synctarget_test.go b/test/e2e/reconciler/locationworkspace/synctarget_test.go deleted file mode 100644 index e17b328481c..00000000000 --- a/test/e2e/reconciler/locationworkspace/synctarget_test.go +++ /dev/null @@ -1,247 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package locationworkspace - -import ( - "context" - "embed" - "fmt" - "sort" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - kcpdiscovery "github.com/kcp-dev/client-go/discovery" - kcpdynamic "github.com/kcp-dev/client-go/dynamic" - "github.com/kcp-dev/logicalcluster/v3" - "github.com/stretchr/testify/require" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/apiserver/pkg/endpoints/discovery" - "k8s.io/client-go/discovery/cached/memory" - "k8s.io/client-go/rest" - "k8s.io/client-go/restmapper" - "k8s.io/client-go/tools/clientcmd" - - "github.com/kcp-dev/kcp/config/helpers" - kube124 "github.com/kcp-dev/kcp/config/rootcompute/kube-1.24" - apisv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1" - "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/util/conditions" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" - kcpclientset "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster" - "github.com/kcp-dev/kcp/test/e2e/framework" -) - -//go:embed *.yaml -var testFiles embed.FS - -func TestSyncTargetExport(t *testing.T) { - t.Parallel() - framework.Suite(t, "transparent-multi-cluster") - - ctx, cancelFunc := context.WithCancel(context.Background()) - t.Cleanup(cancelFunc) - - source := framework.SharedKcpServer(t) - - orgPath, _ := framework.NewOrganizationFixture(t, source, framework.TODO_WithoutMultiShardSupport()) - - schemaPath, schemaWorkspace := framework.NewWorkspaceFixture(t, source, orgPath, framework.TODO_WithoutMultiShardSupport()) - schemaClusterName := logicalcluster.Name(schemaWorkspace.Spec.Cluster) - - computePath, computeWorkspace := framework.NewWorkspaceFixture(t, source, orgPath, framework.TODO_WithoutMultiShardSupport()) - computeClusterName := logicalcluster.Name(computeWorkspace.Spec.Cluster) - - kcpClients, err := kcpclientset.NewForConfig(source.BaseConfig(t)) - require.NoError(t, err, "failed to construct kcp cluster client for server") - - dynamicClients, err := kcpdynamic.NewForConfig(source.BaseConfig(t)) - require.NoError(t, err, "failed to construct dynamic cluster client for server") - - t.Logf("Install the service and cowboy APIResourceSchemas into schema workspace %q", schemaPath) - mapper := restmapper.NewDeferredDiscoveryRESTMapper(memory.NewMemCacheClient(kcpClients.Cluster(schemaPath).Discovery())) - err = helpers.CreateResourceFromFS(ctx, dynamicClients.Cluster(schemaPath), mapper, sets.New[string]("root-compute-workspace"), "apiresourceschema_services.yaml", kube124.KubeComputeFS) - require.NoError(t, err) - err = helpers.CreateResourceFromFS(ctx, dynamicClients.Cluster(schemaPath), mapper, nil, "apiresourceschema_cowboys.yaml", testFiles) - require.NoError(t, err) - - t.Logf("Create an APIExport for them") - cowboysAPIExport := &apisv1alpha1.APIExport{ - ObjectMeta: metav1.ObjectMeta{ - Name: "services", - }, - Spec: apisv1alpha1.APIExportSpec{ - LatestResourceSchemas: []string{"v124.services.core", "today.cowboys.wildwest.dev"}, - }, - } - _, err = kcpClients.Cluster(schemaPath).ApisV1alpha1().APIExports().Create(ctx, cowboysAPIExport, metav1.CreateOptions{}) - require.NoError(t, err) - - syncTargetName := "synctarget" - t.Logf("Creating a SyncTarget and syncer in %s", computePath) - syncTarget := framework.NewSyncerFixture(t, source, computePath, - framework.WithAPIExports(fmt.Sprintf("%s:%s", schemaPath.String(), cowboysAPIExport.Name)), - framework.WithSyncTargetName(syncTargetName), -
).CreateSyncTargetAndApplyToDownstream(t).StartSyncer(t) - - require.Eventually(t, func() bool { - syncTarget, err := kcpClients.Cluster(computePath).WorkloadV1alpha1().SyncTargets().Get(ctx, syncTargetName, metav1.GetOptions{}) - if err != nil { - return false - } - - if len(syncTarget.Status.SyncedResources) != 2 { - return false - } - - if syncTarget.Status.SyncedResources[1].Resource != "services" || - syncTarget.Status.SyncedResources[1].State != workloadv1alpha1.ResourceSchemaAcceptedState { - return false - } - - if syncTarget.Status.SyncedResources[0].Resource != "cowboys" || - syncTarget.Status.SyncedResources[0].State != workloadv1alpha1.ResourceSchemaIncompatibleState { - return false - } - - return true - }, wait.ForeverTestTimeout, time.Millisecond*100) - - // create virtual workspace rest configs - rawConfig, err := source.RawConfig() - require.NoError(t, err) - virtualWorkspaceRawConfig := rawConfig.DeepCopy() - virtualWorkspaceRawConfig.Clusters["syncvervw"] = rawConfig.Clusters["base"].DeepCopy() - virtualWorkspaceRawConfig.Clusters["syncvervw"].Server = rawConfig.Clusters["base"].Server + "/services/syncer/" + computeClusterName.String() + "/" + syncTargetName + "/" + syncTarget.SyncerConfig.SyncTargetUID - virtualWorkspaceRawConfig.Contexts["syncvervw"] = rawConfig.Contexts["base"].DeepCopy() - virtualWorkspaceRawConfig.Contexts["syncvervw"].Cluster = "syncvervw" - virtualWorkspaceConfig, err := clientcmd.NewNonInteractiveClientConfig(*virtualWorkspaceRawConfig, "syncvervw", nil, nil).ClientConfig() - require.NoError(t, err) - virtualWorkspaceConfig = rest.AddUserAgent(rest.CopyConfig(virtualWorkspaceConfig), t.Name()) - - virtualWorkspaceDiscoverClusterClient, err := kcpdiscovery.NewForConfig(virtualWorkspaceConfig) - require.NoError(t, err) - - framework.Eventually(t, func() (bool, string) { - _, existingAPIResourceLists, err := virtualWorkspaceDiscoverClusterClient.ServerGroupsAndResources() - if err != nil { - return false, err.Error() - } - - // requiredAPIResourceList includes all core APIs plus the services API; the cowboy API should not be included since it is - not compatible with the synctarget.
- - diff := cmp.Diff([]*metav1.APIResourceList{requiredAPIResourceListWithService(computeClusterName, schemaClusterName)}, sortAPIResourceList(existingAPIResourceLists)) - return len(diff) == 0, diff - }, wait.ForeverTestTimeout, time.Millisecond*100) - - t.Logf("Synctarget should be authorized to access downstream clusters") - framework.EventuallyCondition(t, func() (conditions.Getter, error) { - return kcpClients.Cluster(computePath).WorkloadV1alpha1().SyncTargets().Get(ctx, syncTargetName, metav1.GetOptions{}) - }, framework.Is(workloadv1alpha1.SyncerAuthorized)) -} - -func sortAPIResourceList(list []*metav1.APIResourceList) []*metav1.APIResourceList { - sort.Sort(ByGroupVersion(list)) - for _, resource := range list { - sort.Sort(ByName(resource.APIResources)) - } - return list -} - -type ByGroupVersion []*metav1.APIResourceList - -func (a ByGroupVersion) Len() int { return len(a) } -func (a ByGroupVersion) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a ByGroupVersion) Less(i, j int) bool { return a[i].GroupVersion < a[j].GroupVersion } - -type ByName []metav1.APIResource - -func (a ByName) Len() int { return len(a) } -func (a ByName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a ByName) Less(i, j int) bool { return a[i].Name < a[j].Name } - -func requiredAPIResourceListWithService(computeClusterName, serviceClusterName logicalcluster.Name) *metav1.APIResourceList { - return &metav1.APIResourceList{ - TypeMeta: metav1.TypeMeta{ - Kind: "APIResourceList", - }, - GroupVersion: "v1", - APIResources: []metav1.APIResource{ - { - Kind: "ConfigMap", - Name: "configmaps", - SingularName: "configmap", - Namespaced: true, - Verbs: metav1.Verbs{"get", "list", "patch", "update", "watch"}, - StorageVersionHash: discovery.StorageVersionHash(computeClusterName, "", "v1", "ConfigMap"), - }, - { - Kind: "Namespace", - Name: "namespaces", - SingularName: "namespace", - Namespaced: false, - Verbs: metav1.Verbs{"get", "list", "patch", "update", "watch"}, - StorageVersionHash: discovery.StorageVersionHash(computeClusterName, "", "v1", "Namespace"), - }, - { - Kind: "Namespace", - Name: "namespaces/status", - SingularName: "", - Namespaced: false, - Verbs: metav1.Verbs{"get", "patch", "update"}, - StorageVersionHash: "", - }, - { - Kind: "Secret", - Name: "secrets", - SingularName: "secret", - Namespaced: true, - Verbs: metav1.Verbs{"get", "list", "patch", "update", "watch"}, - StorageVersionHash: discovery.StorageVersionHash(computeClusterName, "", "v1", "Secret"), - }, - { - Kind: "ServiceAccount", - Name: "serviceaccounts", - SingularName: "serviceaccount", - Namespaced: true, - Verbs: metav1.Verbs{"get", "list", "patch", "update", "watch"}, - StorageVersionHash: discovery.StorageVersionHash(computeClusterName, "", "v1", "ServiceAccount"), - }, - { - Kind: "Service", - Name: "services", - SingularName: "service", - ShortNames: []string{"svc"}, - Categories: []string{"all"}, - Namespaced: true, - Verbs: metav1.Verbs{"get", "list", "patch", "update", "watch"}, - StorageVersionHash: discovery.StorageVersionHash(serviceClusterName, "", "v1", "Service"), - }, - { - Kind: "Service", - Name: "services/status", - SingularName: "", - Namespaced: true, - Verbs: metav1.Verbs{"get", "patch", "update"}, - StorageVersionHash: "", - }, - }, - } -} diff --git a/test/e2e/reconciler/namespace/controller_test.go b/test/e2e/reconciler/namespace/controller_test.go deleted file mode 100644 index 75a1fb52f31..00000000000 --- a/test/e2e/reconciler/namespace/controller_test.go +++ /dev/null @@ -1,375 +0,0 @@ -/* 
-Copyright 2021 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package namespace - -import ( - "context" - "errors" - "fmt" - "testing" - "time" - - kcpcache "github.com/kcp-dev/apimachinery/v2/pkg/cache" - kcpdynamic "github.com/kcp-dev/client-go/dynamic" - kcpkubernetesinformers "github.com/kcp-dev/client-go/informers" - kcpkubernetesclientset "github.com/kcp-dev/client-go/kubernetes" - "github.com/kcp-dev/logicalcluster/v3" - "github.com/stretchr/testify/require" - - corev1 "k8s.io/api/core/v1" - apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - kcpapiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/kcp/clientset/versioned" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/util/retry" - - configcrds "github.com/kcp-dev/kcp/config/crds" - workloadnamespace "github.com/kcp-dev/kcp/pkg/reconciler/workload/namespace" - apisv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1" - tenancyv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1" - "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/util/conditions" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" - clientset "github.com/kcp-dev/kcp/sdk/client/clientset/versioned" - kcpclientset "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster" - "github.com/kcp-dev/kcp/test/e2e/framework" -) - -func TestNamespaceScheduler(t *testing.T) { - t.Parallel() - framework.Suite(t, "transparent-multi-cluster") - - type runningServer struct { - framework.RunningServer - path logicalcluster.Path - client kubernetes.Interface - kcpClient clientset.Interface - expect registerNamespaceExpectation - orgPath logicalcluster.Path - } - - var testCases = []struct { - name string - work func(ctx context.Context, t *testing.T, server runningServer) - }{ - { - name: "validate namespace scheduling", - work: func(ctx context.Context, t *testing.T, server runningServer) { - t.Helper() - t.Log("Create a namespace without a cluster available and expect it to be marked unschedulable") - namespace, err := server.client.CoreV1().Namespaces().Create(ctx, &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "e2e-nss-", - }, - }, metav1.CreateOptions{}) - require.NoError(t, err, "failed to create namespace1") - server.RunningServer.Artifact(t, func() (runtime.Object, error) { - return server.client.CoreV1().Namespaces().Get(ctx, namespace.Name, metav1.GetOptions{}) - }) - framework.EventuallyCondition(t, func() (conditions.Getter, error) { - ns, err := server.client.CoreV1().Namespaces().Get(ctx, namespace.Name, metav1.GetOptions{}) - return &workloadnamespace.NamespaceConditionsAdapter{Namespace: ns}, err - }, 
framework.IsNot(workloadnamespace.NamespaceScheduled).WithReason(workloadnamespace.NamespaceReasonUnschedulable)) - - t.Log("Create the SyncTarget and start both the Syncer APIImporter and Syncer HeartBeat") - // Create the SyncTarget and start both the Syncer APIImporter and Syncer HeartBeat against a workload cluster - // so that there's a ready cluster to schedule to. - syncerFixture := framework.NewSyncerFixture(t, server, server.path).CreateSyncTargetAndApplyToDownstream(t).StartAPIImporter(t).StartHeartBeat(t) - syncTargetName := syncerFixture.SyncerConfig.SyncTargetName - - t.Logf("Bind to location workspace") - framework.NewBindCompute(t, server.path, server).Bind(t) - syncTargetKey := workloadv1alpha1.ToSyncTargetKey(syncerFixture.SyncTargetClusterName, syncerFixture.SyncerConfig.SyncTargetName) - - t.Log("Wait until the namespace is scheduled to the workload cluster") - require.Eventually(t, func() bool { - ns, err := server.client.CoreV1().Namespaces().Get(ctx, namespace.Name, metav1.GetOptions{}) - if err != nil { - t.Log(err) - return false - } - return scheduledMatcher(syncTargetKey)(ns) == nil - }, wait.ForeverTestTimeout, 100*time.Millisecond) - - t.Log("Cordon the cluster and expect the namespace to end up unschedulable") - err = retry.RetryOnConflict(retry.DefaultBackoff, func() error { - syncTarget, err := server.kcpClient.WorkloadV1alpha1().SyncTargets().Get(ctx, syncTargetName, metav1.GetOptions{}) - if err != nil { - return err - } - anHourAgo := metav1.NewTime(time.Now().Add(-1 * time.Hour)) - syncTarget.Spec.EvictAfter = &anHourAgo - _, err = server.kcpClient.WorkloadV1alpha1().SyncTargets().Update(ctx, syncTarget, metav1.UpdateOptions{}) - return err - }) - require.NoError(t, err, "failed to update sync target") - - framework.EventuallyCondition(t, func() (conditions.Getter, error) { - ns, err := server.client.CoreV1().Namespaces().Get(ctx, namespace.Name, metav1.GetOptions{}) - return &workloadnamespace.NamespaceConditionsAdapter{Namespace: ns}, err - }, framework.IsNot(workloadnamespace.NamespaceScheduled).WithReason(workloadnamespace.NamespaceReasonUnschedulable), "did not see namespace marked unschedulable") - }, - }, - { - name: "GVRs are removed, and then quickly re-added to a new workspace", - work: func(ctx context.Context, t *testing.T, server runningServer) { - t.Helper() - - crdClusterClient, err := kcpapiextensionsclientset.NewForConfig(server.RunningServer.BaseConfig(t)) - require.NoError(t, err, "failed to construct apiextensions client for server") - - dynamicClusterClient, err := kcpdynamic.NewForConfig(server.RunningServer.BaseConfig(t)) - require.NoError(t, err, "failed to construct dynamic client for server") - - kubeClusterClient, err := kcpkubernetesclientset.NewForConfig(server.RunningServer.BaseConfig(t)) - require.NoError(t, err, "failed to construct kubernetes client for server") - - t.Log("Create a ready SyncTarget, and keep it artificially ready") // we don't want the syncer to do anything with CRDs, hence we fake the syncer - cluster := &workloadv1alpha1.SyncTarget{ - ObjectMeta: metav1.ObjectMeta{Name: "cluster7"}, - Spec: workloadv1alpha1.SyncTargetSpec{ - SupportedAPIExports: []tenancyv1alpha1.APIExportReference{ - { - Export: workloadv1alpha1.ImportedAPISExportName, - Path: server.path.String(), - }, - }, - }, - } - cluster, err = server.kcpClient.WorkloadV1alpha1().SyncTargets().Create(ctx, cluster, metav1.CreateOptions{}) - require.NoError(t, err, "failed to create cluster") - - apiExport := &apisv1alpha1.APIExport{ - ObjectMeta:
metav1.ObjectMeta{Name: workloadv1alpha1.ImportedAPISExportName}, - Spec: apisv1alpha1.APIExportSpec{}, - } - _, err = server.kcpClient.ApisV1alpha1().APIExports().Create(ctx, apiExport, metav1.CreateOptions{}) - require.NoError(t, err, "failed to create APIExport") - - syncTargetKey := workloadv1alpha1.ToSyncTargetKey(logicalcluster.From(cluster), cluster.Name) - - go wait.UntilWithContext(ctx, func(ctx context.Context) { - patchBytes := []byte(fmt.Sprintf(`[{"op":"replace","path":"/status/lastSyncerHeartbeatTime","value":%q}]`, time.Now().Format(time.RFC3339))) - _, err := server.kcpClient.WorkloadV1alpha1().SyncTargets().Patch(ctx, cluster.Name, types.JSONPatchType, patchBytes, metav1.PatchOptions{}, "status") - if err != nil { - // we can survive several of these errors. If 6 in a row fail and the sync target is marked - non-ready, we likely have other problems than these failures here. - t.Logf("failed to set status.lastSyncerHeartbeatTime: %v", err) - return - } - }, 100*time.Millisecond) - - t.Logf("Bind to location workspace") - framework.NewBindCompute(t, server.path, server, - framework.WithAPIExportsWorkloadBindOption(workloadv1alpha1.ImportedAPISExportName), - ).Bind(t) - - t.Log("Create a new unique sheriff CRD") - group := framework.UniqueGroup(".io") - crd := newSheriffCRD(group) - gvr := schema.GroupVersionResource{ - Group: crd.Spec.Group, - Version: crd.Spec.Versions[0].Name, - Resource: crd.Spec.Names.Plural, - } - err = configcrds.CreateSingle(ctx, crdClusterClient.ApiextensionsV1().CustomResourceDefinitions().Cluster(server.path), crd) - require.NoError(t, err, "error bootstrapping CRD %s in cluster %s", crd.Name, server.path) - require.Eventually(t, func() bool { - _, err := dynamicClusterClient.Cluster(server.path).Resource(gvr).Namespace("").List(ctx, metav1.ListOptions{}) - return err == nil - }, wait.ForeverTestTimeout, time.Millisecond*100, "failed to see CRD in cluster") - - t.Log("Create a sheriff and wait for it to be scheduled") - _, err = dynamicClusterClient.Cluster(server.path).Resource(gvr).Namespace("default").Create(ctx, newSheriff(group, "woody"), metav1.CreateOptions{}) - require.NoError(t, err, "failed to create sheriff") - require.Eventually(t, func() bool { - obj, err := dynamicClusterClient.Cluster(server.path).Resource(gvr).Namespace("default").Get(ctx, "woody", metav1.GetOptions{}) - if err != nil { - t.Logf("failed to get sheriff: %v", err) - return false - } - return obj.GetLabels()[workloadv1alpha1.ClusterResourceStateLabelPrefix+syncTargetKey] != "" - }, wait.ForeverTestTimeout, time.Millisecond*100, "failed to see sheriff scheduled") - - t.Log("Delete the sheriff and the sheriff CRD") - err = dynamicClusterClient.Cluster(server.path).Resource(gvr).Namespace("default").Delete(ctx, "woody", metav1.DeleteOptions{}) - require.NoError(t, err, "failed to delete sheriff") - err = crdClusterClient.Cluster(server.path).ApiextensionsV1().CustomResourceDefinitions().Delete(ctx, crd.Name, metav1.DeleteOptions{}) - require.NoError(t, err, "failed to delete CRD") - - time.Sleep(7 * time.Second) // this must be longer than discovery repoll interval (5s in tests) - - t.Log("Recreate the CRD, and then quickly create a namespace and a CR whose CRD was just recreated") - err = configcrds.CreateSingle(ctx, crdClusterClient.ApiextensionsV1().CustomResourceDefinitions().Cluster(server.path), crd) - require.NoError(t, err, "error bootstrapping CRD %s in cluster %s", crd.Name, server.path) - _, err =
kubeClusterClient.Cluster(server.path).CoreV1().Namespaces().Create(ctx, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "namespace-test"}}, metav1.CreateOptions{}) - require.NoError(t, err, "failed to create namespace") - _, err = dynamicClusterClient.Cluster(server.path).Resource(gvr).Namespace("default").Create(ctx, newSheriff(group, "lucky-luke"), metav1.CreateOptions{}) - require.NoError(t, err, "failed to create sheriff") - - t.Log("Now also the sheriff should be scheduled") - require.Eventually(t, func() bool { - obj, err := dynamicClusterClient.Cluster(server.path).Resource(gvr).Namespace("default").Get(ctx, "lucky-luke", metav1.GetOptions{}) - if err != nil { - t.Logf("failed to get sheriff: %v", err) - return false - } - return obj.GetLabels()[workloadv1alpha1.ClusterResourceStateLabelPrefix+syncTargetKey] != "" - }, wait.ForeverTestTimeout, time.Millisecond*100, "failed to see sheriff scheduled") - }, - }, - } - - server := framework.SharedKcpServer(t) - orgPath, _ := framework.NewOrganizationFixture(t, server, framework.TODO_WithoutMultiShardSupport()) - - for _, testCase := range testCases { - testCase := testCase - - t.Run(testCase.name, func(t *testing.T) { - t.Parallel() - - start := time.Now() - - ctx, cancelFunc := context.WithCancel(context.Background()) - t.Cleanup(cancelFunc) - - cfg := server.BaseConfig(t) - - path, _ := framework.NewWorkspaceFixture(t, server, orgPath, framework.TODO_WithoutMultiShardSupport()) - - kubeClusterClient, err := kcpkubernetesclientset.NewForConfig(cfg) - require.NoError(t, err) - - kcpClusterClient, err := kcpclientset.NewForConfig(cfg) - require.NoError(t, err) - - expecterClient, err := kcpkubernetesclientset.NewForConfig(server.RootShardSystemMasterBaseConfig(t)) - require.NoError(t, err) - - t.Logf("Starting namespace expecter") - expect, err := expectNamespaces(ctx, t, expecterClient) - require.NoError(t, err, "failed to start expecter") - - s := runningServer{ - RunningServer: server, - path: path, - client: kubeClusterClient.Cluster(path), - kcpClient: kcpClusterClient.Cluster(path), - expect: expect, - orgPath: orgPath, - } - - t.Logf("Set up clients for test after %s", time.Since(start)) - t.Log("Starting test...") - - testCase.work(ctx, t, s) - }, - ) - } -} - -type namespaceExpectation func(*corev1.Namespace) error - -func scheduledMatcher(target string) namespaceExpectation { - return func(object *corev1.Namespace) error { - if _, found := object.Labels[workloadv1alpha1.ClusterResourceStateLabelPrefix+target]; found { - return nil - } - return fmt.Errorf("expected a scheduled namespace, got status.conditions: %#v", object.Status.Conditions) - } -} - -type registerNamespaceExpectation func(seed *corev1.Namespace, expectation namespaceExpectation) error - -func expectNamespaces(ctx context.Context, t *testing.T, client kcpkubernetesclientset.ClusterInterface) (registerNamespaceExpectation, error) { - t.Helper() - - informerFactory := kcpkubernetesinformers.NewSharedInformerFactory(client, 0) - informer := informerFactory.Core().V1().Namespaces() - expecter := framework.NewExpecter(informer.Informer()) - informerFactory.Start(ctx.Done()) - if !cache.WaitForNamedCacheSync(t.Name(), ctx.Done(), informer.Informer().HasSynced) { - return nil, errors.New("failed to wait for caches to sync") - } - return func(seed *corev1.Namespace, expectation namespaceExpectation) error { - key, err := kcpcache.MetaClusterNamespaceKeyFunc(seed) - if err != nil { - return err - } - clusterName, _, name, err := 
kcpcache.SplitMetaClusterNamespaceKey(key) - if err != nil { - return err - } - return expecter.ExpectBefore(ctx, func(ctx context.Context) (done bool, err error) { - current, err := informer.Lister().Cluster(clusterName).Get(name) - if err != nil { - // Retry on all errors - return false, err - } - expectErr := expectation(current.DeepCopy()) - return expectErr == nil, expectErr - }, wait.ForeverTestTimeout) - }, nil -} - -func newSheriffCRD(group string) *apiextensionsv1.CustomResourceDefinition { - return &apiextensionsv1.CustomResourceDefinition{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("sheriffs.%s", group), - }, - Spec: apiextensionsv1.CustomResourceDefinitionSpec{ - Group: group, - Names: apiextensionsv1.CustomResourceDefinitionNames{ - Plural: "sheriffs", - Singular: "sheriff", - Kind: "Sheriff", - ListKind: "SheriffList", - }, - Scope: "Namespaced", - Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ - { - Name: "v1", - Served: true, - Storage: true, - Schema: &apiextensionsv1.CustomResourceValidation{ - OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{ - Type: "object", - }, - }, - }, - }, - }, - } -} - -func newSheriff(group string, name string) *unstructured.Unstructured { - return &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": group + "/v1", - "kind": "Sheriff", - "metadata": map[string]interface{}{ - "name": name, - }, - }, - } -} diff --git a/test/e2e/reconciler/scheduling/api_compatibility_test.go b/test/e2e/reconciler/scheduling/api_compatibility_test.go deleted file mode 100644 index 1d4d030951c..00000000000 --- a/test/e2e/reconciler/scheduling/api_compatibility_test.go +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package cluster - -import ( - "context" - "fmt" - "math/rand" - "testing" - - "github.com/kcp-dev/logicalcluster/v3" - "github.com/stretchr/testify/require" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1" - "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/util/conditions" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" - kcpclientset "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster" - "github.com/kcp-dev/kcp/test/e2e/framework" -) - -func TestSchedulingOnSupportedAPI(t *testing.T) { - t.Parallel() - framework.Suite(t, "transparent-multi-cluster") - - ctx, cancelFunc := context.WithCancel(context.Background()) - t.Cleanup(cancelFunc) - - source := framework.SharedKcpServer(t) - orgPath, _ := framework.NewOrganizationFixture(t, source, framework.TODO_WithoutMultiShardSupport()) - locationPath, locationWS := framework.NewWorkspaceFixture(t, source, orgPath, framework.TODO_WithoutMultiShardSupport()) - userPath, userWS := framework.NewWorkspaceFixture(t, source, orgPath, framework.TODO_WithoutMultiShardSupport()) - - kcpClusterClient, err := kcpclientset.NewForConfig(source.BaseConfig(t)) - require.NoError(t, err) - - firstSyncTargetName := fmt.Sprintf("firstsynctarget-%d", +rand.Intn(1000000)) - t.Logf("Creating a SyncTarget with no supported APIExports in %s, and start both the Syncer APIImporter and Syncer HeartBeat", locationPath) - _ = framework.NewSyncerFixture(t, source, locationPath, - framework.WithSyncTargetName(firstSyncTargetName), - framework.WithSyncedUserWorkspaces(userWS), - framework.WithAPIExports(""), - ).CreateSyncTargetAndApplyToDownstream(t).StartAPIImporter(t).StartHeartBeat(t) - - secondSyncTargetName := fmt.Sprintf("secondsynctarget-%d", +rand.Intn(1000000)) - t.Logf("Creating a SyncTarget with global kubernetes APIExports in %s, and start both the Syncer APIImporter and Syncer HeartBeat", locationPath) - _ = framework.NewSyncerFixture(t, source, locationPath, - framework.WithSyncTargetName(secondSyncTargetName), - framework.WithSyncedUserWorkspaces(userWS), - ).CreateSyncTargetAndApplyToDownstream(t).StartAPIImporter(t).StartHeartBeat(t) - - placementName := "placement-test-supportedapi" - t.Logf("Bind to location workspace") - framework.NewBindCompute(t, userPath, source, - framework.WithLocationWorkspaceWorkloadBindOption(locationPath), - framework.WithPlacementNameBindOption(placementName), - framework.WithAPIExportsWorkloadBindOption("root:compute:kubernetes"), - ).Bind(t) - - t.Logf("First sync target hash: %s", workloadv1alpha1.ToSyncTargetKey(logicalcluster.Name(locationWS.Spec.Cluster), firstSyncTargetName)) - scheduledSyncTargetKey := workloadv1alpha1.ToSyncTargetKey(logicalcluster.Name(locationWS.Spec.Cluster), secondSyncTargetName) - - t.Logf("Check placement should be scheduled to the synctarget with supported API") - framework.EventuallyCondition(t, func() (conditions.Getter, error) { - return kcpClusterClient.Cluster(userPath).SchedulingV1alpha1().Placements().Get(ctx, placementName, metav1.GetOptions{}) - }, framework.Is(schedulingv1alpha1.PlacementScheduled)) - placement, err := kcpClusterClient.Cluster(userPath).SchedulingV1alpha1().Placements().Get(ctx, placementName, metav1.GetOptions{}) - require.NoError(t, err) - - if value := placement.Annotations[workloadv1alpha1.InternalSyncTargetPlacementAnnotationKey]; value != scheduledSyncTargetKey { - t.Errorf("Internal synctarget annotation for placement should be %s since it is the
only SyncTarget with compatible API, but got %q", - scheduledSyncTargetKey, value) - } -} diff --git a/test/e2e/reconciler/scheduling/controller_test.go b/test/e2e/reconciler/scheduling/controller_test.go deleted file mode 100644 index 75410782bce..00000000000 --- a/test/e2e/reconciler/scheduling/controller_test.go +++ /dev/null @@ -1,298 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cluster - -import ( - "context" - "fmt" - "testing" - "time" - - kcpkubernetesclientset "github.com/kcp-dev/client-go/kubernetes" - "github.com/stretchr/testify/require" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/wait" - "sigs.k8s.io/yaml" - - schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1" - "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/util/conditions" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" - kcpclientset "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster" - "github.com/kcp-dev/kcp/test/e2e/framework" -) - -func TestScheduling(t *testing.T) { - t.Parallel() - framework.Suite(t, "transparent-multi-cluster") - - ctx, cancelFunc := context.WithCancel(context.Background()) - t.Cleanup(cancelFunc) - - source := framework.SharedKcpServer(t) - - orgPath, _ := framework.NewOrganizationFixture(t, source, framework.TODO_WithoutMultiShardSupport()) - negotiationPath, _ := framework.NewWorkspaceFixture(t, source, orgPath, framework.TODO_WithoutMultiShardSupport()) - userPath, userWorkspace := framework.NewWorkspaceFixture(t, source, orgPath, framework.TODO_WithoutMultiShardSupport()) - secondUserPath, secondUserWorkspace := framework.NewWorkspaceFixture(t, source, orgPath, framework.TODO_WithoutMultiShardSupport()) - - kubeClusterClient, err := kcpkubernetesclientset.NewForConfig(source.BaseConfig(t)) - require.NoError(t, err) - kcpClusterClient, err := kcpclientset.NewForConfig(source.BaseConfig(t)) - require.NoError(t, err) - - t.Logf("Check that there is no services resource in the user workspace") - _, err = kubeClusterClient.Cluster(userPath).CoreV1().Services("").List(ctx, metav1.ListOptions{}) - require.Error(t, err) - - t.Logf("Check that there is no services resource in the second user workspace") - _, err = kubeClusterClient.Cluster(secondUserPath).CoreV1().Services("").List(ctx, metav1.ListOptions{}) - require.Error(t, err) - - syncTargetName := "synctarget" - t.Logf("Creating a SyncTarget and syncer in %s", negotiationPath) - syncerFixture := framework.NewSyncerFixture(t, source, negotiationPath, - framework.WithSyncTargetName(syncTargetName), - framework.WithSyncedUserWorkspaces(userWorkspace, secondUserWorkspace), - ).CreateSyncTargetAndApplyToDownstream(t).StartSyncer(t) - - t.Logf("Wait for APIResourceImports to show up in the negotiation workspace") - require.Eventually(t, func() bool { - imports, err := 
kcpClusterClient.Cluster(negotiationPath).ApiresourceV1alpha1().APIResourceImports().List(ctx, metav1.ListOptions{}) - if err != nil { - t.Logf("Failed to list APIResourceImports: %v", err) - return false - } - - return len(imports.Items) > 0 - }, wait.ForeverTestTimeout, time.Millisecond*100) - - t.Logf("Wait for NegotiatedAPIResources to show up in the negotiation workspace") - require.Eventually(t, func() bool { - resources, err := kcpClusterClient.Cluster(negotiationPath).ApiresourceV1alpha1().NegotiatedAPIResources().List(ctx, metav1.ListOptions{}) - if err != nil { - t.Logf("Failed to list NegotiatedAPIResources: %v", err) - return false - } - - return len(resources.Items) > 0 - }, wait.ForeverTestTimeout, time.Millisecond*100) - - t.Log("Create a location in the negotiation workspace") - location := &schedulingv1alpha1.Location{ - ObjectMeta: metav1.ObjectMeta{ - Name: "us-east1", - Labels: map[string]string{"foo": "42"}, - }, - Spec: schedulingv1alpha1.LocationSpec{ - Resource: schedulingv1alpha1.GroupVersionResource{ - Group: "workload.kcp.io", - Version: "v1alpha1", - Resource: "synctargets", - }, - }, - } - _, err = kcpClusterClient.Cluster(negotiationPath).SchedulingV1alpha1().Locations().Create(ctx, location, metav1.CreateOptions{}) - require.NoError(t, err) - - t.Logf("Wait for available instances in the location") - framework.Eventually(t, func() (bool, string) { - location, err := kcpClusterClient.Cluster(negotiationPath).SchedulingV1alpha1().Locations().Get(ctx, location.Name, metav1.GetOptions{}) - require.NoError(t, err) - if location.Status.AvailableInstances == nil { - return false, "location.Status.AvailableInstances not present" - } - if actual, expected := *location.Status.AvailableInstances, uint32(1); actual != expected { - return false, fmt.Sprintf("location.Status.AvailableInstances is %d, not %d", actual, expected) - } - return true, "" - }, wait.ForeverTestTimeout, time.Millisecond*100) - - t.Logf("Bind to location workspace") - framework.NewBindCompute(t, userPath, source, - framework.WithLocationWorkspaceWorkloadBindOption(negotiationPath), - ).Bind(t) - - t.Logf("Wait for being able to list Services in the user workspace") - require.Eventually(t, func() bool { - _, err := kubeClusterClient.Cluster(userPath).CoreV1().Services("").List(ctx, metav1.ListOptions{}) - if errors.IsNotFound(err) { - return false - } else if err != nil { - t.Logf("Failed to list Services: %v", err) - return false - } - return true - }, wait.ForeverTestTimeout, time.Millisecond*100) - - t.Logf("Bind second user workspace to location workspace") - framework.NewBindCompute(t, secondUserPath, source, - framework.WithLocationWorkspaceWorkloadBindOption(negotiationPath), - ).Bind(t) - - t.Logf("Wait for being able to list Services in the user workspace") - require.Eventually(t, func() bool { - _, err := kubeClusterClient.Cluster(secondUserPath).CoreV1().Services("").List(ctx, metav1.ListOptions{}) - if errors.IsNotFound(err) { - return false - } else if err != nil { - t.Logf("Failed to list Services: %v", err) - return false - } - return true - }, wait.ForeverTestTimeout, time.Millisecond*100) - - syncTargetKey := workloadv1alpha1.ToSyncTargetKey(syncerFixture.SyncTargetClusterName, syncTargetName) - - t.Logf("Create a service in the user workspace") - _, err = kubeClusterClient.Cluster(userPath).CoreV1().Services("default").Create(ctx, &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "first", - Labels: map[string]string{ - "state.workload.kcp.io/" + syncTargetKey: "Sync", - }, - 
}, - Spec: corev1.ServiceSpec{ - Ports: []corev1.ServicePort{ - { - Port: 80, - Protocol: corev1.ProtocolTCP, - }, - }, - }, - }, metav1.CreateOptions{}) - require.NoError(t, err) - - t.Logf("Wait for the 2 services to be synced to the downstream cluster") - var downstreamServices *corev1.ServiceList - require.Eventually(t, func() bool { - downstreamServices, err = syncerFixture.DownstreamKubeClient.CoreV1().Services("").List(ctx, metav1.ListOptions{ - LabelSelector: "internal.workload.kcp.io/cluster=" + syncTargetKey, - }) - if errors.IsNotFound(err) { - return false - } else if err != nil { - t.Logf("Failed to list Services: %v", err) - return false - } else if len(downstreamServices.Items) < 2 { - return false - } - return true - }, wait.ForeverTestTimeout, time.Millisecond*100) - - syncedServicesYaml, err := yaml.Marshal(downstreamServices) - require.NoError(t, err) - t.Logf("Synced services:\n%s", syncedServicesYaml) - - require.Len(t, downstreamServices.Items, 2) - - names := sets.New[string]() - for _, downstreamService := range downstreamServices.Items { - names.Insert(downstreamService.Name) - } - require.Equal(t, sets.List[string](names), []string{"first", "second"}) - - t.Logf("Wait for placement annotation on the default namespace") - framework.Eventually(t, func() (bool, string) { - ns, err := kubeClusterClient.Cluster(userPath).CoreV1().Namespaces().Get(ctx, "default", metav1.GetOptions{}) - require.NoError(t, err) - - _, found := ns.Annotations[schedulingv1alpha1.PlacementAnnotationKey] - return found, fmt.Sprintf("no %s annotation:\n%s", schedulingv1alpha1.PlacementAnnotationKey, ns.Annotations) - }, wait.ForeverTestTimeout, time.Millisecond*100) -} - -// TestSchedulingWhenLocationIsMissing creates a placement first, while the location is missing, and expects it to become ready once the location is created later.
-func TestSchedulingWhenLocationIsMissing(t *testing.T) { - t.Parallel() - framework.Suite(t, "transparent-multi-cluster") - - ctx, cancelFunc := context.WithCancel(context.Background()) - t.Cleanup(cancelFunc) - - source := framework.SharedKcpServer(t) - - orgPath, _ := framework.NewOrganizationFixture(t, source, framework.TODO_WithoutMultiShardSupport()) - locationPath, _ := framework.NewWorkspaceFixture(t, source, orgPath, framework.TODO_WithoutMultiShardSupport()) - userPath, userWorkspace := framework.NewWorkspaceFixture(t, source, orgPath, framework.TODO_WithoutMultiShardSupport()) - - kcpClusterClient, err := kcpclientset.NewForConfig(source.BaseConfig(t)) - require.NoError(t, err) - - // create a placement first, before the location exists - newPlacement := &schedulingv1alpha1.Placement{ - ObjectMeta: metav1.ObjectMeta{ - Name: "new-placement", - }, - Spec: schedulingv1alpha1.PlacementSpec{ - LocationSelectors: []metav1.LabelSelector{{}}, - NamespaceSelector: &metav1.LabelSelector{}, - LocationResource: schedulingv1alpha1.GroupVersionResource{ - Group: "workload.kcp.io", - Version: "v1alpha1", - Resource: "synctargets", - }, - LocationWorkspace: locationPath.String(), - }, - } - _, err = kcpClusterClient.Cluster(userPath).SchedulingV1alpha1().Placements().Create(ctx, newPlacement, metav1.CreateOptions{}) - require.NoError(t, err) - - t.Logf("Placement should turn to the Pending phase") - framework.Eventually(t, func() (bool, string) { - placement, err := kcpClusterClient.Cluster(userPath).SchedulingV1alpha1().Placements().Get(ctx, newPlacement.Name, metav1.GetOptions{}) - if err != nil { - return false, fmt.Sprintf("Failed to get placement: %v", err) - } - - return placement.Status.Phase == schedulingv1alpha1.PlacementPending, "" - }, wait.ForeverTestTimeout, time.Millisecond*100) - - syncTargetName := "synctarget" - t.Logf("Creating a SyncTarget and syncer in %s", locationPath) - _ = framework.NewSyncerFixture(t, source, locationPath, - framework.WithSyncTargetName(syncTargetName), - framework.WithSyncedUserWorkspaces(userWorkspace), - ).CreateSyncTargetAndApplyToDownstream(t).StartAPIImporter(t).StartHeartBeat(t) - - t.Logf("Wait for placement to be ready") - framework.EventuallyCondition(t, func() (conditions.Getter, error) { - return kcpClusterClient.Cluster(userPath).SchedulingV1alpha1().Placements().Get(ctx, newPlacement.Name, metav1.GetOptions{}) - }, framework.Is(schedulingv1alpha1.PlacementReady)) -} diff --git a/test/e2e/reconciler/scheduling/multi_placements_test.go b/test/e2e/reconciler/scheduling/multi_placements_test.go deleted file mode 100644 index a1d9213f152..00000000000 --- a/test/e2e/reconciler/scheduling/multi_placements_test.go +++ /dev/null @@ -1,214 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License.
-*/ - -package cluster - -import ( - "context" - "fmt" - "testing" - "time" - - kcpkubernetesclientset "github.com/kcp-dev/client-go/kubernetes" - "github.com/stretchr/testify/require" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - - schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" - kcpclientset "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster" - "github.com/kcp-dev/kcp/test/e2e/framework" -) - -func TestMultiPlacement(t *testing.T) { - t.Parallel() - framework.Suite(t, "transparent-multi-cluster") - - ctx, cancelFunc := context.WithCancel(context.Background()) - t.Cleanup(cancelFunc) - - source := framework.SharedKcpServer(t) - - orgPath, _ := framework.NewOrganizationFixture(t, source, framework.TODO_WithoutMultiShardSupport()) - locationPath, _ := framework.NewWorkspaceFixture(t, source, orgPath, framework.WithName("location"), framework.TODO_WithoutMultiShardSupport()) - userPath, userWorkspace := framework.NewWorkspaceFixture(t, source, orgPath, framework.WithName("user"), framework.TODO_WithoutMultiShardSupport()) - - kubeClusterClient, err := kcpkubernetesclientset.NewForConfig(source.BaseConfig(t)) - require.NoError(t, err) - kcpClusterClient, err := kcpclientset.NewForConfig(source.BaseConfig(t)) - require.NoError(t, err) - - t.Logf("Check that there is no services resource in the user workspace") - _, err = kubeClusterClient.Cluster(userPath).CoreV1().Services("").List(ctx, metav1.ListOptions{}) - require.Error(t, err) - - firstSyncTargetName := "first-synctarget" - t.Logf("Creating a SyncTarget and syncer in %s", locationPath) - firstSyncerFixture := framework.NewSyncerFixture(t, source, locationPath, - framework.WithSyncTargetName(firstSyncTargetName), - framework.WithSyncedUserWorkspaces(userWorkspace), - ).CreateSyncTargetAndApplyToDownstream(t).StartSyncer(t) - - secondSyncTargetName := "second-synctarget" - t.Logf("Creating a SyncTarget and syncer in %s", locationPath) - secondSyncerFixture := framework.NewSyncerFixture(t, source, locationPath, - framework.WithSyncTargetName(secondSyncTargetName), - framework.WithSyncedUserWorkspaces(userWorkspace), - ).CreateSyncTargetAndApplyToDownstream(t).StartSyncer(t) - - t.Log("Label synctarget") - patchData1 := `{"metadata":{"labels":{"loc":"loc1"}}}` - _, err = kcpClusterClient.Cluster(locationPath).WorkloadV1alpha1().SyncTargets().Patch(ctx, firstSyncTargetName, types.MergePatchType, []byte(patchData1), metav1.PatchOptions{}) - require.NoError(t, err) - patchData2 := `{"metadata":{"labels":{"loc":"loc2"}}}` - _, err = kcpClusterClient.Cluster(locationPath).WorkloadV1alpha1().SyncTargets().Patch(ctx, secondSyncTargetName, types.MergePatchType, []byte(patchData2), metav1.PatchOptions{}) - require.NoError(t, err) - - t.Log("Create locations") - loc1 := &schedulingv1alpha1.Location{ - ObjectMeta: metav1.ObjectMeta{ - Name: "loc1", - Labels: map[string]string{"loc": "loc1"}, - }, - Spec: schedulingv1alpha1.LocationSpec{ - Resource: schedulingv1alpha1.GroupVersionResource{ - Group: "workload.kcp.io", - Version: "v1alpha1", - Resource: "synctargets", - }, - InstanceSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"loc": "loc1"}, - }, - }, - } - _, err = kcpClusterClient.Cluster(locationPath).SchedulingV1alpha1().Locations().Create(ctx, loc1, metav1.CreateOptions{}) - 
require.NoError(t, err) - - loc2 := &schedulingv1alpha1.Location{ - ObjectMeta: metav1.ObjectMeta{ - Name: "loc2", - Labels: map[string]string{"loc": "loc2"}, - }, - Spec: schedulingv1alpha1.LocationSpec{ - Resource: schedulingv1alpha1.GroupVersionResource{ - Group: "workload.kcp.io", - Version: "v1alpha1", - Resource: "synctargets", - }, - InstanceSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"loc": "loc2"}, - }, - }, - } - _, err = kcpClusterClient.Cluster(locationPath).SchedulingV1alpha1().Locations().Create(ctx, loc2, metav1.CreateOptions{}) - require.NoError(t, err) - - t.Logf("Bind user workspace to location workspace with loc 1") - framework.NewBindCompute(t, userPath, source, - framework.WithLocationWorkspaceWorkloadBindOption(locationPath), - framework.WithLocationSelectorWorkloadBindOption(metav1.LabelSelector{MatchLabels: map[string]string{"loc": "loc1"}}), - ).Bind(t) - - t.Logf("Bind user workspace to location workspace with loc 2") - framework.NewBindCompute(t, userPath, source, - framework.WithLocationWorkspaceWorkloadBindOption(locationPath), - framework.WithLocationSelectorWorkloadBindOption(metav1.LabelSelector{MatchLabels: map[string]string{"loc": "loc2"}}), - ).Bind(t) - - t.Logf("Wait until Services can be listed in the user workspace") - require.Eventually(t, func() bool { - _, err := kubeClusterClient.Cluster(userPath).CoreV1().Services("").List(ctx, metav1.ListOptions{}) - if errors.IsNotFound(err) { - return false - } else if err != nil { - t.Logf("Failed to list Services: %v", err) - return false - } - return true - }, wait.ForeverTestTimeout, time.Millisecond*100) - - t.Logf("Create a service in the user workspace") - _, err = kubeClusterClient.Cluster(userPath).CoreV1().Services("default").Create(ctx, &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "first", - Labels: map[string]string{ - "test.workload.kcp.io": firstSyncTargetName, - }, - }, - Spec: corev1.ServiceSpec{ - Ports: []corev1.ServicePort{ - { - Port: 80, - Protocol: corev1.ProtocolTCP, - }, - }, - }, - }, metav1.CreateOptions{}) - require.NoError(t, err) - - t.Logf("Wait for the service to have the sync label") - framework.Eventually(t, func() (bool, string) { - svc, err := kubeClusterClient.Cluster(userPath).CoreV1().Services("default").Get(ctx, "first", metav1.GetOptions{}) - if err != nil { - return false, fmt.Sprintf("Failed to get service: %v", err) - } - - if svc.Labels[workloadv1alpha1.ClusterResourceStateLabelPrefix+workloadv1alpha1.ToSyncTargetKey(firstSyncerFixture.SyncTargetClusterName, firstSyncTargetName)] != string(workloadv1alpha1.ResourceStateSync) { - return false, fmt.Sprintf("sync state label for %s is not set on the service", firstSyncTargetName) - } - - if svc.Labels[workloadv1alpha1.ClusterResourceStateLabelPrefix+workloadv1alpha1.ToSyncTargetKey(secondSyncerFixture.SyncTargetClusterName, secondSyncTargetName)] != string(workloadv1alpha1.ResourceStateSync) { - return false, fmt.Sprintf("sync state label for %s is not set on the service", secondSyncTargetName) - } - - return true, "" - }, wait.ForeverTestTimeout, time.Millisecond*100) - - t.Logf("Wait for the service to be synced to the downstream cluster") - framework.Eventually(t, func() (bool, string) { - downstreamServices, err := firstSyncerFixture.DownstreamKubeClient.CoreV1().Services("").List(ctx, metav1.ListOptions{ - LabelSelector: "test.workload.kcp.io=" + firstSyncTargetName, - }) - - if err != nil { - return false, fmt.Sprintf("Failed to list services: %v", err) - } - - if len(downstreamServices.Items) < 1 { - return
false, "service is not synced" - } - return true, "" - }, wait.ForeverTestTimeout, time.Millisecond*100) - - framework.Eventually(t, func() (bool, string) { - downstreamServices, err := secondSyncerFixture.DownstreamKubeClient.CoreV1().Services("").List(ctx, metav1.ListOptions{ - LabelSelector: "test.workload.kcp.io=" + firstSyncTargetName, - }) - - if err != nil { - return false, fmt.Sprintf("Failed to list service: %v", err) - } - - if len(downstreamServices.Items) < 1 { - return false, "service is not synced" - } - return true, "" - }, wait.ForeverTestTimeout, time.Millisecond*100) -} diff --git a/test/e2e/reconciler/scheduling/placement_scheduler_test.go b/test/e2e/reconciler/scheduling/placement_scheduler_test.go deleted file mode 100644 index 6365ed9d806..00000000000 --- a/test/e2e/reconciler/scheduling/placement_scheduler_test.go +++ /dev/null @@ -1,300 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cluster - -import ( - "context" - "fmt" - "math/rand" - "testing" - "time" - - kcpkubernetesclientset "github.com/kcp-dev/client-go/kubernetes" - "github.com/stretchr/testify/require" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - - schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1" - "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/util/conditions" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" - kcpclientset "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster" - "github.com/kcp-dev/kcp/test/e2e/framework" -) - -func TestPlacementUpdate(t *testing.T) { - t.Parallel() - framework.Suite(t, "transparent-multi-cluster") - - ctx, cancelFunc := context.WithCancel(context.Background()) - t.Cleanup(cancelFunc) - - source := framework.SharedKcpServer(t) - - orgPath, _ := framework.NewOrganizationFixture(t, source, framework.TODO_WithoutMultiShardSupport()) - locationPath, _ := framework.NewWorkspaceFixture(t, source, orgPath, framework.TODO_WithoutMultiShardSupport()) - userPath, userWorkspace := framework.NewWorkspaceFixture(t, source, orgPath, framework.TODO_WithoutMultiShardSupport()) - - kubeClusterClient, err := kcpkubernetesclientset.NewForConfig(source.BaseConfig(t)) - require.NoError(t, err) - kcpClusterClient, err := kcpclientset.NewForConfig(source.BaseConfig(t)) - require.NoError(t, err) - - t.Logf("Check that there is no services resource in the user workspace") - _, err = kubeClusterClient.Cluster(userPath).CoreV1().Services("").List(ctx, metav1.ListOptions{}) - require.Error(t, err) - - firstSyncTargetName := fmt.Sprintf("synctarget-%d", +rand.Intn(1000000)) - t.Logf("Creating a SyncTarget and syncer in %s", locationPath) - syncerFixture := framework.NewSyncerFixture(t, source, locationPath, - framework.WithSyncTargetName(firstSyncTargetName), - framework.WithSyncedUserWorkspaces(userWorkspace), - 
).CreateSyncTargetAndApplyToDownstream(t).StartSyncer(t) - - t.Log("Wait for \"default\" location") - require.Eventually(t, func() bool { - _, err = kcpClusterClient.Cluster(locationPath).SchedulingV1alpha1().Locations().Get(ctx, "default", metav1.GetOptions{}) - return err == nil - }, wait.ForeverTestTimeout, time.Millisecond*100) - - placementName := "placement-test-update" - t.Logf("Bind user workspace to location workspace") - framework.NewBindCompute(t, userPath, source, - framework.WithLocationWorkspaceWorkloadBindOption(locationPath), - framework.WithPlacementNameBindOption(placementName), - ).Bind(t) - - t.Logf("Wait until Services can be listed in the user workspace") - require.Eventually(t, func() bool { - _, err := kubeClusterClient.Cluster(userPath).CoreV1().Services("").List(ctx, metav1.ListOptions{}) - if errors.IsNotFound(err) { - return false - } else if err != nil { - t.Logf("Failed to list Services: %v", err) - return false - } - return true - }, wait.ForeverTestTimeout, time.Millisecond*100) - - firstSyncTargetKey := workloadv1alpha1.ToSyncTargetKey(syncerFixture.SyncTargetClusterName, firstSyncTargetName) - - t.Logf("Create a service in the user workspace") - _, err = kubeClusterClient.Cluster(userPath).CoreV1().Services("default").Create(ctx, &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "first", - Labels: map[string]string{ - "test.workload.kcp.io": firstSyncTargetName, - }, - Annotations: map[string]string{ - "finalizers.workload.kcp.io/" + firstSyncTargetKey: "wait-a-bit", - }, - }, - Spec: corev1.ServiceSpec{ - Ports: []corev1.ServicePort{ - { - Port: 80, - Protocol: corev1.ProtocolTCP, - }, - }, - }, - }, metav1.CreateOptions{}) - require.NoError(t, err) - - t.Logf("Wait for the service to have the sync label") - framework.Eventually(t, func() (bool, string) { - svc, err := kubeClusterClient.Cluster(userPath).CoreV1().Services("default").Get(ctx, "first", metav1.GetOptions{}) - if err != nil { - return false, fmt.Sprintf("Failed to get service: %v", err) - } - - return svc.Labels[workloadv1alpha1.ClusterResourceStateLabelPrefix+firstSyncTargetKey] == string(workloadv1alpha1.ResourceStateSync), "" - }, wait.ForeverTestTimeout, time.Millisecond*100) - - t.Logf("Wait for the service to be synced to the downstream cluster") - var downstreamServices *corev1.ServiceList - framework.Eventually(t, func() (bool, string) { - downstreamServices, err = syncerFixture.DownstreamKubeClient.CoreV1().Services("").List(ctx, metav1.ListOptions{ - LabelSelector: "test.workload.kcp.io=" + firstSyncTargetName, - }) - - if err != nil { - return false, fmt.Sprintf("Failed to list services: %v", err) - } - - if len(downstreamServices.Items) < 1 { - return false, "service is not synced" - } - return true, "" - }, wait.ForeverTestTimeout, time.Millisecond*100) - - t.Logf("Update placement to disable scheduling on the namespace") - framework.Eventually(t, func() (bool, string) { - placement, err := kcpClusterClient.Cluster(userPath).SchedulingV1alpha1().Placements().Get(ctx, placementName, metav1.GetOptions{}) - if err != nil { - return false, fmt.Sprintf("Failed to get placement: %v", err) - } - - placement.Spec.NamespaceSelector = nil - _, err = kcpClusterClient.Cluster(userPath).SchedulingV1alpha1().Placements().Update(ctx, placement, metav1.UpdateOptions{}) - if err != nil { - return false, fmt.Sprintf("Failed to update placement: %v", err) - } - - return true, "" - }, wait.ForeverTestTimeout, time.Millisecond*100) - - t.Logf("Placement should turn to unbound phase") -
framework.Eventually(t, func() (bool, string) { - placement, err := kcpClusterClient.Cluster(userPath).SchedulingV1alpha1().Placements().Get(ctx, placementName, metav1.GetOptions{}) - if err != nil { - return false, fmt.Sprintf("Failed to get placement: %v", err) - } - - return placement.Status.Phase == schedulingv1alpha1.PlacementUnbound, "" - }, wait.ForeverTestTimeout, time.Millisecond*100) - - framework.Eventually(t, func() (bool, string) { - ns, err := kubeClusterClient.Cluster(userPath).CoreV1().Namespaces().Get(ctx, "default", metav1.GetOptions{}) - if err != nil { - return false, fmt.Sprintf("Failed to get ns: %v", err) - } - - if len(ns.Annotations[workloadv1alpha1.InternalClusterDeletionTimestampAnnotationPrefix+firstSyncTargetKey]) == 0 { - return false, fmt.Sprintf("namespace should have a %s annotation, but it does not", workloadv1alpha1.InternalClusterDeletionTimestampAnnotationPrefix+firstSyncTargetKey) - } - return true, "" - }, wait.ForeverTestTimeout, time.Millisecond*100) - - framework.Eventually(t, func() (bool, string) { - svc, err := kubeClusterClient.Cluster(userPath).CoreV1().Services("default").Get(ctx, "first", metav1.GetOptions{}) - if err != nil { - return false, fmt.Sprintf("Failed to get service: %v", err) - } - - if len(svc.Annotations[workloadv1alpha1.InternalClusterDeletionTimestampAnnotationPrefix+firstSyncTargetKey]) == 0 { - return false, fmt.Sprintf("service should have a %s annotation, but it does not", workloadv1alpha1.InternalClusterDeletionTimestampAnnotationPrefix+firstSyncTargetKey) - } - return true, "" - }, wait.ForeverTestTimeout, time.Millisecond*100) - - t.Logf("Remove the soft finalizer on the service") - _, err = kubeClusterClient.Cluster(userPath).CoreV1().Services("default").Patch(ctx, "first", types.MergePatchType, - []byte("{\"metadata\":{\"annotations\":{\"finalizers.workload.kcp.io/"+firstSyncTargetKey+"\":\"\"}}}"), metav1.PatchOptions{}) - require.NoError(t, err) - - t.Logf("Wait for the service to be removed in the downstream cluster") - require.Eventually(t, func() bool { - downstreamServices, err = syncerFixture.DownstreamKubeClient.CoreV1().Services("").List(ctx, metav1.ListOptions{ - LabelSelector: "test.workload.kcp.io=" + firstSyncTargetName, - }) - if errors.IsNotFound(err) { - return false - } else if err != nil { - t.Logf("Failed to list Services: %v", err) - return false - } else if len(downstreamServices.Items) != 0 { - return false - } - return true - }, wait.ForeverTestTimeout, time.Millisecond*100) - - framework.Eventually(t, func() (bool, string) { - placement, err := kcpClusterClient.Cluster(userPath).SchedulingV1alpha1().Placements().Get(ctx, placementName, metav1.GetOptions{}) - if err != nil { - return false, fmt.Sprintf("Failed to get placement: %v", err) - } - - placement.Spec.LocationSelectors = []metav1.LabelSelector{ - { - MatchLabels: map[string]string{ - "foo": "bar", - }, - }, - } - _, err = kcpClusterClient.Cluster(userPath).SchedulingV1alpha1().Placements().Update(ctx, placement, metav1.UpdateOptions{}) - if err != nil { - return false, fmt.Sprintf("Failed to update placement: %v", err) - } - return true, "" - }, wait.ForeverTestTimeout, time.Millisecond*100) - - t.Logf("Placement should turn to pending phase") - framework.Eventually(t, func() (bool, string) { - placement, err := kcpClusterClient.Cluster(userPath).SchedulingV1alpha1().Placements().Get(ctx, placementName, metav1.GetOptions{}) - if err != nil { - return false, fmt.Sprintf("Failed to get placement: %v", err) - } - - return 
placement.Status.Phase == schedulingv1alpha1.PlacementPending, "" - }, wait.ForeverTestTimeout, time.Millisecond*100) - - t.Logf("Create a new placement to include the location") - newPlacement := &schedulingv1alpha1.Placement{ - ObjectMeta: metav1.ObjectMeta{ - Name: "new-placement", - }, - Spec: schedulingv1alpha1.PlacementSpec{ - LocationSelectors: []metav1.LabelSelector{{}}, - NamespaceSelector: &metav1.LabelSelector{}, - LocationResource: schedulingv1alpha1.GroupVersionResource{ - Group: "workload.kcp.io", - Version: "v1alpha1", - Resource: "synctargets", - }, - LocationWorkspace: locationPath.String(), - }, - } - _, err = kcpClusterClient.Cluster(userPath).SchedulingV1alpha1().Placements().Create(ctx, newPlacement, metav1.CreateOptions{}) - require.NoError(t, err) - - t.Logf("Wait for new placement to be ready") - framework.EventuallyCondition(t, func() (conditions.Getter, error) { - return kcpClusterClient.Cluster(userPath).SchedulingV1alpha1().Placements().Get(ctx, newPlacement.Name, metav1.GetOptions{}) - }, framework.Is(schedulingv1alpha1.PlacementReady)) - - t.Logf("Wait for the resource to be synced again") - framework.Eventually(t, func() (bool, string) { - svc, err := kubeClusterClient.Cluster(userPath).CoreV1().Services("default").Get(ctx, "first", metav1.GetOptions{}) - if err != nil { - return false, fmt.Sprintf("Failed to get service: %v", err) - } - - if len(svc.Annotations[workloadv1alpha1.InternalClusterDeletionTimestampAnnotationPrefix+firstSyncTargetKey]) != 0 { - return false, fmt.Sprintf("resource should not have the %s annotation, but has %s", workloadv1alpha1.InternalClusterDeletionTimestampAnnotationPrefix+firstSyncTargetKey, svc.Annotations[workloadv1alpha1.InternalClusterDeletionTimestampAnnotationPrefix+firstSyncTargetKey]) - } - return svc.Labels[workloadv1alpha1.ClusterResourceStateLabelPrefix+firstSyncTargetKey] == string(workloadv1alpha1.ResourceStateSync), "" - }, wait.ForeverTestTimeout, time.Millisecond*100) - - t.Logf("Wait for the service to be synced to the downstream cluster") - require.Eventually(t, func() bool { - downstreamServices, err = syncerFixture.DownstreamKubeClient.CoreV1().Services("").List(ctx, metav1.ListOptions{ - LabelSelector: "test.workload.kcp.io=" + firstSyncTargetName, - }) - if errors.IsNotFound(err) { - return false - } else if err != nil { - t.Logf("Failed to list Services: %v", err) - return false - } else if len(downstreamServices.Items) < 1 { - return false - } - return true - }, wait.ForeverTestTimeout, time.Millisecond*100) -} diff --git a/test/e2e/reconciler/scheduling/upsynced_scheduling_test.go b/test/e2e/reconciler/scheduling/upsynced_scheduling_test.go deleted file mode 100644 index f9de9c13ef9..00000000000 --- a/test/e2e/reconciler/scheduling/upsynced_scheduling_test.go +++ /dev/null @@ -1,203 +0,0 @@ -/* -Copyright 2023 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License.
-*/ - -package cluster - -import ( - "context" - "fmt" - "testing" - "time" - - kcpkubernetesclientset "github.com/kcp-dev/client-go/kubernetes" - "github.com/kcp-dev/logicalcluster/v3" - "github.com/stretchr/testify/require" - - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/rest" - - "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/util/conditions" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" - kcpclientset "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster" - "github.com/kcp-dev/kcp/test/e2e/framework" -) - -// TestUpsyncedScheduling verifies that the scheduler correctly manages upsynced resources. In order to -// ensure the desired behaviour, this test will: -// -// 1. Set up the basics of the test: -// - Create two distinct workspaces, a location workspace and a user workspace. -// - Simulate the deployment of a syncer which would sync resources from the user workspace to a physical cluster (we only start the heartbeat and APIImporter parts of the Syncer), without effective syncing. -// -// 2. Upsync a pod to the user workspace. -// 3. Shut down the healthchecker of the syncer, and verify that the upsynced pod is still scheduled to the current synctarget as "Upsync". -// 4. Restart the healthchecker of the syncer, and verify that the upsynced pod is still scheduled to the current synctarget as "Upsync". -// 5. Delete the synctarget, and verify that the upsynced pod gets deleted. -func TestUpsyncedScheduling(t *testing.T) { - t.Parallel() - framework.Suite(t, "transparent-multi-cluster") - - ctx, cancelFunc := context.WithCancel(context.Background()) - t.Cleanup(cancelFunc) - - upstreamServer := framework.PrivateKcpServer(t, framework.WithCustomArguments("--sync-target-heartbeat-threshold=20s")) - t.Log("Creating an organization") - orgPath, _ := framework.NewOrganizationFixture(t, upstreamServer, framework.TODO_WithoutMultiShardSupport()) - t.Log("Creating two workspaces, one for the synctarget and the other for the user workloads") - synctargetWsPath, synctargetWs := framework.NewWorkspaceFixture(t, upstreamServer, orgPath, framework.TODO_WithoutMultiShardSupport()) - synctargetWsName := logicalcluster.Name(synctargetWs.Spec.Cluster) - userWsPath, userWs := framework.NewWorkspaceFixture(t, upstreamServer, orgPath, framework.TODO_WithoutMultiShardSupport()) - userWsName := logicalcluster.Name(userWs.Spec.Cluster) - - syncerFixture := framework.NewSyncerFixture(t, upstreamServer, synctargetWsName.Path(), - framework.WithExtraResources("pods"), - framework.WithExtraResources("deployments.apps"), - framework.WithSyncedUserWorkspaces(userWs), - ).CreateSyncTargetAndApplyToDownstream(t).StartAPIImporter(t).StartHeartBeat(t) - - t.Log("Binding the consumer workspace to the location workspace") - framework.NewBindCompute(t, userWsName.Path(), upstreamServer, - framework.WithLocationWorkspaceWorkloadBindOption(synctargetWsName.Path()), - framework.WithAPIExportsWorkloadBindOption(synctargetWsName.String()+":"+workloadv1alpha1.ImportedAPISExportName), - ).Bind(t) - - upstreamConfig := upstreamServer.BaseConfig(t) - upstreamKubeClusterClient, err := kcpkubernetesclientset.NewForConfig(upstreamConfig) - require.NoError(t, err) - - upstreamNamespace, err := upstreamKubeClusterClient.Cluster(userWsPath).CoreV1().Namespaces().Create(ctx, &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-scheduling", -
}, - }, metav1.CreateOptions{}) - require.NoError(t, err) - - upstreamKcpClient, err := kcpclientset.NewForConfig(upstreamConfig) - require.NoError(t, err) - - syncTarget, err := upstreamKcpClient.Cluster(synctargetWsPath).WorkloadV1alpha1().SyncTargets().Get(ctx, - syncerFixture.SyncerConfig.SyncTargetName, - metav1.GetOptions{}, - ) - require.NoError(t, err) - - t.Log("Wait until Pods can be listed in the consumer workspace via direct access") - require.Eventually(t, func() bool { - _, err := upstreamKubeClusterClient.Cluster(userWsPath).CoreV1().Pods("").List(ctx, metav1.ListOptions{}) - return !apierrors.IsNotFound(err) - }, wait.ForeverTestTimeout, time.Millisecond*100) - - stateLabelKey := "state.workload.kcp.io/" + workloadv1alpha1.ToSyncTargetKey(synctargetWsName, syncTarget.Name) - - t.Log("Upsyncing Pod to KCP") - pod := corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pod", - Namespace: upstreamNamespace.Name, - Labels: map[string]string{ - stateLabelKey: "Upsync", - }, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "test-container", - }, - }, - }, - } - - // Create a client that uses the upsyncer URL - upsyncerVirtualWorkspaceConfig := rest.CopyConfig(upstreamConfig) - framework.Eventually(t, func() (found bool, _ string) { - var err error - upsyncerVirtualWorkspaceConfig.Host, found, err = framework.VirtualWorkspaceURL(ctx, upstreamKcpClient, userWs, syncerFixture.GetUpsyncerVirtualWorkspaceURLs()) - require.NoError(t, err) - return found, "Upsyncer virtual workspace URL not found" - }, wait.ForeverTestTimeout, time.Millisecond*100, "Upsyncer virtual workspace URL not found") - upsyncerKCPClient, err := kcpkubernetesclientset.NewForConfig(upsyncerVirtualWorkspaceConfig) - require.NoError(t, err) - - _, err = upsyncerKCPClient.Cluster(userWsName.Path()).CoreV1().Pods(upstreamNamespace.Name).Create(ctx, &pod, metav1.CreateOptions{}) - require.NoError(t, err) - - t.Log("Checking that the upsynced Pod has the state set to Upsync...") - framework.Eventually(t, func() (bool, string) { - upstreamPod, err := upstreamKubeClusterClient.Cluster(userWsPath).CoreV1().Pods(upstreamNamespace.Name).Get(ctx, pod.Name, metav1.GetOptions{}) - if err != nil { - return false, err.Error() - } - if upstreamPod.Labels[stateLabelKey] == "Upsync" { - return true, "" - } - return false, fmt.Sprintf("expected state to be Upsync, got %s", upstreamPod.Labels[stateLabelKey]) - }, wait.ForeverTestTimeout, time.Millisecond*100, "expected upsynced pod state to be Upsync") - - t.Log("Stopping the syncer healthchecker...") - syncerFixture.StopHeartBeat(t) - - t.Log("Checking that the synctarget is not ready...") - framework.Eventually(t, func() (bool, string) { - syncTarget, err := upstreamKcpClient.Cluster(synctargetWsPath).WorkloadV1alpha1().SyncTargets().Get(ctx, syncTarget.Name, metav1.GetOptions{}) - if err != nil { - return false, err.Error() - } - if conditions.IsTrue(syncTarget, workloadv1alpha1.HeartbeatHealthy) { - return false, "expected synctarget to be not ready" - } - return true, "" - }, wait.ForeverTestTimeout, time.Millisecond*100) - - t.Log("Checking that the upsynced Pod remains in the Upsync state...") - require.Never(t, func() bool { - upstreamPod, err := upstreamKubeClusterClient.Cluster(userWsPath).CoreV1().Pods(upstreamNamespace.Name).Get(ctx, pod.Name, metav1.GetOptions{}) - if err != nil { - return false - } - return upstreamPod.Labels[stateLabelKey] != "Upsync" - }, 5*time.Second, time.Millisecond*100, "expected upsynced pod state to remain Upsync") - - t.Log("Starting the syncer healthchecker again...") - syncerFixture.StartHeartBeat(t) - - t.Log("Checking that the upsynced Pod remains in the Upsync state...") - require.Never(t, func() bool { - upstreamPod, err := upstreamKubeClusterClient.Cluster(userWsPath).CoreV1().Pods(upstreamNamespace.Name).Get(ctx, pod.Name, metav1.GetOptions{}) - if err != nil { - return false - } - return upstreamPod.Labels[stateLabelKey] != "Upsync" - }, 5*time.Second, time.Millisecond*100, "expected upsynced pod state to remain Upsync") - - t.Log("Deleting the SyncTarget...") - err = upstreamKcpClient.Cluster(synctargetWsPath).WorkloadV1alpha1().SyncTargets().Delete(ctx, syncTarget.Name, metav1.DeleteOptions{}) - require.NoError(t, err) - - t.Log("Checking that the upsynced Pod has been deleted...") - framework.Eventually(t, func() (bool, string) { - _, err := upstreamKubeClusterClient.Cluster(userWsPath).CoreV1().Pods(upstreamNamespace.Name).Get(ctx, pod.Name, metav1.GetOptions{}) - if err != nil { - if apierrors.IsNotFound(err) { - return true, "" - } - return false, err.Error() - } - return false, "expected the pod to be deleted" - }, wait.ForeverTestTimeout, time.Millisecond*100) -} diff --git a/test/e2e/virtual/syncer/virtualworkspace_test.go b/test/e2e/virtual/syncer/virtualworkspace_test.go deleted file mode 100644 index 62d5eb1160f..00000000000 --- a/test/e2e/virtual/syncer/virtualworkspace_test.go +++ /dev/null @@ -1,2161 +0,0 @@ -/* -Copyright 2021 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License.
-*/ - -package syncer - -import ( - "context" - "fmt" - "sort" - "strings" - "testing" - "time" - - "github.com/google/go-cmp/cmp" - kcpdiscovery "github.com/kcp-dev/client-go/discovery" - kcpkubernetesclientset "github.com/kcp-dev/client-go/kubernetes" - "github.com/kcp-dev/logicalcluster/v3" - "github.com/stretchr/testify/require" - - corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" - apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/apiserver/pkg/endpoints/discovery" - "k8s.io/client-go/rest" - - "github.com/kcp-dev/kcp/config/rootcompute" - "github.com/kcp-dev/kcp/pkg/syncer/shared" - schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1" - tenancyv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" - kcpclientset "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster" - kubefixtures "github.com/kcp-dev/kcp/test/e2e/fixtures/kube" - fixturewildwest "github.com/kcp-dev/kcp/test/e2e/fixtures/wildwest" - "github.com/kcp-dev/kcp/test/e2e/fixtures/wildwest/apis/wildwest" - wildwestv1alpha1 "github.com/kcp-dev/kcp/test/e2e/fixtures/wildwest/apis/wildwest/v1alpha1" - wildwestclientset "github.com/kcp-dev/kcp/test/e2e/fixtures/wildwest/client/clientset/versioned/cluster" - "github.com/kcp-dev/kcp/test/e2e/framework" -) - -func deploymentsAPIResourceList(clusterName logicalcluster.Name) *metav1.APIResourceList { - return &metav1.APIResourceList{ - TypeMeta: metav1.TypeMeta{ - Kind: "APIResourceList", - APIVersion: "v1", - }, - GroupVersion: "apps/v1", - APIResources: []metav1.APIResource{ - { - Kind: "Deployment", - Name: "deployments", - SingularName: "deployment", - Namespaced: true, - Verbs: metav1.Verbs{"get", "list", "patch", "update", "watch"}, - StorageVersionHash: discovery.StorageVersionHash(clusterName, "apps", "v1", "Deployment"), - Categories: []string{"all"}, - ShortNames: []string{"deploy"}, - }, - { - Kind: "Deployment", - Name: "deployments/status", - Namespaced: true, - Verbs: metav1.Verbs{"get", "patch", "update"}, - StorageVersionHash: "", - }, - }, - } -} - -func requiredCoreAPIResourceList(clusterName logicalcluster.Name) *metav1.APIResourceList { - return &metav1.APIResourceList{ - TypeMeta: metav1.TypeMeta{ - Kind: "APIResourceList", - }, - GroupVersion: "v1", - APIResources: []metav1.APIResource{ - { - Kind: "ConfigMap", - Name: "configmaps", - SingularName: "configmap", - Namespaced: true, - Verbs: metav1.Verbs{"get", "list", "patch", "update", "watch"}, - StorageVersionHash: discovery.StorageVersionHash(clusterName, "", "v1", "ConfigMap"), - }, - { - Kind: "Namespace", - Name: "namespaces", - SingularName: "namespace", - Namespaced: false, - Verbs: metav1.Verbs{"get", "list", "patch", "update", "watch"}, - StorageVersionHash: discovery.StorageVersionHash(clusterName, "", "v1", "Namespace"), - }, - { - Kind: "Namespace", - Name: "namespaces/status", - SingularName: "", - Namespaced: false, - Verbs: metav1.Verbs{"get", "patch", "update"}, - StorageVersionHash: "", - }, - { - Kind: "Secret", - Name: "secrets", - SingularName: "secret", - Namespaced: true, - Verbs: metav1.Verbs{"get", "list", "patch", "update", "watch"}, - StorageVersionHash: 
discovery.StorageVersionHash(clusterName, "", "v1", "Secret"), - }, - { - Kind: "ServiceAccount", - Name: "serviceaccounts", - SingularName: "serviceaccount", - Namespaced: true, - Verbs: metav1.Verbs{"get", "list", "patch", "update", "watch"}, - StorageVersionHash: discovery.StorageVersionHash(clusterName, "", "v1", "ServiceAccount"), - }, - }, - } -} - -func withRootComputeAPIResourceList(workspaceName logicalcluster.Name, rootComputeLogicalCluster logicalcluster.Name) []*metav1.APIResourceList { - coreResourceList := requiredCoreAPIResourceList(workspaceName) - coreResourceList.APIResources = append(coreResourceList.APIResources, - metav1.APIResource{ - Kind: "Endpoints", - Name: "endpoints", - SingularName: "endpoints", - Namespaced: true, - Verbs: metav1.Verbs{"get", "list", "patch", "update", "watch"}, - ShortNames: []string{"ep"}, - StorageVersionHash: discovery.StorageVersionHash(rootComputeLogicalCluster, "", "v1", "Endpoints"), - }, - metav1.APIResource{ - Kind: "Service", - Name: "services", - SingularName: "service", - Namespaced: true, - Verbs: metav1.Verbs{"get", "list", "patch", "update", "watch"}, - ShortNames: []string{"svc"}, - Categories: []string{"all"}, - StorageVersionHash: discovery.StorageVersionHash(rootComputeLogicalCluster, "", "v1", "Service"), - }, - metav1.APIResource{ - Kind: "Service", - Name: "services/status", - SingularName: "", - Namespaced: true, - Verbs: metav1.Verbs{"get", "patch", "update"}, - StorageVersionHash: "", - }, - ) - - return []*metav1.APIResourceList{ - deploymentsAPIResourceList(rootComputeLogicalCluster), - { - TypeMeta: metav1.TypeMeta{ - Kind: "APIResourceList", - APIVersion: "v1", - }, - GroupVersion: "networking.k8s.io/v1", - APIResources: []metav1.APIResource{ - { - Kind: "Ingress", - Name: "ingresses", - SingularName: "ingress", - Namespaced: true, - Verbs: metav1.Verbs{"get", "list", "patch", "update", "watch"}, - ShortNames: []string{"ing"}, - StorageVersionHash: discovery.StorageVersionHash(rootComputeLogicalCluster, "networking.k8s.io", "v1", "Ingress"), - }, - { - Kind: "Ingress", - Name: "ingresses/status", - Namespaced: true, - Verbs: metav1.Verbs{"get", "patch", "update"}, - StorageVersionHash: "", - }, - }, - }, - coreResourceList, - } -} - -func logWithTimestampf(t *testing.T, format string, args ...interface{}) { - t.Helper() - t.Logf("[%s] %s", time.Now().Format("15:04:05.000000"), fmt.Sprintf(format, args...)) -} - -func TestSyncerVirtualWorkspace(t *testing.T) { - t.Parallel() - framework.Suite(t, "transparent-multi-cluster") - - server := framework.SharedKcpServer(t) - - kubeClusterClient, err := kcpkubernetesclientset.NewForConfig(server.BaseConfig(t)) - require.NoError(t, err) - kcpClusterClient, err := kcpclientset.NewForConfig(server.BaseConfig(t)) - require.NoError(t, err) - wildwestClusterClient, err := wildwestclientset.NewForConfig(server.BaseConfig(t)) - require.NoError(t, err) - - var testCases = []struct { - name string - work func(t *testing.T, testCaseWorkspace logicalcluster.Path) - }{ - { - name: "isolated API domains per syncer", - work: func(t *testing.T, testCaseWorkspace logicalcluster.Path) { - t.Helper() - - ctx, cancelFunc := context.WithCancel(context.Background()) - t.Cleanup(cancelFunc) - - kubelikeLocationWorkspacePath, kubelikeLocationWorkspace := framework.NewWorkspaceFixture(t, server, testCaseWorkspace, framework.WithName("kubelike-locations"), framework.TODO_WithoutMultiShardSupport()) - kubelikeLocationWorkspaceClusterName := logicalcluster.Name(kubelikeLocationWorkspace.Spec.Cluster) 
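- // Workspace fixtures expose the backing logical cluster via Spec.Cluster; the name is captured here because the expected discovery lists built above are computed per logical cluster.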
- logWithTimestampf(t, "Deploying syncer into workspace %s", kubelikeLocationWorkspacePath) - kubelikeSyncer := framework.NewSyncerFixture(t, server, kubelikeLocationWorkspacePath, - framework.WithSyncTargetName("kubelike"), - framework.WithAPIExports("root:compute:kubernetes"), - framework.WithDownstreamPreparation(func(config *rest.Config, isFakePCluster bool) { - if !isFakePCluster { - // Only need to install services and ingresses in a logical cluster - return - } - sinkCrdClient, err := apiextensionsclientset.NewForConfig(config) - require.NoError(t, err, "failed to create apiextensions client") - logWithTimestampf(t, "Installing test CRDs into sink cluster...") - kubefixtures.Create(t, sinkCrdClient.ApiextensionsV1().CustomResourceDefinitions(), - metav1.GroupResource{Group: "networking.k8s.io", Resource: "ingresses"}, - ) - require.NoError(t, err) - }), - ).CreateSyncTargetAndApplyToDownstream(t).StartAPIImporter(t).StartHeartBeat(t) - - kubelikeSyncer.StopHeartBeat(t) - kubelikeSyncer.StartHeartBeat(t) - - wildwestLocationWorkspacePath, wildwestLocationWorkspace := framework.NewWorkspaceFixture(t, server, testCaseWorkspace, framework.WithName("wildwest-locations"), framework.TODO_WithoutMultiShardSupport()) - wildwestLocationWorkspaceClusterName := logicalcluster.Name(wildwestLocationWorkspace.Spec.Cluster) - logWithTimestampf(t, "Deploying syncer into workspace %s", wildwestLocationWorkspacePath) - - wildwestSyncer := framework.NewSyncerFixture(t, server, wildwestLocationWorkspacePath, - framework.WithExtraResources("cowboys.wildwest.dev"), - // empty APIExports so we do not add global kubernetes APIExport. - framework.WithAPIExports(""), - framework.WithSyncTargetName("wildwest"), - framework.WithDownstreamPreparation(func(config *rest.Config, isFakePCluster bool) { - // Always install the crd regardless of whether the target is - // logical or not since cowboys is not a native type. - sinkCrdClient, err := apiextensionsclientset.NewForConfig(config) - require.NoError(t, err) - logWithTimestampf(t, "Installing test CRDs into sink cluster...") - fixturewildwest.FakePClusterCreate(t, sinkCrdClient.ApiextensionsV1().CustomResourceDefinitions(), metav1.GroupResource{Group: wildwest.GroupName, Resource: "cowboys"}) - }), - ).CreateSyncTargetAndApplyToDownstream(t).StartAPIImporter(t).StartHeartBeat(t) - - // We need to get a resource in the "root:compute" cluster to get the logical cluster name, in this case we use the - // kubernetes APIExport, as we know that it exists. 
- export, err := kcpClusterClient.Cluster(rootcompute.RootComputeClusterName).ApisV1alpha1().APIExports().Get(context.Background(), "kubernetes", metav1.GetOptions{}) - require.NoError(t, err) - rootComputeLogicalCluster := logicalcluster.From(export) - - kubelikeVWDiscoverConfig := rest.CopyConfig(server.BaseConfig(t)) - framework.Eventually(t, func() (found bool, _ string) { - var err error - kubelikeVWDiscoverConfig.Host, found, err = framework.VirtualWorkspaceURL(ctx, kcpClusterClient, kubelikeLocationWorkspace, kubelikeSyncer.GetSyncerVirtualWorkspaceURLs()) - require.NoError(t, err) - return found, "Syncer virtual workspace URL not found" - }, wait.ForeverTestTimeout, time.Millisecond*100, "Syncer virtual workspace URL not found") - kubelikeVWDiscoverClusterClient, err := kcpdiscovery.NewForConfig(kubelikeVWDiscoverConfig) - require.NoError(t, err) - - logWithTimestampf(t, "Check discovery in kubelike virtual workspace") - framework.Eventually(t, func() (bool, string) { - _, kubelikeAPIResourceLists, err := kubelikeVWDiscoverClusterClient.ServerGroupsAndResources() - if err != nil { - return false, err.Error() - } - diff := cmp.Diff( - sortAPIResourceList(withRootComputeAPIResourceList(kubelikeLocationWorkspaceClusterName, rootComputeLogicalCluster)), - sortAPIResourceList(kubelikeAPIResourceLists)) - return len(diff) == 0, diff - }, wait.ForeverTestTimeout, time.Millisecond*100) - - wildwestVWDiscoverConfig := rest.CopyConfig(server.BaseConfig(t)) - framework.Eventually(t, func() (found bool, _ string) { - var err error - wildwestVWDiscoverConfig.Host, found, err = framework.VirtualWorkspaceURL(ctx, kcpClusterClient, wildwestLocationWorkspace, wildwestSyncer.GetSyncerVirtualWorkspaceURLs()) - require.NoError(t, err) - return found, "Syncer virtual workspace URL not found" - }, wait.ForeverTestTimeout, time.Millisecond*100, "Syncer virtual workspace URL not found") - wildwestVWDiscoverClusterClient, err := kcpdiscovery.NewForConfig(wildwestVWDiscoverConfig) - - logWithTimestampf(t, "Check discovery in wildwest virtual workspace") - require.NoError(t, err) - framework.Eventually(t, func() (bool, string) { - _, wildwestAPIResourceLists, err := wildwestVWDiscoverClusterClient.ServerGroupsAndResources() - if err != nil { - return false, err.Error() - } - diff := cmp.Diff([]*metav1.APIResourceList{ - requiredCoreAPIResourceList(wildwestLocationWorkspaceClusterName), - { - TypeMeta: metav1.TypeMeta{ - Kind: "APIResourceList", - APIVersion: "v1", - }, - GroupVersion: "wildwest.dev/v1alpha1", - APIResources: []metav1.APIResource{ - { - Kind: "Cowboy", - Name: "cowboys", - SingularName: "cowboy", - Namespaced: true, - Verbs: metav1.Verbs{"get", "list", "patch", "update", "watch"}, - StorageVersionHash: discovery.StorageVersionHash(wildwestLocationWorkspaceClusterName, "wildwest.dev", "v1alpha1", "Cowboy"), - }, - { - Kind: "Cowboy", - Name: "cowboys/status", - Namespaced: true, - Verbs: metav1.Verbs{"get", "patch", "update"}, - StorageVersionHash: "", - }, - }, - }, - }, sortAPIResourceList(wildwestAPIResourceLists)) - return len(diff) == 0, diff - }, wait.ForeverTestTimeout, time.Millisecond*100) - }, - }, - { - name: "access is authorized", - work: func(t *testing.T, testCaseWorkspace logicalcluster.Path) { - t.Helper() - - ctx, cancelFunc := context.WithCancel(context.Background()) - t.Cleanup(cancelFunc) - - wildwestLocationPath, wildwestLocationWorkspace := framework.NewWorkspaceFixture(t, server, testCaseWorkspace, framework.WithName("wildwest-locations"), 
framework.TODO_WithoutMultiShardSupport()) - logWithTimestampf(t, "Deploying syncer into workspace %s", wildwestLocationPath) - - wildwestSyncer := framework.NewSyncerFixture(t, server, wildwestLocationPath, - framework.WithExtraResources("cowboys.wildwest.dev"), - // empty APIExports so we do not add global kubernetes APIExport. - framework.WithAPIExports(""), - framework.WithSyncTargetName("wildwest"), - framework.WithDownstreamPreparation(func(config *rest.Config, isFakePCluster bool) { - // Always install the crd regardless of whether the target is - // logical or not since cowboys is not a native type. - sinkCrdClient, err := apiextensionsclientset.NewForConfig(config) - require.NoError(t, err) - logWithTimestampf(t, "Installing test CRDs into sink cluster...") - fixturewildwest.FakePClusterCreate(t, sinkCrdClient.ApiextensionsV1().CustomResourceDefinitions(), metav1.GroupResource{Group: wildwest.GroupName, Resource: "cowboys"}) - }), - ).CreateSyncTargetAndApplyToDownstream(t).StartAPIImporter(t).StartHeartBeat(t) - - logWithTimestampf(t, "Bind wildwest location workspace to itself") - framework.NewBindCompute(t, wildwestLocationPath, server, - framework.WithAPIExportsWorkloadBindOption(wildwestLocationPath.Join(workloadv1alpha1.ImportedAPISExportName).String()), - ).Bind(t) - - logWithTimestampf(t, "Create two service accounts") - _, err := kubeClusterClient.Cluster(wildwestLocationPath).CoreV1().ServiceAccounts("default").Create(ctx, &corev1.ServiceAccount{ - ObjectMeta: metav1.ObjectMeta{ - Name: "service-account-1", - }, - }, metav1.CreateOptions{}) - require.NoError(t, err) - _, err = kubeClusterClient.Cluster(wildwestLocationPath).CoreV1().ServiceAccounts("default").Create(ctx, &corev1.ServiceAccount{ - ObjectMeta: metav1.ObjectMeta{ - Name: "service-account-2", - }, - }, metav1.CreateOptions{}) - require.NoError(t, err) - var token1, token2 string - framework.Eventually(t, func() (bool, string) { - secrets, err := kubeClusterClient.Cluster(wildwestLocationPath).CoreV1().Secrets("default").List(ctx, metav1.ListOptions{}) - require.NoError(t, err, "failed to list secrets") - for _, secret := range secrets.Items { - if secret.Annotations[corev1.ServiceAccountNameKey] == "service-account-1" { - token1 = string(secret.Data[corev1.ServiceAccountTokenKey]) - } - if secret.Annotations[corev1.ServiceAccountNameKey] == "service-account-2" { - token2 = string(secret.Data[corev1.ServiceAccountTokenKey]) - } - } - return token1 != "" && token2 != "", fmt.Sprintf("token1=%q - token2=%q", token1, token2) - }, wait.ForeverTestTimeout, time.Millisecond*100, "token secret for default service account not created") - - wildwestVWConfig := rest.CopyConfig(server.BaseConfig(t)) - framework.Eventually(t, func() (found bool, _ string) { - var err error - wildwestVWConfig.Host, found, err = framework.VirtualWorkspaceURL(ctx, kcpClusterClient, wildwestLocationWorkspace, wildwestSyncer.GetSyncerVirtualWorkspaceURLs()) - require.NoError(t, err) - return found, "Syncer virtual workspace URL not found" - }, wait.ForeverTestTimeout, time.Millisecond*100, "Syncer virtual workspace URL not found") - - configUser1 := framework.ConfigWithToken(token1, wildwestVWConfig) - configUser2 := framework.ConfigWithToken(token2, wildwestVWConfig) - - vwClusterClientUser1, err := wildwestclientset.NewForConfig(configUser1) - require.NoError(t, err) - vwClusterClientUser2, err := wildwestclientset.NewForConfig(configUser2) - require.NoError(t, err) - - logWithTimestampf(t, "Check discovery in wildwest virtual workspace 
with unprivileged service-account-1, expecting forbidden") - _, err = vwClusterClientUser1.WildwestV1alpha1().Cowboys().List(ctx, metav1.ListOptions{}) - require.Error(t, err) - require.True(t, errors.IsForbidden(err)) - - logWithTimestampf(t, "Giving service-account-2 permissions to access wildwest virtual workspace") - _, err = kubeClusterClient.Cluster(wildwestLocationPath).RbacV1().ClusterRoleBindings().Create(ctx, - &rbacv1.ClusterRoleBinding{ - ObjectMeta: metav1.ObjectMeta{ - Name: "service-account-2-sync-access", - }, - Subjects: []rbacv1.Subject{ - { - Kind: "ServiceAccount", - Name: "service-account-2", - Namespace: "default", - }, - }, - RoleRef: rbacv1.RoleRef{ - APIGroup: "rbac.authorization.k8s.io", - Kind: "ClusterRole", - Name: "wildwest-syncer", - }, - }, metav1.CreateOptions{}, - ) - require.NoError(t, err) - _, err = kubeClusterClient.Cluster(wildwestLocationPath).RbacV1().ClusterRoles().Create(ctx, - &rbacv1.ClusterRole{ - ObjectMeta: metav1.ObjectMeta{ - Name: "wildwest-syncer", - }, - Rules: []rbacv1.PolicyRule{ - { - Verbs: []string{"sync"}, - APIGroups: []string{"workload.kcp.io"}, - Resources: []string{"synctargets"}, - ResourceNames: []string{"wildwest"}, - }, - }, - }, metav1.CreateOptions{}, - ) - require.NoError(t, err) - - logWithTimestampf(t, "Check discovery in wildwest virtual workspace with unprivileged service-account-2, expecting success") - framework.Eventually(t, func() (bool, string) { - _, err = vwClusterClientUser2.WildwestV1alpha1().Cowboys().List(ctx, metav1.ListOptions{}) - return err == nil, fmt.Sprintf("waiting for service-account-2 to be able to list cowboys: %v", err) - }, wait.ForeverTestTimeout, time.Millisecond*200) - - logWithTimestampf(t, "Double check that service-account-1 still cannot access wildwest virtual workspace") - _, err = vwClusterClientUser1.WildwestV1alpha1().Cowboys().List(ctx, metav1.ListOptions{}) - require.Error(t, err) - require.True(t, errors.IsForbidden(err)) - }, - }, - { - name: "access kcp resources in location workspace through syncer virtual workspace ", - work: func(t *testing.T, testCaseWorkspace logicalcluster.Path) { - t.Helper() - - ctx, cancelFunc := context.WithCancel(context.Background()) - t.Cleanup(cancelFunc) - - wildwestLocationPath, wildwestLocationWorkspace := framework.NewWorkspaceFixture(t, server, testCaseWorkspace, framework.WithName("wildwest-locations"), framework.TODO_WithoutMultiShardSupport()) - wildwestLocationClusterName := logicalcluster.Name(wildwestLocationWorkspace.Spec.Cluster) - logWithTimestampf(t, "Deploying syncer into workspace %s", wildwestLocationPath) - - wildwestSyncer := framework.NewSyncerFixture(t, server, wildwestLocationPath, - framework.WithExtraResources("cowboys.wildwest.dev"), - // empty APIExports so we do not add global kubernetes APIExport. - framework.WithAPIExports(""), - framework.WithSyncTargetName("wildwest"), - framework.WithSyncedUserWorkspaces(wildwestLocationWorkspace), - framework.WithDownstreamPreparation(func(config *rest.Config, isFakePCluster bool) { - // Always install the crd regardless of whether the target is - // logical or not since cowboys is not a native type. 
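- // This preparation hook runs against the downstream (sink) cluster before syncing starts, so the CRD should be available by the time the syncer needs it.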
- sinkCrdClient, err := apiextensionsclientset.NewForConfig(config) - require.NoError(t, err) - logWithTimestampf(t, "Installing test CRDs into sink cluster...") - fixturewildwest.FakePClusterCreate(t, sinkCrdClient.ApiextensionsV1().CustomResourceDefinitions(), metav1.GroupResource{Group: wildwest.GroupName, Resource: "cowboys"}) - }), - ).CreateSyncTargetAndApplyToDownstream(t).StartAPIImporter(t).StartHeartBeat(t) - - logWithTimestampf(t, "Bind wildwest location workspace to itself") - framework.NewBindCompute(t, wildwestLocationPath, server, - framework.WithAPIExportsWorkloadBindOption(wildwestLocationPath.Join(workloadv1alpha1.ImportedAPISExportName).String()), - ).Bind(t) - - wildwestClusterClient, err := wildwestclientset.NewForConfig(server.BaseConfig(t)) - require.NoError(t, err) - - syncTargetKey := wildwestSyncer.ToSyncTargetKey() - - logWithTimestampf(t, "Wait for being able to list cowboys in the consumer workspace via direct access") - framework.Eventually(t, func() (bool, string) { - _, err := wildwestClusterClient.Cluster(wildwestLocationPath).WildwestV1alpha1().Cowboys("").List(ctx, metav1.ListOptions{}) - if err != nil { - return false, err.Error() - } - return true, "" - }, wait.ForeverTestTimeout, time.Millisecond*100) - - logWithTimestampf(t, "Create cowboy luckyluke") - _, err = wildwestClusterClient.Cluster(wildwestLocationPath).WildwestV1alpha1().Cowboys("default").Create(ctx, &wildwestv1alpha1.Cowboy{ - ObjectMeta: metav1.ObjectMeta{ - Name: "luckyluke", - }, - Spec: wildwestv1alpha1.CowboySpec{ - Intent: "should catch joe", - }, - }, metav1.CreateOptions{}) - require.NoError(t, err) - - wildwestVWConfig := rest.CopyConfig(server.BaseConfig(t)) - framework.Eventually(t, func() (found bool, _ string) { - var err error - wildwestVWConfig.Host, found, err = framework.VirtualWorkspaceURL(ctx, kcpClusterClient, wildwestLocationWorkspace, wildwestSyncer.GetSyncerVirtualWorkspaceURLs()) - require.NoError(t, err) - return found, "Syncer virtual workspace URL not found" - }, wait.ForeverTestTimeout, time.Millisecond*100, "Syncer virtual workspace URL not found") - vwClusterClient, err := wildwestclientset.NewForConfig(wildwestVWConfig) - require.NoError(t, err) - - logWithTimestampf(t, "Verify there is one cowboy via direct access") - kcpCowboys, err := wildwestClusterClient.Cluster(wildwestLocationPath).WildwestV1alpha1().Cowboys("").List(ctx, metav1.ListOptions{}) - require.NoError(t, err) - require.Len(t, kcpCowboys.Items, 1) - - logWithTimestampf(t, "Wait until the virtual workspace has the resource") - framework.Eventually(t, func() (bool, string) { - // resources show up asynchronously, so we have to try until List works. Then it should return all object immediately. 
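- // Any List error is therefore treated as "not ready yet" and retried, rather than failing the test immediately.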
- _, err := vwClusterClient.WildwestV1alpha1().Cowboys().List(ctx, metav1.ListOptions{}) - if err != nil { - return false, err.Error() - } - return true, "" - }, wait.ForeverTestTimeout, time.Millisecond*100) - - logWithTimestampf(t, "Wait for resource controller to schedule cowboy and then show up via virtual workspace wildcard request") - var cowboys *wildwestv1alpha1.CowboyList - framework.Eventually(t, func() (bool, string) { - cowboys, err = vwClusterClient.WildwestV1alpha1().Cowboys().List(ctx, metav1.ListOptions{}) - require.NoError(t, err) - require.LessOrEqual(t, len(cowboys.Items), 1, "expected no other cowboy than luckyluke, got %d cowboys.", len(cowboys.Items)) - return len(cowboys.Items) == 1, fmt.Sprintf("cowboys items length: %d", len(cowboys.Items)) - }, wait.ForeverTestTimeout, time.Millisecond*100) - require.Equal(t, "luckyluke", cowboys.Items[0].Name) - - logWithTimestampf(t, "Verify there is luckyluke via virtual workspace request") - kcpCowboy, err := wildwestClusterClient.Cluster(wildwestLocationPath).WildwestV1alpha1().Cowboys("default").Get(ctx, "luckyluke", metav1.GetOptions{}) - require.NoError(t, err) - virtualWorkspaceCowboy, err := vwClusterClient.Cluster(wildwestLocationClusterName.Path()).WildwestV1alpha1().Cowboys("default").Get(ctx, "luckyluke", metav1.GetOptions{}) - require.NoError(t, err) - require.Equal(t, kcpCowboy.UID, virtualWorkspaceCowboy.UID) - require.Equal(t, kcpCowboy.Spec, virtualWorkspaceCowboy.Spec) - require.Equal(t, kcpCowboy.Status, virtualWorkspaceCowboy.Status) - - framework.Eventually(t, func() (bool, string) { - kcpCowboy, err = wildwestClusterClient.Cluster(wildwestLocationPath).WildwestV1alpha1().Cowboys("default").Get(ctx, "luckyluke", metav1.GetOptions{}) - if errors.IsNotFound(err) { - return false, err.Error() - } - require.NoError(t, err) - syncTargetsToSync := map[string]string{} - for name, value := range kcpCowboy.Labels { - if strings.HasPrefix(name, workloadv1alpha1.ClusterResourceStateLabelPrefix) { - syncTargetsToSync[strings.TrimPrefix(name, workloadv1alpha1.ClusterResourceStateLabelPrefix)] = value - } - } - - return len(syncTargetsToSync) == 1 && - syncTargetsToSync[syncTargetKey] == "Sync", fmt.Sprintf("%v", syncTargetsToSync) - }, wait.ForeverTestTimeout, time.Millisecond*100) - - logWithTimestampf(t, "Add the syncer finalizer to simulate the Syncer has taken ownership of it") - kcpCowboy, err = wildwestClusterClient.Cluster(wildwestLocationPath).WildwestV1alpha1().Cowboys("default").Patch(ctx, "luckyluke", types.MergePatchType, []byte(fmt.Sprintf("{\"metadata\":{\"finalizers\":[%q]}}", - shared.SyncerFinalizerNamePrefix+syncTargetKey, - )), metav1.PatchOptions{}) - require.NoError(t, err) - - logWithTimestampf(t, "Patch luckyluke via virtual workspace to report in status that joe is in prison") - _, err = vwClusterClient.Cluster(wildwestLocationClusterName.Path()).WildwestV1alpha1().Cowboys("default").Patch(ctx, "luckyluke", types.MergePatchType, []byte("{\"status\":{\"result\":\"joe in prison\"}}"), metav1.PatchOptions{}, "status") - require.NoError(t, err) - - logWithTimestampf(t, "Patch luckyluke via virtual workspace to catch averell") - _, err = vwClusterClient.Cluster(wildwestLocationClusterName.Path()).WildwestV1alpha1().Cowboys("default").Patch(ctx, "luckyluke", types.MergePatchType, []byte("{\"spec\":{\"intent\":\"should catch averell\"}}"), metav1.PatchOptions{}) - require.NoError(t, err) - - logWithTimestampf(t, "Verify that luckyluke has only status changed on the syncer view, since the spec.intent 
field is not part of summarized fields") - virtualWorkspaceModifiedkcpCowboy, err := vwClusterClient.Cluster(wildwestLocationClusterName.Path()).WildwestV1alpha1().Cowboys("default").Get(ctx, "luckyluke", metav1.GetOptions{}) - require.NoError(t, err) - require.NotEqual(t, kcpCowboy.ResourceVersion, virtualWorkspaceModifiedkcpCowboy.ResourceVersion) - - expectedModifiedKcpCowboy := kcpCowboy.DeepCopy() - expectedModifiedKcpCowboy.Status.Result = "joe in prison" - expectedModifiedKcpCowboy.Spec.Intent = "should catch joe" - require.Empty(t, cmp.Diff(expectedModifiedKcpCowboy.Status, virtualWorkspaceModifiedkcpCowboy.Status)) - require.Empty(t, cmp.Diff(expectedModifiedKcpCowboy.Spec, virtualWorkspaceModifiedkcpCowboy.Spec)) - - logWithTimestampf(t, "Verify that luckyluke has also status changed on the upstream view, since the status field is promoted by default") - modifiedkcpCowboy, err := wildwestClusterClient.Cluster(wildwestLocationPath).WildwestV1alpha1().Cowboys("default").Get(ctx, "luckyluke", metav1.GetOptions{}) - require.NoError(t, err) - require.NotEqual(t, kcpCowboy.ResourceVersion, modifiedkcpCowboy.ResourceVersion) - require.Equal(t, virtualWorkspaceModifiedkcpCowboy.ResourceVersion, modifiedkcpCowboy.ResourceVersion) - - expectedModifiedKcpCowboy.Status.Result = "joe in prison" - expectedModifiedKcpCowboy.Spec.Intent = "should catch joe" - require.Empty(t, cmp.Diff(expectedModifiedKcpCowboy.Status, modifiedkcpCowboy.Status)) - require.Empty(t, cmp.Diff(expectedModifiedKcpCowboy.Spec, modifiedkcpCowboy.Spec)) - }, - }, - { - name: "access kcp resources through syncer virtual workspace, from a other workspace to the wildwest resources through an APIBinding", - work: func(t *testing.T, testCaseWorkspace logicalcluster.Path) { - t.Helper() - - ctx, cancelFunc := context.WithCancel(context.Background()) - t.Cleanup(cancelFunc) - - consumerPath, consumerWorkspace := framework.NewWorkspaceFixture(t, server, testCaseWorkspace, framework.WithName("consumer"), framework.TODO_WithoutMultiShardSupport()) - consumerClusterName := logicalcluster.Name(consumerWorkspace.Spec.Cluster) - - wildwestLocationPath, _ := framework.NewWorkspaceFixture(t, server, testCaseWorkspace, framework.WithName("wildwest-locations"), framework.TODO_WithoutMultiShardSupport()) - logWithTimestampf(t, "Deploying syncer into workspace %s", wildwestLocationPath) - - wildwestSyncer := framework.NewSyncerFixture(t, server, wildwestLocationPath, - framework.WithExtraResources("cowboys.wildwest.dev"), - // empty APIExports so we do not add global kubernetes APIExport. - framework.WithAPIExports(""), - framework.WithSyncTargetName("wildwest"), - framework.WithSyncedUserWorkspaces(consumerWorkspace), - framework.WithDownstreamPreparation(func(config *rest.Config, isFakePCluster bool) { - // Always install the crd regardless of whether the target is - // logical or not since cowboys is not a native type. 
- sinkCrdClient, err := apiextensionsclientset.NewForConfig(config) - require.NoError(t, err) - logWithTimestampf(t, "Installing test CRDs into sink cluster...") - fixturewildwest.FakePClusterCreate(t, sinkCrdClient.ApiextensionsV1().CustomResourceDefinitions(), metav1.GroupResource{Group: wildwest.GroupName, Resource: "cowboys"}) - }), - ).CreateSyncTargetAndApplyToDownstream(t).StartAPIImporter(t).StartHeartBeat(t) - - logWithTimestampf(t, "Bind consumer workspace to wildwest location workspace") - framework.NewBindCompute(t, consumerPath, server, - framework.WithAPIExportsWorkloadBindOption(wildwestLocationPath.Join(workloadv1alpha1.ImportedAPISExportName).String()), - framework.WithLocationWorkspaceWorkloadBindOption(wildwestLocationPath), - ).Bind(t) - - wildwestClusterClient, err := wildwestclientset.NewForConfig(server.BaseConfig(t)) - require.NoError(t, err) - - logWithTimestampf(t, "Wait for being able to list cowboys in the consumer workspace via direct access") - framework.Eventually(t, func() (bool, string) { - _, err := wildwestClusterClient.Cluster(consumerPath).WildwestV1alpha1().Cowboys("").List(ctx, metav1.ListOptions{}) - if err != nil { - return false, err.Error() - } - return true, "" - }, wait.ForeverTestTimeout, time.Millisecond*100) - - syncTargetKey := wildwestSyncer.ToSyncTargetKey() - - logWithTimestampf(t, "Create cowboy luckyluke") - _, err = wildwestClusterClient.Cluster(consumerPath).WildwestV1alpha1().Cowboys("default").Create(ctx, &wildwestv1alpha1.Cowboy{ - ObjectMeta: metav1.ObjectMeta{ - Name: "luckyluke", - }, - Spec: wildwestv1alpha1.CowboySpec{ - Intent: "should catch joe", - }, - }, metav1.CreateOptions{}) - require.NoError(t, err) - - wildwestVWConfig := rest.CopyConfig(server.BaseConfig(t)) - framework.Eventually(t, func() (found bool, _ string) { - var err error - wildwestVWConfig.Host, found, err = framework.VirtualWorkspaceURL(ctx, kcpClusterClient, consumerWorkspace, wildwestSyncer.GetSyncerVirtualWorkspaceURLs()) - require.NoError(t, err) - return found, "Syncer virtual workspace URL not found" - }, wait.ForeverTestTimeout, time.Millisecond*100, "Syncer virtual workspace URL not found") - vwClusterClient, err := wildwestclientset.NewForConfig(wildwestVWConfig) - require.NoError(t, err) - - logWithTimestampf(t, "Verify there is one cowboy via direct access") - kcpCowboys, err := wildwestClusterClient.Cluster(consumerPath).WildwestV1alpha1().Cowboys("").List(ctx, metav1.ListOptions{}) - require.NoError(t, err) - require.Len(t, kcpCowboys.Items, 1) - - logWithTimestampf(t, "Wait until the virtual workspace has the resource") - framework.Eventually(t, func() (bool, string) { - // resources show up asynchronously, so we have to try until List works. Then it should return all object immediately. 
- _, err := vwClusterClient.WildwestV1alpha1().Cowboys().List(ctx, metav1.ListOptions{}) - if err != nil { - return false, err.Error() - } - return true, "" - }, wait.ForeverTestTimeout, time.Millisecond*100) - - logWithTimestampf(t, "Wait for resource controller to schedule cowboy and then show up via virtual workspace wildcard request") - var cowboys *wildwestv1alpha1.CowboyList - framework.Eventually(t, func() (bool, string) { - cowboys, err = vwClusterClient.WildwestV1alpha1().Cowboys().List(ctx, metav1.ListOptions{}) - require.NoError(t, err) - require.LessOrEqual(t, len(cowboys.Items), 1, "expected no other cowboy than luckyluke, got %d cowboys.", len(cowboys.Items)) - return len(cowboys.Items) == 1, fmt.Sprintf("cowboys items length: %d", len(cowboys.Items)) - }, wait.ForeverTestTimeout, time.Millisecond*100) - require.Equal(t, "luckyluke", cowboys.Items[0].Name) - - logWithTimestampf(t, "Verify there is luckyluke via direct access") - kcpCowboy, err := wildwestClusterClient.Cluster(consumerPath).WildwestV1alpha1().Cowboys("default").Get(ctx, "luckyluke", metav1.GetOptions{}) - require.NoError(t, err) - virtualWorkspaceCowboy, err := vwClusterClient.Cluster(consumerClusterName.Path()).WildwestV1alpha1().Cowboys("default").Get(ctx, "luckyluke", metav1.GetOptions{}) - require.NoError(t, err) - require.Equal(t, kcpCowboy.UID, virtualWorkspaceCowboy.UID) - require.Empty(t, cmp.Diff(kcpCowboy.Spec, virtualWorkspaceCowboy.Spec)) - require.Empty(t, cmp.Diff(kcpCowboy.Status, virtualWorkspaceCowboy.Status)) - - framework.Eventually(t, func() (bool, string) { - kcpCowboy, err = wildwestClusterClient.Cluster(consumerPath).WildwestV1alpha1().Cowboys("default").Get(ctx, "luckyluke", metav1.GetOptions{}) - if errors.IsNotFound(err) { - return false, err.Error() - } - require.NoError(t, err) - syncTargetsToSync := map[string]string{} - for name, value := range kcpCowboy.Labels { - if strings.HasPrefix(name, workloadv1alpha1.ClusterResourceStateLabelPrefix) { - syncTargetsToSync[strings.TrimPrefix(name, workloadv1alpha1.ClusterResourceStateLabelPrefix)] = value - } - } - - return len(syncTargetsToSync) == 1 && - syncTargetsToSync[syncTargetKey] == "Sync", fmt.Sprintf("%v", syncTargetsToSync) - }, wait.ForeverTestTimeout, time.Millisecond*100) - - logWithTimestampf(t, "Add the syncer finalizer to simulate the Syncer has taken ownership of it") - kcpCowboy, err = wildwestClusterClient.Cluster(consumerPath).WildwestV1alpha1().Cowboys("default").Patch(ctx, "luckyluke", types.MergePatchType, []byte(fmt.Sprintf("{\"metadata\":{\"finalizers\":[%q]}}", - shared.SyncerFinalizerNamePrefix+syncTargetKey, - )), metav1.PatchOptions{}) - require.NoError(t, err) - - logWithTimestampf(t, "Patch luckyluke via virtual workspace to report in status that joe is in prison") - _, err = vwClusterClient.Cluster(consumerClusterName.Path()).WildwestV1alpha1().Cowboys("default").Patch(ctx, "luckyluke", types.MergePatchType, []byte("{\"status\":{\"result\":\"joe in prison\"}}"), metav1.PatchOptions{}, "status") - require.NoError(t, err) - - logWithTimestampf(t, "Patch luckyluke via virtual workspace to catch averell") - _, err = vwClusterClient.Cluster(consumerClusterName.Path()).WildwestV1alpha1().Cowboys("default").Patch(ctx, "luckyluke", types.MergePatchType, []byte("{\"spec\":{\"intent\":\"should catch averell\"}}"), metav1.PatchOptions{}) - require.NoError(t, err) - - logWithTimestampf(t, "Verify that luckyluke has only status changed on the syncer view, since the spec.intent field is not part of summarized fields") - 
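// Editor's note: the scheduling checks in these tests work purely on labels:
// a resource counts as scheduled to a SyncTarget once it carries a label with
// the workloadv1alpha1.ClusterResourceStateLabelPrefix for that target's key.
// A small helper capturing that convention; the literal prefix value below is
// an assumption for illustration (the tests use the exported constant).
//
// import "strings"
func scheduledSyncTargets(labels map[string]string) map[string]string {
	const prefix = "state.workload.kcp.io/" // assumed value of ClusterResourceStateLabelPrefix
	// Map each sync target key to its recorded resource state (e.g. "Sync").
	states := map[string]string{}
	for name, value := range labels {
		if strings.HasPrefix(name, prefix) {
			states[strings.TrimPrefix(name, prefix)] = value
		}
	}
	return states
}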
virtualWorkspaceModifiedkcpCowboy, err := vwClusterClient.Cluster(consumerClusterName.Path()).WildwestV1alpha1().Cowboys("default").Get(ctx, "luckyluke", metav1.GetOptions{}) - require.NoError(t, err) - require.NotEqual(t, kcpCowboy.ResourceVersion, virtualWorkspaceModifiedkcpCowboy.ResourceVersion) - - expectedModifiedKcpCowboy := kcpCowboy.DeepCopy() - expectedModifiedKcpCowboy.Status.Result = "joe in prison" - expectedModifiedKcpCowboy.Spec.Intent = "should catch joe" - require.Empty(t, cmp.Diff(expectedModifiedKcpCowboy.Status, virtualWorkspaceModifiedkcpCowboy.Status)) - require.Empty(t, cmp.Diff(expectedModifiedKcpCowboy.Spec, virtualWorkspaceModifiedkcpCowboy.Spec)) - - logWithTimestampf(t, "Verify that luckyluke has also status changed on the upstream view, since the status field is promoted by default") - modifiedkcpCowboy, err := wildwestClusterClient.Cluster(consumerPath).WildwestV1alpha1().Cowboys("default").Get(ctx, "luckyluke", metav1.GetOptions{}) - require.NoError(t, err) - require.NotEqual(t, kcpCowboy.ResourceVersion, modifiedkcpCowboy.ResourceVersion) - require.Equal(t, virtualWorkspaceModifiedkcpCowboy.ResourceVersion, modifiedkcpCowboy.ResourceVersion) - - expectedModifiedKcpCowboy.Status.Result = "joe in prison" - expectedModifiedKcpCowboy.Spec.Intent = "should catch joe" - require.Empty(t, cmp.Diff(expectedModifiedKcpCowboy.Status, modifiedkcpCowboy.Status)) - require.Empty(t, cmp.Diff(expectedModifiedKcpCowboy.Spec, modifiedkcpCowboy.Spec)) - }, - }, - { - name: "Never promote overridden syncer view status to upstream when scheduled on 2 synctargets", - work: func(t *testing.T, testCaseWorkspace logicalcluster.Path) { - t.Helper() - - ctx, cancelFunc := context.WithCancel(context.Background()) - t.Cleanup(cancelFunc) - - kcpClusterClient, err := kcpclientset.NewForConfig(server.BaseConfig(t)) - require.NoError(t, err) - - consumerPath, consumerWorkspace := framework.NewWorkspaceFixture(t, server, testCaseWorkspace, framework.WithName("consumer"), framework.TODO_WithoutMultiShardSupport()) - consumerClusterName := logicalcluster.Name(consumerWorkspace.Spec.Cluster) - - wildwestLocationPath, _ := framework.NewWorkspaceFixture(t, server, testCaseWorkspace, framework.WithName("wildwest-locations"), framework.TODO_WithoutMultiShardSupport()) - logWithTimestampf(t, "Deploying north syncer into workspace %s", wildwestLocationPath) - - wildwestNorthSyncer := framework.NewSyncerFixture(t, server, wildwestLocationPath, - framework.WithExtraResources("cowboys.wildwest.dev"), - // empty APIExports so we do not add global kubernetes APIExport. - framework.WithAPIExports(""), - framework.WithSyncTargetName("wildwest-north"), - framework.WithSyncedUserWorkspaces(consumerWorkspace), - framework.WithDownstreamPreparation(func(config *rest.Config, isFakePCluster bool) { - // Always install the crd regardless of whether the target is - // logical or not since cowboys is not a native type. 
- sinkCrdClient, err := apiextensionsclientset.NewForConfig(config) - require.NoError(t, err) - logWithTimestampf(t, "Installing test CRDs into sink cluster...") - fixturewildwest.FakePClusterCreate(t, sinkCrdClient.ApiextensionsV1().CustomResourceDefinitions(), metav1.GroupResource{Group: wildwest.GroupName, Resource: "cowboys"}) - }), - ).CreateSyncTargetAndApplyToDownstream(t).StartAPIImporter(t).StartHeartBeat(t) - - _, err = kcpClusterClient.Cluster(wildwestLocationPath).WorkloadV1alpha1().SyncTargets().Patch(ctx, "wildwest-north", types.JSONPatchType, []byte(`[{"op":"add","path":"/metadata/labels/region","value":"north"}]`), metav1.PatchOptions{}) - require.NoError(t, err) - - logWithTimestampf(t, "Deploying south syncer into workspace %s", wildwestLocationPath) - wildwestSouthSyncer := framework.NewSyncerFixture(t, server, wildwestLocationPath, - framework.WithExtraResources("cowboys.wildwest.dev"), - framework.WithAPIExports(""), - framework.WithSyncTargetName("wildwest-south"), - framework.WithSyncedUserWorkspaces(consumerWorkspace), - framework.WithDownstreamPreparation(func(config *rest.Config, isFakePCluster bool) { - // Always install the crd regardless of whether the target is - // logical or not since cowboys is not a native type. - sinkCrdClient, err := apiextensionsclientset.NewForConfig(config) - require.NoError(t, err) - logWithTimestampf(t, "Installing test CRDs into sink cluster...") - fixturewildwest.FakePClusterCreate(t, sinkCrdClient.ApiextensionsV1().CustomResourceDefinitions(), metav1.GroupResource{Group: wildwest.GroupName, Resource: "cowboys"}) - }), - ).CreateSyncTargetAndApplyToDownstream(t).StartAPIImporter(t).StartHeartBeat(t) - - _, err = kcpClusterClient.Cluster(wildwestLocationPath).WorkloadV1alpha1().SyncTargets().Patch(ctx, "wildwest-south", types.JSONPatchType, []byte(`[{"op":"add","path":"/metadata/labels/region","value":"south"}]`), metav1.PatchOptions{}) - require.NoError(t, err) - - logWithTimestampf(t, "Create 2 locations, one for each SyncTarget") - _, err = kcpClusterClient.Cluster(wildwestLocationPath).SchedulingV1alpha1().Locations().Create(ctx, &schedulingv1alpha1.Location{ - ObjectMeta: metav1.ObjectMeta{ - Name: "firstlocation", - Labels: map[string]string{ - "region": "north", - }, - }, - Spec: schedulingv1alpha1.LocationSpec{ - InstanceSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - "region": "north", - }, - }, - Resource: schedulingv1alpha1.GroupVersionResource{ - Group: "workload.kcp.io", - Version: "v1alpha1", - Resource: "synctargets", - }, - }, - }, metav1.CreateOptions{}) - require.NoError(t, err) - - _, err = kcpClusterClient.Cluster(wildwestLocationPath).SchedulingV1alpha1().Locations().Create(ctx, &schedulingv1alpha1.Location{ - ObjectMeta: metav1.ObjectMeta{ - Name: "secondlocation", - Labels: map[string]string{ - "region": "south", - }, - }, - Spec: schedulingv1alpha1.LocationSpec{ - InstanceSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - "region": "south", - }, - }, - Resource: schedulingv1alpha1.GroupVersionResource{ - Group: "workload.kcp.io", - Version: "v1alpha1", - Resource: "synctargets", - }, - }, - }, metav1.CreateOptions{}) - require.NoError(t, err) - - logWithTimestampf(t, "Create 2 placements, one for each SyncTarget") - framework.NewBindCompute(t, consumerPath, server, - framework.WithPlacementNameBindOption("north"), - framework.WithAPIExportsWorkloadBindOption(wildwestLocationPath.Join(workloadv1alpha1.ImportedAPISExportName).String()), - 
framework.WithLocationWorkspaceWorkloadBindOption(wildwestLocationPath), - framework.WithLocationSelectorWorkloadBindOption(metav1.LabelSelector{ - MatchLabels: map[string]string{ - "region": "north", - }, - }), - ).Bind(t) - - framework.NewBindCompute(t, consumerPath, server, - framework.WithPlacementNameBindOption("south"), - framework.WithAPIExportsWorkloadBindOption(wildwestLocationPath.Join(workloadv1alpha1.ImportedAPISExportName).String()), - framework.WithLocationWorkspaceWorkloadBindOption(wildwestLocationPath), - framework.WithLocationSelectorWorkloadBindOption(metav1.LabelSelector{ - MatchLabels: map[string]string{ - "region": "south", - }, - }), - ).Bind(t) - - wildwestClusterClient, err := wildwestclientset.NewForConfig(server.BaseConfig(t)) - require.NoError(t, err) - - logWithTimestampf(t, "Wait for being able to list cowboys in the consumer workspace via direct access") - framework.Eventually(t, func() (bool, string) { - _, err := wildwestClusterClient.Cluster(consumerPath).WildwestV1alpha1().Cowboys("").List(ctx, metav1.ListOptions{}) - if err != nil { - return false, err.Error() - } - return true, "" - }, wait.ForeverTestTimeout, time.Millisecond*100) - - logWithTimestampf(t, "Create cowboy luckyluke") - _, err = wildwestClusterClient.Cluster(consumerPath).WildwestV1alpha1().Cowboys("default").Create(ctx, &wildwestv1alpha1.Cowboy{ - ObjectMeta: metav1.ObjectMeta{ - Name: "luckyluke", - }, - Spec: wildwestv1alpha1.CowboySpec{ - Intent: "should catch joe", - }, - }, metav1.CreateOptions{}) - require.NoError(t, err) - - wildwestNorthVWConfig := rest.CopyConfig(server.BaseConfig(t)) - framework.Eventually(t, func() (found bool, _ string) { - var err error - wildwestNorthVWConfig.Host, found, err = framework.VirtualWorkspaceURL(ctx, kcpClusterClient, consumerWorkspace, wildwestNorthSyncer.GetSyncerVirtualWorkspaceURLs()) - require.NoError(t, err) - return found, "Syncer virtual workspace URL not found" - }, wait.ForeverTestTimeout, time.Millisecond*100, "Syncer virtual workspace URL not found") - vwNorthClusterClient, err := wildwestclientset.NewForConfig(wildwestNorthVWConfig) - require.NoError(t, err) - - wildwestSouthVWConfig := rest.CopyConfig(server.BaseConfig(t)) - framework.Eventually(t, func() (found bool, _ string) { - var err error - wildwestSouthVWConfig.Host, found, err = framework.VirtualWorkspaceURL(ctx, kcpClusterClient, consumerWorkspace, wildwestSouthSyncer.GetSyncerVirtualWorkspaceURLs()) - require.NoError(t, err) - return found, "Syncer virtual workspace URL not found" - }, wait.ForeverTestTimeout, time.Millisecond*100, "Syncer virtual workspace URL not found") - vwSouthClusterClient, err := wildwestclientset.NewForConfig(wildwestSouthVWConfig) - require.NoError(t, err) - - logWithTimestampf(t, "Verify there is one cowboy via direct access") - kcpCowboys, err := wildwestClusterClient.Cluster(consumerPath).WildwestV1alpha1().Cowboys("").List(ctx, metav1.ListOptions{}) - require.NoError(t, err) - require.Len(t, kcpCowboys.Items, 1) - - logWithTimestampf(t, "Wait until the north virtual workspace has the resource") - framework.Eventually(t, func() (bool, string) { - // resources show up asynchronously, so we have to try until List works. Then it should return all objects immediately.
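// Editor's note: the SyncTargets above are labeled with hand-written JSON
// Patch strings like `[{"op":"add","path":"/metadata/labels/region","value":"north"}]`.
// Marshalling the operations from a struct avoids quoting mistakes; a sketch
// under the assumption that a local helper type is acceptable (jsonPatchOp is
// ours, not part of the kcp API).
//
// import "encoding/json"
//
// jsonPatchOp is a minimal RFC 6902 operation.
type jsonPatchOp struct {
	Op    string `json:"op"`
	Path  string `json:"path"`
	Value any    `json:"value,omitempty"`
}

// regionLabelPatch builds the JSON Patch used above to label a SyncTarget.
func regionLabelPatch(region string) ([]byte, error) {
	return json.Marshal([]jsonPatchOp{
		{Op: "add", Path: "/metadata/labels/region", Value: region},
	})
}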
- _, err := vwNorthClusterClient.WildwestV1alpha1().Cowboys().List(ctx, metav1.ListOptions{}) - if err != nil { - return false, err.Error() - } - return true, "" - }, wait.ForeverTestTimeout, time.Millisecond*100) - - logWithTimestampf(t, "Wait until the south virtual workspace has the resource") - framework.Eventually(t, func() (bool, string) { - // resources show up asynchronously, so we have to try until List works. Then it should return all objects immediately. - _, err := vwSouthClusterClient.WildwestV1alpha1().Cowboys().List(ctx, metav1.ListOptions{}) - if err != nil { - return false, err.Error() - } - return true, "" - }, wait.ForeverTestTimeout, time.Millisecond*100) - - logWithTimestampf(t, "Wait for resource controller to schedule cowboy on the 2 synctargets") - var kcpCowboy *wildwestv1alpha1.Cowboy - framework.Eventually(t, func() (bool, string) { - kcpCowboy, err = wildwestClusterClient.Cluster(consumerPath).WildwestV1alpha1().Cowboys("default").Get(ctx, "luckyluke", metav1.GetOptions{}) - if errors.IsNotFound(err) { - return false, err.Error() - } - require.NoError(t, err) - resourceStateLabelCount := 0 - for name := range kcpCowboy.Labels { - if strings.HasPrefix(name, workloadv1alpha1.ClusterResourceStateLabelPrefix) { - resourceStateLabelCount++ - } - } - - return resourceStateLabelCount == 2, fmt.Sprintf("resourceStateLabelCount: %d", resourceStateLabelCount) - }, wait.ForeverTestTimeout, time.Millisecond*100) - - logWithTimestampf(t, "Add the syncer finalizers to simulate that the 2 Syncers have taken ownership of it") - kcpCowboy, err = wildwestClusterClient.Cluster(consumerPath).WildwestV1alpha1().Cowboys("default").Patch(ctx, "luckyluke", types.MergePatchType, []byte(fmt.Sprintf("{\"metadata\":{\"finalizers\":[%q,%q]}}", - shared.SyncerFinalizerNamePrefix+wildwestNorthSyncer.ToSyncTargetKey(), - shared.SyncerFinalizerNamePrefix+wildwestSouthSyncer.ToSyncTargetKey(), - )), metav1.PatchOptions{}) - require.NoError(t, err) - - logWithTimestampf(t, "Patch luckyluke via north virtual workspace to report in status that joe is in northern prison") - _, err = vwNorthClusterClient.Cluster(consumerClusterName.Path()).WildwestV1alpha1().Cowboys("default").Patch(ctx, "luckyluke", types.MergePatchType, []byte("{\"status\":{\"result\":\"joe in northern prison\"}}"), metav1.PatchOptions{}, "status") - require.NoError(t, err) - - logWithTimestampf(t, "Patch luckyluke via south virtual workspace to report in status that joe is in southern prison") - _, err = vwSouthClusterClient.Cluster(consumerClusterName.Path()).WildwestV1alpha1().Cowboys("default").Patch(ctx, "luckyluke", types.MergePatchType, []byte("{\"status\":{\"result\":\"joe in southern prison\"}}"), metav1.PatchOptions{}, "status") - require.NoError(t, err) - - logWithTimestampf(t, "Verify that luckyluke has status changed on the syncer view of north syncer") - northVirtualWorkspaceModifiedkcpCowboy, err := vwNorthClusterClient.Cluster(consumerClusterName.Path()).WildwestV1alpha1().Cowboys("default").Get(ctx, "luckyluke", metav1.GetOptions{}) - require.NoError(t, err) - require.Equal(t, "joe in northern prison", northVirtualWorkspaceModifiedkcpCowboy.Status.Result) - - logWithTimestampf(t, "Verify that luckyluke has status changed on the syncer view of south syncer") - southVirtualWorkspaceModifiedkcpCowboy, err := vwSouthClusterClient.Cluster(consumerClusterName.Path()).WildwestV1alpha1().Cowboys("default").Get(ctx, "luckyluke", metav1.GetOptions{}) - require.NoError(t, err) - require.Equal(t, "joe in southern prison",
southVirtualWorkspaceModifiedkcpCowboy.Status.Result) - - logWithTimestampf(t, "Verify that luckyluke has status unchanged on the upstream view, since the status field is never promoted when a resource is scheduled to 2 synctargets") - modifiedkcpCowboy, err := wildwestClusterClient.Cluster(consumerPath).WildwestV1alpha1().Cowboys("default").Get(ctx, "luckyluke", metav1.GetOptions{}) - require.NoError(t, err) - require.Equal(t, kcpCowboy.Status, modifiedkcpCowboy.Status) - }, - }, - { - name: "Correctly manage status, with promote and unpromote, when moving a cowboy from one synctarget to the other", - work: func(t *testing.T, testCaseWorkspace logicalcluster.Path) { - t.Helper() - - ctx, cancelFunc := context.WithCancel(context.Background()) - t.Cleanup(cancelFunc) - - kcpClusterClient, err := kcpclientset.NewForConfig(server.BaseConfig(t)) - require.NoError(t, err) - - consumerPath, consumerWorkspace := framework.NewWorkspaceFixture(t, server, testCaseWorkspace, framework.WithName("consumer"), framework.TODO_WithoutMultiShardSupport()) - consumerClusterName := logicalcluster.Name(consumerWorkspace.Spec.Cluster) - - wildwestLocationPath, _ := framework.NewWorkspaceFixture(t, server, testCaseWorkspace, framework.WithName("wildwest-locations"), framework.TODO_WithoutMultiShardSupport()) - logWithTimestampf(t, "Deploying north syncer into workspace %s", wildwestLocationPath) - - wildwestNorthSyncer := framework.NewSyncerFixture(t, server, wildwestLocationPath, - framework.WithExtraResources("cowboys.wildwest.dev"), - // empty APIExports so we do not add global kubernetes APIExport. - framework.WithAPIExports(""), - framework.WithSyncTargetName("wildwest-north"), - framework.WithSyncedUserWorkspaces(consumerWorkspace), - framework.WithDownstreamPreparation(func(config *rest.Config, isFakePCluster bool) { - // Always install the crd regardless of whether the target is - // logical or not since cowboys is not a native type. - sinkCrdClient, err := apiextensionsclientset.NewForConfig(config) - require.NoError(t, err) - logWithTimestampf(t, "Installing test CRDs into sink cluster...") - fixturewildwest.FakePClusterCreate(t, sinkCrdClient.ApiextensionsV1().CustomResourceDefinitions(), metav1.GroupResource{Group: wildwest.GroupName, Resource: "cowboys"}) - }), - ).CreateSyncTargetAndApplyToDownstream(t).StartAPIImporter(t).StartHeartBeat(t) - - _, err = kcpClusterClient.Cluster(wildwestLocationPath).WorkloadV1alpha1().SyncTargets().Patch(ctx, "wildwest-north", types.JSONPatchType, []byte(`[{"op":"add","path":"/metadata/labels/region","value":"north"}]`), metav1.PatchOptions{}) - require.NoError(t, err) - - northSyncTargetKey := wildwestNorthSyncer.ToSyncTargetKey() - - logWithTimestampf(t, "Deploying south syncer into workspace %s", wildwestLocationPath) - wildwestSouthSyncer := framework.NewSyncerFixture(t, server, wildwestLocationPath, - framework.WithExtraResources("cowboys.wildwest.dev"), - framework.WithAPIExports(""), - framework.WithSyncTargetName("wildwest-south"), - framework.WithSyncedUserWorkspaces(consumerWorkspace), - framework.WithDownstreamPreparation(func(config *rest.Config, isFakePCluster bool) { - // Always install the crd regardless of whether the target is - // logical or not since cowboys is not a native type. 
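// Editor's note: the two preceding test cases pin down the status-promotion
// rule: the syncer view's status is copied ("promoted") to the upstream object
// only while the resource is scheduled to exactly one SyncTarget; with two
// targets there is no single authoritative status, so nothing is promoted.
// The function below only illustrates the rule the tests encode; it is not the
// resource controller's actual code.
func statusPromoted(syncTargetStates map[string]string) bool {
	// Count targets that are actively syncing the resource, as derived from
	// its state labels (see scheduledSyncTargets above).
	syncing := 0
	for _, state := range syncTargetStates {
		if state == "Sync" {
			syncing++
		}
	}
	// Promotion only happens while exactly one SyncTarget syncs the resource.
	return syncing == 1
}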
- sinkCrdClient, err := apiextensionsclientset.NewForConfig(config) - require.NoError(t, err) - logWithTimestampf(t, "Installing test CRDs into sink cluster...") - fixturewildwest.FakePClusterCreate(t, sinkCrdClient.ApiextensionsV1().CustomResourceDefinitions(), metav1.GroupResource{Group: wildwest.GroupName, Resource: "cowboys"}) - }), - ).CreateSyncTargetAndApplyToDownstream(t).StartAPIImporter(t).StartHeartBeat(t) - - _, err = kcpClusterClient.Cluster(wildwestLocationPath).WorkloadV1alpha1().SyncTargets().Patch(ctx, "wildwest-south", types.JSONPatchType, []byte(`[{"op":"add","path":"/metadata/labels/region","value":"south"}]`), metav1.PatchOptions{}) - require.NoError(t, err) - - southSyncTargetKey := wildwestSouthSyncer.ToSyncTargetKey() - - logWithTimestampf(t, "Delete default location") - err = kcpClusterClient.Cluster(wildwestLocationPath).SchedulingV1alpha1().Locations().Delete(ctx, "default", metav1.DeleteOptions{}) - require.NoError(t, err) - - logWithTimestampf(t, "Create 2 locations, one for each SyncTarget") - _, err = kcpClusterClient.Cluster(wildwestLocationPath).SchedulingV1alpha1().Locations().Create(ctx, &schedulingv1alpha1.Location{ - ObjectMeta: metav1.ObjectMeta{ - Name: "firstlocation", - Labels: map[string]string{ - "region": "north", - }, - }, - Spec: schedulingv1alpha1.LocationSpec{ - InstanceSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - "region": "north", - }, - }, - Resource: schedulingv1alpha1.GroupVersionResource{ - Group: "workload.kcp.io", - Version: "v1alpha1", - Resource: "synctargets", - }, - }, - }, metav1.CreateOptions{}) - require.NoError(t, err) - - _, err = kcpClusterClient.Cluster(wildwestLocationPath).SchedulingV1alpha1().Locations().Create(ctx, &schedulingv1alpha1.Location{ - ObjectMeta: metav1.ObjectMeta{ - Name: "secondlocation", - Labels: map[string]string{ - "region": "south", - }, - }, - Spec: schedulingv1alpha1.LocationSpec{ - InstanceSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - "region": "south", - }, - }, - Resource: schedulingv1alpha1.GroupVersionResource{ - Group: "workload.kcp.io", - Version: "v1alpha1", - Resource: "synctargets", - }, - }, - }, metav1.CreateOptions{}) - require.NoError(t, err) - - logWithTimestampf(t, "Using User workspace: %s", consumerPath.String()) - - logWithTimestampf(t, "Create the north placement, for the north SyncTarget") - framework.NewBindCompute(t, consumerPath, server, - framework.WithPlacementNameBindOption("north"), - framework.WithAPIExportsWorkloadBindOption(wildwestLocationPath.Join(workloadv1alpha1.ImportedAPISExportName).String()), - framework.WithLocationWorkspaceWorkloadBindOption(wildwestLocationPath), - framework.WithLocationSelectorWorkloadBindOption(metav1.LabelSelector{ - MatchLabels: map[string]string{ - "region": "north", - }, - }), - ).Bind(t) - - logWithTimestampf(t, "Wait for being able to list cowboys in the consumer workspace via direct access") - framework.Eventually(t, func() (bool, string) { - _, err := wildwestClusterClient.Cluster(consumerPath).WildwestV1alpha1().Cowboys("").List(ctx, metav1.ListOptions{}) - if err != nil { - return false, err.Error() - } - return true, "" - }, wait.ForeverTestTimeout, time.Millisecond*100) - - wildwestNorthVWConfig := rest.CopyConfig(server.BaseConfig(t)) - framework.Eventually(t, func() (found bool, _ string) { - var err error - wildwestNorthVWConfig.Host, found, err = framework.VirtualWorkspaceURL(ctx, kcpClusterClient, consumerWorkspace, wildwestNorthSyncer.GetSyncerVirtualWorkspaceURLs()) - 
require.NoError(t, err) - return found, "Syncer virtual workspace URL not found" - }, wait.ForeverTestTimeout, time.Millisecond*100, "Syncer virtual workspace URL not found") - vwNorthClusterClient, err := wildwestclientset.NewForConfig(wildwestNorthVWConfig) - require.NoError(t, err) - wildwestSouthVWConfig := rest.CopyConfig(server.BaseConfig(t)) - framework.Eventually(t, func() (found bool, _ string) { - var err error - wildwestSouthVWConfig.Host, found, err = framework.VirtualWorkspaceURL(ctx, kcpClusterClient, consumerWorkspace, wildwestSouthSyncer.GetSyncerVirtualWorkspaceURLs()) - require.NoError(t, err) - return found, "Syncer virtual workspace URL not found" - }, wait.ForeverTestTimeout, time.Millisecond*100, "Syncer virtual workspace URL not found") - vwSouthClusterClient, err := wildwestclientset.NewForConfig(wildwestSouthVWConfig) - require.NoError(t, err) - - logWithTimestampf(t, "Wait until the north virtual workspace has the resource type") - framework.Eventually(t, func() (bool, string) { - // resources show up asynchronously, so we have to try until List works. Then it should return all objects immediately. - _, err := vwNorthClusterClient.WildwestV1alpha1().Cowboys().List(ctx, metav1.ListOptions{}) - if err != nil { - return false, err.Error() - } - return true, "" - }, wait.ForeverTestTimeout, time.Millisecond*100) - - logWithTimestampf(t, "Wait until the south virtual workspace has the resource type") - framework.Eventually(t, func() (bool, string) { - // resources show up asynchronously, so we have to try until List works. Then it should return all objects immediately. - _, err := vwSouthClusterClient.WildwestV1alpha1().Cowboys().List(ctx, metav1.ListOptions{}) - if err != nil { - return false, err.Error() - } - return true, "" - }, wait.ForeverTestTimeout, time.Millisecond*100) - - logWithTimestampf(t, "Create cowboy luckyluke") - _, err = wildwestClusterClient.Cluster(consumerPath).WildwestV1alpha1().Cowboys("default").Create(ctx, &wildwestv1alpha1.Cowboy{ - ObjectMeta: metav1.ObjectMeta{ - Name: "luckyluke", - }, - Spec: wildwestv1alpha1.CowboySpec{ - Intent: "should catch joe", - }, - }, metav1.CreateOptions{}) - require.NoError(t, err) - - logWithTimestampf(t, "Verify there is one cowboy via direct access") - kcpCowboys, err := wildwestClusterClient.Cluster(consumerPath).WildwestV1alpha1().Cowboys("").List(ctx, metav1.ListOptions{}) - require.NoError(t, err) - require.Len(t, kcpCowboys.Items, 1) - - logWithTimestampf(t, "Wait for resource controller to schedule cowboy on the north synctarget, and for the syncer to own it") - var kcpCowboy *wildwestv1alpha1.Cowboy - framework.Eventually(t, func() (bool, string) { - kcpCowboy, err = wildwestClusterClient.Cluster(consumerPath).WildwestV1alpha1().Cowboys("default").Get(ctx, "luckyluke", metav1.GetOptions{}) - if errors.IsNotFound(err) { - return false, err.Error() - } - require.NoError(t, err) - syncTargetsToSync := map[string]string{} - for name, value := range kcpCowboy.Labels { - if strings.HasPrefix(name, workloadv1alpha1.ClusterResourceStateLabelPrefix) { - syncTargetsToSync[strings.TrimPrefix(name, workloadv1alpha1.ClusterResourceStateLabelPrefix)] = value - } - } - - syncTargetsWithFinalizer := sets.New[string]() - for _, name := range kcpCowboy.Finalizers { - if strings.HasPrefix(name, shared.SyncerFinalizerNamePrefix) { - syncTargetsWithFinalizer.Insert(strings.TrimPrefix(name, shared.SyncerFinalizerNamePrefix)) - } - } - - return len(syncTargetsToSync) == 1 && - syncTargetsToSync[northSyncTargetKey] == "Sync",
fmt.Sprintf("%v", syncTargetsToSync) - }, wait.ForeverTestTimeout, time.Millisecond*100) - - logWithTimestampf(t, "Add the north synctarget finalizer to simulate that the north Syncer has taken ownership of it") - kcpCowboy, err = wildwestClusterClient.Cluster(consumerPath).WildwestV1alpha1().Cowboys("default").Patch(ctx, "luckyluke", types.MergePatchType, []byte(fmt.Sprintf("{\"metadata\":{\"finalizers\":[%q]}}", - shared.SyncerFinalizerNamePrefix+wildwestNorthSyncer.ToSyncTargetKey(), - )), metav1.PatchOptions{}) - require.NoError(t, err) - - logWithTimestampf(t, "Patch luckyluke via north virtual workspace to report in status that joe is in northern prison") - _, err = vwNorthClusterClient.Cluster(consumerClusterName.Path()).WildwestV1alpha1().Cowboys("default").Patch(ctx, "luckyluke", types.MergePatchType, []byte("{\"status\":{\"result\":\"joe in northern prison\"}}"), metav1.PatchOptions{}, "status") - require.NoError(t, err) - - logWithTimestampf(t, "Verify that luckyluke has status changed on the syncer view of north syncer") - northVirtualWorkspaceModifiedkcpCowboy, err := vwNorthClusterClient.Cluster(consumerClusterName.Path()).WildwestV1alpha1().Cowboys("default").Get(ctx, "luckyluke", metav1.GetOptions{}) - require.NoError(t, err) - require.Equal(t, "joe in northern prison", northVirtualWorkspaceModifiedkcpCowboy.Status.Result) - - logWithTimestampf(t, "Verify that luckyluke has also status changed on the upstream view, since the status field is promoted when scheduled on only one synctarget") - modifiedkcpCowboy, err := wildwestClusterClient.Cluster(consumerPath).WildwestV1alpha1().Cowboys("default").Get(ctx, "luckyluke", metav1.GetOptions{}) - require.NoError(t, err) - require.Equal(t, "joe in northern prison", modifiedkcpCowboy.Status.Result) - - logWithTimestampf(t, "Create the south placement, for the south SyncTarget") - framework.NewBindCompute(t, consumerPath, server, - framework.WithPlacementNameBindOption("south"), - framework.WithAPIExportsWorkloadBindOption(wildwestLocationPath.Join(workloadv1alpha1.ImportedAPISExportName).String()), - framework.WithLocationWorkspaceWorkloadBindOption(wildwestLocationPath), - framework.WithLocationSelectorWorkloadBindOption(metav1.LabelSelector{ - MatchLabels: map[string]string{ - "region": "south", - }, - }), - ).Bind(t) - - logWithTimestampf(t, "Wait for resource controller to schedule cowboy on the 2 synctargets, and for both syncers to own it") - framework.Eventually(t, func() (bool, string) { - kcpCowboy, err = wildwestClusterClient.Cluster(consumerPath).WildwestV1alpha1().Cowboys("default").Get(ctx, "luckyluke", metav1.GetOptions{}) - if errors.IsNotFound(err) { - return false, err.Error() - } - require.NoError(t, err) - syncTargetsToSync := map[string]string{} - for name, value := range kcpCowboy.Labels { - if strings.HasPrefix(name, workloadv1alpha1.ClusterResourceStateLabelPrefix) { - syncTargetsToSync[strings.TrimPrefix(name, workloadv1alpha1.ClusterResourceStateLabelPrefix)] = value - } - } - - return len(syncTargetsToSync) == 2 && - syncTargetsToSync[northSyncTargetKey] == "Sync" && - syncTargetsToSync[southSyncTargetKey] == "Sync", fmt.Sprintf("%v", syncTargetsToSync) - }, wait.ForeverTestTimeout, time.Millisecond*100) - - logWithTimestampf(t, "Add the 2 syncer finalizers to simulate that the 2 Syncers have taken ownership of it") - kcpCowboy, err = wildwestClusterClient.Cluster(consumerPath).WildwestV1alpha1().Cowboys("default").Patch(ctx, "luckyluke", types.MergePatchType,
[]byte(fmt.Sprintf("{\"metadata\":{\"finalizers\":[%q,%q]}}", - shared.SyncerFinalizerNamePrefix+wildwestNorthSyncer.ToSyncTargetKey(), - shared.SyncerFinalizerNamePrefix+wildwestSouthSyncer.ToSyncTargetKey(), - )), metav1.PatchOptions{}) - require.NoError(t, err) - - logWithTimestampf(t, "Patch luckyluke via south virtual workspace to report in status that joe is in southern prison") - _, err = vwSouthClusterClient.Cluster(consumerClusterName.Path()).WildwestV1alpha1().Cowboys("default").Patch(ctx, "luckyluke", types.MergePatchType, []byte("{\"status\":{\"result\":\"joe in southern prison\"}}"), metav1.PatchOptions{}, "status") - require.NoError(t, err) - - logWithTimestampf(t, "Verify that luckyluke has status unchanged on the syncer view of the north syncer") - northVirtualWorkspaceModifiedkcpCowboy, err = vwNorthClusterClient.Cluster(consumerClusterName.Path()).WildwestV1alpha1().Cowboys("default").Get(ctx, "luckyluke", metav1.GetOptions{}) - require.NoError(t, err) - require.Equal(t, "joe in northern prison", northVirtualWorkspaceModifiedkcpCowboy.Status.Result) - - logWithTimestampf(t, "Verify that luckyluke has status changed on the syncer view of south syncer") - southVirtualWorkspaceModifiedkcpCowboy, err := vwSouthClusterClient.Cluster(consumerClusterName.Path()).WildwestV1alpha1().Cowboys("default").Get(ctx, "luckyluke", metav1.GetOptions{}) - require.NoError(t, err) - require.Equal(t, "joe in southern prison", southVirtualWorkspaceModifiedkcpCowboy.Status.Result) - - logWithTimestampf(t, "Verify that luckyluke has status unchanged on the upstream view, since no syncer view status has been promoted since the last promotion, because it is scheduled on 2 synctargets") - modifiedkcpCowboy, err = wildwestClusterClient.Cluster(consumerPath).WildwestV1alpha1().Cowboys("default").Get(ctx, "luckyluke", metav1.GetOptions{}) - require.NoError(t, err) - require.Equal(t, "joe in northern prison", modifiedkcpCowboy.Status.Result) - - logWithTimestampf(t, "Remove the placement for the north SyncTarget") - err = kcpClusterClient.Cluster(consumerPath).SchedulingV1alpha1().Placements().Delete(ctx, "north", metav1.DeleteOptions{}) - require.NoError(t, err) - - logWithTimestampf(t, "Wait for resource controller to plan removal from north synctarget") - framework.Eventually(t, func() (bool, string) { - kcpCowboy, err = wildwestClusterClient.Cluster(consumerPath).WildwestV1alpha1().Cowboys("default").Get(ctx, "luckyluke", metav1.GetOptions{}) - if errors.IsNotFound(err) { - return false, err.Error() - } - require.NoError(t, err) - syncTargetsToBeRemoved := map[string]string{} - for name, value := range kcpCowboy.Annotations { - if strings.HasPrefix(name, workloadv1alpha1.InternalClusterDeletionTimestampAnnotationPrefix) { - syncTargetsToBeRemoved[strings.TrimPrefix(name, workloadv1alpha1.InternalClusterDeletionTimestampAnnotationPrefix)] = value - } - } - - return len(syncTargetsToBeRemoved) == 1 && - syncTargetsToBeRemoved[northSyncTargetKey] != "", fmt.Sprintf("%v", syncTargetsToBeRemoved) - }, wait.ForeverTestTimeout, time.Millisecond*100) - - logWithTimestampf(t, "Remove the north synctarget finalizer to simulate that the north Syncer has finished with it") - _, err = vwNorthClusterClient.Cluster(consumerClusterName.Path()).WildwestV1alpha1().Cowboys("default").Patch(ctx, "luckyluke", types.MergePatchType, []byte("{\"metadata\":{\"finalizers\":null}}"), metav1.PatchOptions{}) - require.NoError(t, err) - - logWithTimestampf(t, "Wait for resource controller to schedule cowboy on the south synctarget
only") - framework.Eventually(t, func() (bool, string) { - kcpCowboy, err = wildwestClusterClient.Cluster(consumerPath).WildwestV1alpha1().Cowboys("default").Get(ctx, "luckyluke", metav1.GetOptions{}) - if errors.IsNotFound(err) { - return false, err.Error() - } - require.NoError(t, err) - syncTargetsToSync := map[string]string{} - for name, value := range kcpCowboy.Labels { - if strings.HasPrefix(name, workloadv1alpha1.ClusterResourceStateLabelPrefix) { - syncTargetsToSync[strings.TrimPrefix(name, workloadv1alpha1.ClusterResourceStateLabelPrefix)] = value - } - } - - return len(syncTargetsToSync) == 1 && - syncTargetsToSync[southSyncTargetKey] == "Sync", fmt.Sprintf("%v", syncTargetsToSync) - }, wait.ForeverTestTimeout, time.Millisecond*100) - - logWithTimestampf(t, "Verify that luckyluke is not known on the north synctarget") - _, err = vwNorthClusterClient.Cluster(consumerClusterName.Path()).WildwestV1alpha1().Cowboys("default").Get(ctx, "luckyluke", metav1.GetOptions{}) - require.EqualError(t, err, `cowboys.wildwest.dev "luckyluke" not found`) - - logWithTimestampf(t, "Verify that luckyluke has status unchanged on the syncer view of south syncer") - southVirtualWorkspaceModifiedkcpCowboy, err = vwSouthClusterClient.Cluster(consumerClusterName.Path()).WildwestV1alpha1().Cowboys("default").Get(ctx, "luckyluke", metav1.GetOptions{}) - require.NoError(t, err) - require.Equal(t, "joe in southern prison", southVirtualWorkspaceModifiedkcpCowboy.Status.Result) - - logWithTimestampf(t, "Verify that luckyluke has now status changed on the upstream view, since the status for the south syncer has now been promoted to upstream.") - modifiedkcpCowboy, err = wildwestClusterClient.Cluster(consumerPath).WildwestV1alpha1().Cowboys("default").Get(ctx, "luckyluke", metav1.GetOptions{}) - require.NoError(t, err) - require.Equal(t, "joe in southern prison", modifiedkcpCowboy.Status.Result) - }, - }, - { - name: "Transform spec through spec-diff annotation", - work: func(t *testing.T, testCaseWorkspace logicalcluster.Path) { - t.Helper() - - ctx, cancelFunc := context.WithCancel(context.Background()) - t.Cleanup(cancelFunc) - - consumerPath, consumerWorkspace := framework.NewWorkspaceFixture(t, server, testCaseWorkspace, framework.WithName("consumer"), framework.TODO_WithoutMultiShardSupport()) - consumerClusterName := logicalcluster.Name(consumerWorkspace.Spec.Cluster) - - wildwestLocationPath, _ := framework.NewWorkspaceFixture(t, server, testCaseWorkspace, framework.WithName("wildwest-locations"), framework.TODO_WithoutMultiShardSupport()) - logWithTimestampf(t, "Deploying syncer into workspace %s", wildwestLocationPath) - - wildwestSyncer := framework.NewSyncerFixture(t, server, wildwestLocationPath, - framework.WithExtraResources("cowboys.wildwest.dev"), - // empty APIExports so we do not add global kubernetes APIExport. - framework.WithAPIExports(""), - framework.WithSyncTargetName("wildwest"), - framework.WithSyncedUserWorkspaces(consumerWorkspace), - framework.WithDownstreamPreparation(func(config *rest.Config, isFakePCluster bool) { - // Always install the crd regardless of whether the target is - // logical or not since cowboys is not a native type. 
- sinkCrdClient, err := apiextensionsclientset.NewForConfig(config) - require.NoError(t, err) - logWithTimestampf(t, "Installing test CRDs into sink cluster...") - fixturewildwest.FakePClusterCreate(t, sinkCrdClient.ApiextensionsV1().CustomResourceDefinitions(), metav1.GroupResource{Group: wildwest.GroupName, Resource: "cowboys"}) - }), - ).CreateSyncTargetAndApplyToDownstream(t).StartAPIImporter(t).StartHeartBeat(t) - - logWithTimestampf(t, "Bind consumer workspace to wildwest location workspace") - framework.NewBindCompute(t, consumerPath, server, - framework.WithAPIExportsWorkloadBindOption(wildwestLocationPath.Join(workloadv1alpha1.ImportedAPISExportName).String()), - framework.WithLocationWorkspaceWorkloadBindOption(wildwestLocationPath), - ).Bind(t) - - wildwestClusterClient, err := wildwestclientset.NewForConfig(server.BaseConfig(t)) - require.NoError(t, err) - - logWithTimestampf(t, "Wait for being able to list cowboys in the consumer workspace via direct access") - framework.Eventually(t, func() (bool, string) { - _, err := wildwestClusterClient.Cluster(consumerPath).WildwestV1alpha1().Cowboys("").List(ctx, metav1.ListOptions{}) - if err != nil { - return false, err.Error() - } - return true, "" - }, wait.ForeverTestTimeout, time.Millisecond*100) - - syncTargetKey := wildwestSyncer.ToSyncTargetKey() - - logWithTimestampf(t, "Create cowboy luckyluke") - _, err = wildwestClusterClient.Cluster(consumerPath).WildwestV1alpha1().Cowboys("default").Create(ctx, &wildwestv1alpha1.Cowboy{ - ObjectMeta: metav1.ObjectMeta{ - Name: "luckyluke", - Annotations: map[string]string{ - "experimental.spec-diff.workload.kcp.io/" + syncTargetKey: `[{ "op": "replace", "path": "/intent", "value": "should catch joe and averell" }]`, - }, - }, - Spec: wildwestv1alpha1.CowboySpec{ - Intent: "should catch joe", - }, - }, metav1.CreateOptions{}) - require.NoError(t, err) - - logWithTimestampf(t, "Verify there is one cowboy via direct access") - kcpCowboys, err := wildwestClusterClient.Cluster(consumerPath).WildwestV1alpha1().Cowboys("").List(ctx, metav1.ListOptions{}) - require.NoError(t, err) - require.Len(t, kcpCowboys.Items, 1) - - wildwestVWConfig := rest.CopyConfig(server.BaseConfig(t)) - framework.Eventually(t, func() (found bool, _ string) { - var err error - wildwestVWConfig.Host, found, err = framework.VirtualWorkspaceURL(ctx, kcpClusterClient, consumerWorkspace, wildwestSyncer.GetSyncerVirtualWorkspaceURLs()) - require.NoError(t, err) - return found, "Syncer virtual workspace URL not found" - }, wait.ForeverTestTimeout, time.Millisecond*100, "Syncer virtual workspace URL not found") - vwClusterClient, err := wildwestclientset.NewForConfig(wildwestVWConfig) - require.NoError(t, err) - - logWithTimestampf(t, "Wait until the virtual workspace has the resource") - framework.Eventually(t, func() (bool, string) { - // resources show up asynchronously, so we have to try until List works. Then it should return all objects immediately.
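// Editor's note: the experimental.spec-diff.workload.kcp.io/<syncTargetKey>
// annotation created above carries an RFC 6902 JSON Patch that the syncer view
// applies to the object's spec for that one target. The sketch below shows
// that transformation with the evanphx/json-patch library; the library choice
// and the helper name are our assumptions for illustration, not necessarily
// what the syncer used internally.
//
// import jsonpatch "github.com/evanphx/json-patch"
//
// applySpecDiff applies a spec-diff annotation value to a JSON-encoded spec.
// For the annotation above, {"intent":"should catch joe"} becomes
// {"intent":"should catch joe and averell"}.
func applySpecDiff(specJSON []byte, diff string) ([]byte, error) {
	patch, err := jsonpatch.DecodePatch([]byte(diff))
	if err != nil {
		return nil, err
	}
	return patch.Apply(specJSON)
}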
- _, err := vwClusterClient.WildwestV1alpha1().Cowboys().List(ctx, metav1.ListOptions{}) - if err != nil { - return false, err.Error() - } - return true, "" - }, wait.ForeverTestTimeout, time.Millisecond*100) - - logWithTimestampf(t, "Wait for resource controller to schedule cowboy and then show up via virtual workspace wildcard request") - var cowboys *wildwestv1alpha1.CowboyList - framework.Eventually(t, func() (bool, string) { - cowboys, err = vwClusterClient.WildwestV1alpha1().Cowboys().List(ctx, metav1.ListOptions{}) - require.NoError(t, err) - require.LessOrEqual(t, len(cowboys.Items), 1, "expected no other cowboy than luckyluke, got %d cowboys.", len(cowboys.Items)) - return len(cowboys.Items) == 1, fmt.Sprintf("cowboys items length: %d", len(cowboys.Items)) - }, wait.ForeverTestTimeout, time.Millisecond*100) - require.Equal(t, "luckyluke", cowboys.Items[0].Name) - - logWithTimestampf(t, "Verify there is luckyluke via direct access") - kcpCowboy, err := wildwestClusterClient.Cluster(consumerPath).WildwestV1alpha1().Cowboys("default").Get(ctx, "luckyluke", metav1.GetOptions{}) - require.NoError(t, err) - virtualWorkspaceCowboy, err := vwClusterClient.Cluster(consumerClusterName.Path()).WildwestV1alpha1().Cowboys("default").Get(ctx, "luckyluke", metav1.GetOptions{}) - require.NoError(t, err) - require.Equal(t, kcpCowboy.UID, virtualWorkspaceCowboy.UID) - - expectedModifiedKcpCowboy := kcpCowboy.DeepCopy() - expectedModifiedKcpCowboy.Status.Result = "" - expectedModifiedKcpCowboy.Spec.Intent = "should catch joe and averell" - require.Empty(t, cmp.Diff(expectedModifiedKcpCowboy.Spec, virtualWorkspaceCowboy.Spec)) - }, - }, - { - name: "Override summarizing rules to disable status promotion", - work: func(t *testing.T, testCaseWorkspace logicalcluster.Path) { - t.Helper() - - ctx, cancelFunc := context.WithCancel(context.Background()) - t.Cleanup(cancelFunc) - - wildwestClusterClient, err := wildwestclientset.NewForConfig(server.BaseConfig(t)) - require.NoError(t, err) - - consumerPath, consumerWorkspace := framework.NewWorkspaceFixture(t, server, testCaseWorkspace, framework.WithName("consumer"), framework.TODO_WithoutMultiShardSupport()) - consumerClusterName := logicalcluster.Name(consumerWorkspace.Spec.Cluster) - - wildwestLocationPath, _ := framework.NewWorkspaceFixture(t, server, testCaseWorkspace, framework.WithName("wildwest-locations"), framework.TODO_WithoutMultiShardSupport()) - logWithTimestampf(t, "Deploying syncer into workspace %s", wildwestLocationPath) - - wildwestSyncer := framework.NewSyncerFixture(t, server, wildwestLocationPath, - framework.WithExtraResources("cowboys.wildwest.dev"), - // empty APIExports so we do not add global kubernetes APIExport. - framework.WithAPIExports(""), - framework.WithSyncTargetName("wildwest"), - framework.WithSyncedUserWorkspaces(consumerWorkspace), - framework.WithDownstreamPreparation(func(config *rest.Config, isFakePCluster bool) { - // Always install the crd regardless of whether the target is - // logical or not since cowboys is not a native type. 
- sinkCrdClient, err := apiextensionsclientset.NewForConfig(config) - require.NoError(t, err) - logWithTimestampf(t, "Installing test CRDs into sink cluster...") - fixturewildwest.FakePClusterCreate(t, sinkCrdClient.ApiextensionsV1().CustomResourceDefinitions(), metav1.GroupResource{Group: wildwest.GroupName, Resource: "cowboys"}) - }), - ).CreateSyncTargetAndApplyToDownstream(t).StartAPIImporter(t).StartHeartBeat(t) - - logWithTimestampf(t, "Bind consumer workspace to wildwest location workspace") - framework.NewBindCompute(t, consumerPath, server, - framework.WithAPIExportsWorkloadBindOption(wildwestLocationPath.Join(workloadv1alpha1.ImportedAPISExportName).String()), - framework.WithLocationWorkspaceWorkloadBindOption(wildwestLocationPath), - ).Bind(t) - - syncTargetKey := wildwestSyncer.ToSyncTargetKey() - - logWithTimestampf(t, "Wait for being able to list cowboys in the consumer workspace via direct access") - framework.Eventually(t, func() (bool, string) { - _, err := wildwestClusterClient.Cluster(consumerPath).WildwestV1alpha1().Cowboys("").List(ctx, metav1.ListOptions{}) - if err != nil { - return false, err.Error() - } - return true, "" - }, wait.ForeverTestTimeout, time.Millisecond*100) - - logWithTimestampf(t, "Create cowboy luckyluke") - _, err = wildwestClusterClient.Cluster(consumerPath).WildwestV1alpha1().Cowboys("default").Create(ctx, &wildwestv1alpha1.Cowboy{ - ObjectMeta: metav1.ObjectMeta{ - Name: "luckyluke", - Annotations: map[string]string{ - "experimental.summarizing.workload.kcp.io": `[{"fieldPath": "status", "promoteToUpstream": false}]`, - }, - }, - Spec: wildwestv1alpha1.CowboySpec{ - Intent: "should catch joe", - }, - }, metav1.CreateOptions{}) - require.NoError(t, err) - - wildwestVWConfig := rest.CopyConfig(server.BaseConfig(t)) - framework.Eventually(t, func() (found bool, _ string) { - var err error - wildwestVWConfig.Host, found, err = framework.VirtualWorkspaceURL(ctx, kcpClusterClient, consumerWorkspace, wildwestSyncer.GetSyncerVirtualWorkspaceURLs()) - require.NoError(t, err) - return found, "Syncer virtual workspace URL not found" - }, wait.ForeverTestTimeout, time.Millisecond*100, "Syncer virtual workspace URL not found") - vwClusterClient, err := wildwestclientset.NewForConfig(wildwestVWConfig) - require.NoError(t, err) - - logWithTimestampf(t, "Verify there is one cowboy via direct access") - kcpCowboys, err := wildwestClusterClient.Cluster(consumerPath).WildwestV1alpha1().Cowboys("").List(ctx, metav1.ListOptions{}) - require.NoError(t, err) - require.Len(t, kcpCowboys.Items, 1) - - logWithTimestampf(t, "Wait until the virtual workspace has the cowboy resource type") - framework.Eventually(t, func() (bool, string) { - // resources show up asynchronously, so we have to try until List works. Then it should return all objects immediately.
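// Editor's note: the experimental.summarizing.workload.kcp.io annotation set
// on luckyluke above overrides the default summarizing rules; its value is a
// JSON list of per-field settings. A sketch of decoding it follows; the struct
// shape is inferred from the JSON used in this test, not taken from the kcp
// API, and the helper name is ours.
//
// import "encoding/json"
//
// fieldRule mirrors one entry of the summarizing annotation, e.g.
// [{"fieldPath": "status", "promoteToUpstream": false}].
type fieldRule struct {
	FieldPath         string `json:"fieldPath"`
	PromoteToUpstream bool   `json:"promoteToUpstream"`
}

func parseSummarizingRules(annotations map[string]string) ([]fieldRule, error) {
	raw, ok := annotations["experimental.summarizing.workload.kcp.io"]
	if !ok {
		return nil, nil // no override: default summarizing rules apply
	}
	var rules []fieldRule
	err := json.Unmarshal([]byte(raw), &rules)
	return rules, err
}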
- _, err := vwClusterClient.WildwestV1alpha1().Cowboys().List(ctx, metav1.ListOptions{}) - if err != nil { - return false, err.Error() - } - return true, "" - }, wait.ForeverTestTimeout, time.Millisecond*100) - - logWithTimestampf(t, "Wait for resource controller to schedule cowboy and then show up via virtual workspace wildcard request") - var cowboys *wildwestv1alpha1.CowboyList - framework.Eventually(t, func() (bool, string) { - cowboys, err = vwClusterClient.WildwestV1alpha1().Cowboys().List(ctx, metav1.ListOptions{}) - require.NoError(t, err) - require.LessOrEqual(t, len(cowboys.Items), 1, "expected no other cowboy than luckyluke, got %d cowboys.", len(cowboys.Items)) - return len(cowboys.Items) == 1, fmt.Sprintf("cowboys items length: %d", len(cowboys.Items)) - }, wait.ForeverTestTimeout, time.Millisecond*100) - require.Equal(t, "luckyluke", cowboys.Items[0].Name) - - logWithTimestampf(t, "Verify there is luckyluke via direct access and through virtual workspace") - kcpCowboy, err := wildwestClusterClient.Cluster(consumerPath).WildwestV1alpha1().Cowboys("default").Get(ctx, "luckyluke", metav1.GetOptions{}) - require.NoError(t, err) - virtualWorkspaceCowboy, err := vwClusterClient.Cluster(consumerClusterName.Path()).WildwestV1alpha1().Cowboys("default").Get(ctx, "luckyluke", metav1.GetOptions{}) - require.NoError(t, err) - require.Equal(t, kcpCowboy.UID, virtualWorkspaceCowboy.UID) - require.Equal(t, kcpCowboy.Spec, virtualWorkspaceCowboy.Spec) - require.Equal(t, kcpCowboy.Status, virtualWorkspaceCowboy.Status) - framework.Eventually(t, func() (bool, string) { - kcpCowboy, err = wildwestClusterClient.Cluster(consumerPath).WildwestV1alpha1().Cowboys("default").Get(ctx, "luckyluke", metav1.GetOptions{}) - if errors.IsNotFound(err) { - return false, err.Error() - } - require.NoError(t, err) - syncTargetsToSync := map[string]string{} - for name, value := range kcpCowboy.Labels { - if strings.HasPrefix(name, workloadv1alpha1.ClusterResourceStateLabelPrefix) { - syncTargetsToSync[strings.TrimPrefix(name, workloadv1alpha1.ClusterResourceStateLabelPrefix)] = value - } - } - - syncTargetsWithFinalizer := sets.New[string]() - for _, name := range kcpCowboy.Finalizers { - if strings.HasPrefix(name, shared.SyncerFinalizerNamePrefix) { - syncTargetsWithFinalizer.Insert(strings.TrimPrefix(name, shared.SyncerFinalizerNamePrefix)) - } - } - - return len(syncTargetsToSync) == 1 && - syncTargetsToSync[syncTargetKey] == "Sync", fmt.Sprintf("%v", syncTargetsToSync) - }, wait.ForeverTestTimeout, time.Millisecond*100) - - logWithTimestampf(t, "Add the syncer finalizer to simulate the Syncer has taken ownership of it") - kcpCowboy, err = wildwestClusterClient.Cluster(consumerPath).WildwestV1alpha1().Cowboys("default").Patch(ctx, "luckyluke", types.MergePatchType, []byte(fmt.Sprintf("{\"metadata\":{\"finalizers\":[%q]}}", - shared.SyncerFinalizerNamePrefix+syncTargetKey, - )), metav1.PatchOptions{}) - require.NoError(t, err) - - logWithTimestampf(t, "Patch luckyluke via virtual workspace to report in status that joe is in prison") - _, err = vwClusterClient.Cluster(consumerClusterName.Path()).WildwestV1alpha1().Cowboys("default").Patch(ctx, "luckyluke", types.MergePatchType, []byte("{\"status\":{\"result\":\"joe in prison\"}}"), metav1.PatchOptions{}, "status") - require.NoError(t, err) - - logWithTimestampf(t, "Patch luckyluke via virtual workspace to catch averell") - _, err = vwClusterClient.Cluster(consumerClusterName.Path()).WildwestV1alpha1().Cowboys("default").Patch(ctx, "luckyluke", 
types.MergePatchType, []byte("{\"spec\":{\"intent\":\"should catch averell\"}}"), metav1.PatchOptions{}) - require.NoError(t, err) - - logWithTimestampf(t, "Verify that luckyluke has only status changed on the syncer view, since the spec.intent field is not part of summarized fields") - virtualWorkspaceModifiedkcpCowboy, err := vwClusterClient.Cluster(consumerClusterName.Path()).WildwestV1alpha1().Cowboys("default").Get(ctx, "luckyluke", metav1.GetOptions{}) - require.NoError(t, err) - require.NotEqual(t, kcpCowboy.ResourceVersion, virtualWorkspaceModifiedkcpCowboy.ResourceVersion) - - expectedModifiedKcpCowboy := kcpCowboy.DeepCopy() - expectedModifiedKcpCowboy.Status.Result = "joe in prison" - expectedModifiedKcpCowboy.Spec.Intent = "should catch joe" - require.Empty(t, cmp.Diff(expectedModifiedKcpCowboy.Status, virtualWorkspaceModifiedkcpCowboy.Status)) - require.Empty(t, cmp.Diff(expectedModifiedKcpCowboy.Spec, virtualWorkspaceModifiedkcpCowboy.Spec)) - - logWithTimestampf(t, "Verify that luckyluke has status unchanged on the upstream view, since the status field promotion has been disabled by annotation") - modifiedkcpCowboy, err := wildwestClusterClient.Cluster(consumerPath).WildwestV1alpha1().Cowboys("default").Get(ctx, "luckyluke", metav1.GetOptions{}) - require.NoError(t, err) - require.NotEqual(t, kcpCowboy.ResourceVersion, modifiedkcpCowboy.ResourceVersion) - require.Equal(t, virtualWorkspaceModifiedkcpCowboy.ResourceVersion, modifiedkcpCowboy.ResourceVersion) - - expectedModifiedKcpCowboy.Status.Result = "" - expectedModifiedKcpCowboy.Spec.Intent = "should catch joe" - require.Empty(t, cmp.Diff(expectedModifiedKcpCowboy.Status, modifiedkcpCowboy.Status)) - require.Empty(t, cmp.Diff(expectedModifiedKcpCowboy.Spec, modifiedkcpCowboy.Spec)) - }, - }, - } - - for _, testCase := range testCases { - testCase := testCase - t.Run(testCase.name, func(t *testing.T) { - t.Parallel() - - orgPath, _ := framework.NewOrganizationFixture(t, server, framework.TODO_WithoutMultiShardSupport(), framework.TODO_WithoutMultiShardSupport()) - - testCase.work(t, orgPath) - }) - } -} - -func TestUpsyncerVirtualWorkspace(t *testing.T) { - t.Parallel() - framework.Suite(t, "transparent-multi-cluster") - - server := framework.SharedKcpServer(t) - - kubeClusterClient, err := kcpkubernetesclientset.NewForConfig(server.BaseConfig(t)) - require.NoError(t, err) - - kcpClusterClient, err := kcpclientset.NewForConfig(server.BaseConfig(t)) - require.NoError(t, err) - - var testCases = []struct { - name string - work func(t *testing.T, syncer *framework.StartedSyncerFixture, clusterName logicalcluster.Name, ws *tenancyv1alpha1.Workspace, path logicalcluster.Path, syncTargetKey string) - }{ - { - name: "list kcp resources through upsyncer virtual workspace", - work: func(t *testing.T, syncer *framework.StartedSyncerFixture, clusterName logicalcluster.Name, ws *tenancyv1alpha1.Workspace, path logicalcluster.Path, syncTargetKey string) { - t.Helper() - - ctx, cancelFunc := context.WithCancel(context.Background()) - t.Cleanup(cancelFunc) - - upsyncerVWConfig := rest.CopyConfig(server.BaseConfig(t)) - framework.Eventually(t, func() (found bool, _ string) { - var err error - upsyncerVWConfig.Host, found, err = framework.VirtualWorkspaceURL(ctx, kcpClusterClient, ws, syncer.GetUpsyncerVirtualWorkspaceURLs()) - require.NoError(t, err) - return found, "Upsyncer virtual workspace URL not found" - }, wait.ForeverTestTimeout, time.Millisecond*100, "Upsyncer virtual workspace URL not found") - kubelikeSyncerVWClient, err := 
kcpkubernetesclientset.NewForConfig(upsyncerVWConfig) - require.NoError(t, err) - - pv := &corev1.PersistentVolume{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pv", - Labels: map[string]string{ - "state.workload.kcp.io/" + syncTargetKey: "Upsync", - }, - }, - Spec: corev1.PersistentVolumeSpec{ - AccessModes: []corev1.PersistentVolumeAccessMode{ - corev1.ReadWriteOnce, - }, - Capacity: corev1.ResourceList{ - corev1.ResourceStorage: resource.MustParse("1Gi"), - }, - PersistentVolumeSource: corev1.PersistentVolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/tmp/data", - }, - }, - }, - } - logWithTimestampf(t, "Creating PV %s through direct kube client ...", pv.Name) - _, err = kubeClusterClient.CoreV1().PersistentVolumes().Cluster(path).Create(ctx, pv, metav1.CreateOptions{}) - require.NoError(t, err) - - logWithTimestampf(t, "Listing PVs through upsyncer virtual workspace...") - pvs, err := kubelikeSyncerVWClient.CoreV1().PersistentVolumes().Cluster(clusterName.Path()).List(ctx, metav1.ListOptions{}) - require.NoError(t, err) - - logWithTimestampf(t, "Checking if we can find the PV we created in the source cluster: test-pv") - require.Len(t, pvs.Items, 1) - }, - }, - { - name: "create a persistentvolume in kcp through upsyncer virtual workspace", - work: func(t *testing.T, syncer *framework.StartedSyncerFixture, clusterName logicalcluster.Name, ws *tenancyv1alpha1.Workspace, path logicalcluster.Path, syncTargetKey string) { - t.Helper() - - ctx, cancelFunc := context.WithCancel(context.Background()) - t.Cleanup(cancelFunc) - - upsyncerVWConfig := rest.CopyConfig(server.BaseConfig(t)) - framework.Eventually(t, func() (found bool, _ string) { - var err error - upsyncerVWConfig.Host, found, err = framework.VirtualWorkspaceURL(ctx, kcpClusterClient, ws, syncer.GetUpsyncerVirtualWorkspaceURLs()) - require.NoError(t, err) - return found, "Upsyncer virtual workspace URL not found" - }, wait.ForeverTestTimeout, time.Millisecond*100, "Upsyncer virtual workspace URL not found") - kubelikeSyncerVWClient, err := kcpkubernetesclientset.NewForConfig(upsyncerVWConfig) - require.NoError(t, err) - - pv := &corev1.PersistentVolume{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pv", - Labels: map[string]string{ - "state.workload.kcp.io/" + syncTargetKey: "Upsync", - }, - }, - Spec: corev1.PersistentVolumeSpec{ - AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, - Capacity: corev1.ResourceList{ - corev1.ResourceStorage: resource.MustParse("1Gi"), - }, - PersistentVolumeSource: corev1.PersistentVolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/tmp/data", - }, - }, - }, - } - - logWithTimestampf(t, "Creating PV test-pv through upsyncer virtual workspace...") - pv, err = kubelikeSyncerVWClient.CoreV1().PersistentVolumes().Cluster(clusterName.Path()).Create(ctx, pv, metav1.CreateOptions{}) - require.NoError(t, err) - require.Empty(t, pv.Status) - - logWithTimestampf(t, "Updating status of the PV test-pv through upsyncer virtual workspace...") - pv.Status.Phase = corev1.VolumeAvailable - _, err = kubelikeSyncerVWClient.CoreV1().PersistentVolumes().Cluster(clusterName.Path()).UpdateStatus(ctx, pv, metav1.UpdateOptions{}) - require.NoError(t, err) - - logWithTimestampf(t, "Checking if the PV test-pv was created in the source cluster...") - pv, err = kubeClusterClient.CoreV1().PersistentVolumes().Cluster(path).Get(ctx, "test-pv", metav1.GetOptions{}) - require.NoError(t, err) - require.Equal(t, corev1.VolumeAvailable, pv.Status.Phase) - }, - }, - { - name: 
"create a persistentvolume in kcp through upsyncer virtual workspace with a resource transformation", - work: func(t *testing.T, syncer *framework.StartedSyncerFixture, clusterName logicalcluster.Name, ws *tenancyv1alpha1.Workspace, path logicalcluster.Path, syncTargetKey string) { - t.Helper() - - ctx, cancelFunc := context.WithCancel(context.Background()) - t.Cleanup(cancelFunc) - - upsyncerVWConfig := rest.CopyConfig(server.BaseConfig(t)) - framework.Eventually(t, func() (found bool, _ string) { - var err error - upsyncerVWConfig.Host, found, err = framework.VirtualWorkspaceURL(ctx, kcpClusterClient, ws, syncer.GetUpsyncerVirtualWorkspaceURLs()) - require.NoError(t, err) - return found, "Upsyncer virtual workspace URL not found" - }, wait.ForeverTestTimeout, time.Millisecond*100, "Upsyncer virtual workspace URL not found") - kubelikeSyncerVWClient, err := kcpkubernetesclientset.NewForConfig(upsyncerVWConfig) - require.NoError(t, err) - - pv := &corev1.PersistentVolume{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pv", - Labels: map[string]string{ - "state.workload.kcp.io/" + syncTargetKey: "Upsync", - }, - Annotations: map[string]string{ - "internal.workload.kcp.io/upsyncdiff" + syncTargetKey: "[{\"op\":\"replace\",\"path\":\"/spec/capacity/storage\",\"value\":\"2Gi\"}]", - }, - }, - Spec: corev1.PersistentVolumeSpec{ - AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, - Capacity: corev1.ResourceList{ - corev1.ResourceStorage: resource.MustParse("1Gi"), - }, - PersistentVolumeSource: corev1.PersistentVolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/tmp/data", - }, - }, - }, - } - - logWithTimestampf(t, "Creating PV test-pv through upsyncer virtual workspace...") - _, err = kubelikeSyncerVWClient.CoreV1().PersistentVolumes().Cluster(clusterName.Path()).Create(ctx, pv, metav1.CreateOptions{}) - require.NoError(t, err) - - logWithTimestampf(t, "Checking if the PV test-pv4 was created in the source cluster...") - pvCreated, err := kubeClusterClient.CoreV1().PersistentVolumes().Cluster(path).Get(ctx, "test-pv", metav1.GetOptions{}) - require.NoError(t, err) - - logWithTimestampf(t, "Checking if the PV test-pv was created with the correct values after transformation...") - require.Equal(t, resource.MustParse("2Gi"), pvCreated.Spec.Capacity[corev1.ResourceStorage]) - }, - }, - { - name: "try to create a persistentvolume in kcp through upsyncer virtual workspace, without the statelabel set to Upsync, should fail", - work: func(t *testing.T, syncer *framework.StartedSyncerFixture, clusterName logicalcluster.Name, ws *tenancyv1alpha1.Workspace, path logicalcluster.Path, syncTargetKey string) { - t.Helper() - - ctx, cancelFunc := context.WithCancel(context.Background()) - t.Cleanup(cancelFunc) - - upsyncerVWConfig := rest.CopyConfig(server.BaseConfig(t)) - framework.Eventually(t, func() (found bool, _ string) { - var err error - upsyncerVWConfig.Host, found, err = framework.VirtualWorkspaceURL(ctx, kcpClusterClient, ws, syncer.GetUpsyncerVirtualWorkspaceURLs()) - require.NoError(t, err) - return found, "Upsyncer virtual workspace URL not found" - }, wait.ForeverTestTimeout, time.Millisecond*100, "Upsyncer virtual workspace URL not found") - kubelikeSyncerVWClient, err := kcpkubernetesclientset.NewForConfig(upsyncerVWConfig) - require.NoError(t, err) - - pv := &corev1.PersistentVolume{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pv", - Labels: map[string]string{ - "state.workload.kcp.io/" + syncTargetKey: "notupsync", - }, - }, - Spec: 
corev1.PersistentVolumeSpec{ - AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, - Capacity: corev1.ResourceList{ - corev1.ResourceStorage: resource.MustParse("1Gi"), - }, - PersistentVolumeSource: corev1.PersistentVolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/tmp/data", - }, - }, - }, - } - - logWithTimestampf(t, "Creating PV test-pv through upsyncer virtual workspace...") - _, err = kubelikeSyncerVWClient.CoreV1().PersistentVolumes().Cluster(clusterName.Path()).Create(ctx, pv, metav1.CreateOptions{}) - require.Error(t, err) - }, - }, - { - name: "update a persistentvolume in kcp through upsyncer virtual workspace", - work: func(t *testing.T, syncer *framework.StartedSyncerFixture, clusterName logicalcluster.Name, ws *tenancyv1alpha1.Workspace, path logicalcluster.Path, syncTargetKey string) { - t.Helper() - - ctx, cancelFunc := context.WithCancel(context.Background()) - t.Cleanup(cancelFunc) - - upsyncerVWConfig := rest.CopyConfig(server.BaseConfig(t)) - framework.Eventually(t, func() (found bool, _ string) { - var err error - upsyncerVWConfig.Host, found, err = framework.VirtualWorkspaceURL(ctx, kcpClusterClient, ws, syncer.GetUpsyncerVirtualWorkspaceURLs()) - require.NoError(t, err) - return found, "Upsyncer virtual workspace URL not found" - }, wait.ForeverTestTimeout, time.Millisecond*100, "Upsyncer virtual workspace URL not found") - kubelikeSyncerVWClient, err := kcpkubernetesclientset.NewForConfig(upsyncerVWConfig) - require.NoError(t, err) - - logWithTimestampf(t, "Creating a persistentvolume in the kubelike source cluster...") - pv := &corev1.PersistentVolume{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pv", - Labels: map[string]string{ - "state.workload.kcp.io/" + syncTargetKey: "Upsync", - }, - }, - Spec: corev1.PersistentVolumeSpec{ - AccessModes: []corev1.PersistentVolumeAccessMode{ - corev1.ReadWriteOnce, - }, - Capacity: corev1.ResourceList{ - corev1.ResourceStorage: resource.MustParse("1Gi"), - }, - PersistentVolumeSource: corev1.PersistentVolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/tmp/data", - }, - }, - }, - } - logWithTimestampf(t, "Creating PV %s through direct kube client ...", pv.Name) - _, err = kubeClusterClient.CoreV1().PersistentVolumes().Cluster(path).Create(ctx, pv, metav1.CreateOptions{}) - require.NoError(t, err) - - logWithTimestampf(t, "Getting PV test-pv through upsyncer virtual workspace...") - pv, err = kubelikeSyncerVWClient.CoreV1().PersistentVolumes().Cluster(clusterName.Path()).Get(ctx, "test-pv", metav1.GetOptions{}) - require.NoError(t, err) - - pv.Spec.PersistentVolumeSource.HostPath.Path = "/tmp/data2" - - logWithTimestampf(t, "Updating PV test-pv through upsyncer virtual workspace...") - _, err = kubelikeSyncerVWClient.CoreV1().PersistentVolumes().Cluster(clusterName.Path()).Update(ctx, pv, metav1.UpdateOptions{}) - require.NoError(t, err) - - logWithTimestampf(t, "Checking if the PV test-pv was updated in the source cluster...") - pv, err = kubeClusterClient.CoreV1().PersistentVolumes().Cluster(path).Get(ctx, "test-pv", metav1.GetOptions{}) - require.NoError(t, err) - require.Equal(t, pv.Spec.PersistentVolumeSource.HostPath.Path, "/tmp/data2") - }, - }, - { - name: "update a persistentvolume in kcp through upsyncer virtual workspace, try to remove the upsync state label, expect error.", - work: func(t *testing.T, syncer *framework.StartedSyncerFixture, clusterName logicalcluster.Name, ws *tenancyv1alpha1.Workspace, path logicalcluster.Path, syncTargetKey string) { - t.Helper() 
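// Every case in this table repeats the same boilerplate: poll until the
// upsyncer virtual workspace URL is published, then build a cluster-aware
// client against it. A minimal sketch of factoring that into a closure,
// reusing only the framework helpers already used in this file (the closure
// name is illustrative, not part of the framework):
newUpsyncerVWClient := func(ctx context.Context) kcpkubernetesclientset.ClusterInterface {
	cfg := rest.CopyConfig(server.BaseConfig(t))
	framework.Eventually(t, func() (found bool, _ string) {
		var err error
		// The URL is only published once the SyncTarget is ready, hence the polling.
		cfg.Host, found, err = framework.VirtualWorkspaceURL(ctx, kcpClusterClient, ws, syncer.GetUpsyncerVirtualWorkspaceURLs())
		require.NoError(t, err)
		return found, "Upsyncer virtual workspace URL not found"
	}, wait.ForeverTestTimeout, time.Millisecond*100, "Upsyncer virtual workspace URL not found")
	client, err := kcpkubernetesclientset.NewForConfig(cfg)
	require.NoError(t, err)
	return client
}
_ = newUpsyncerVWClient // sketch only; the cases below keep the inline form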
- - ctx, cancelFunc := context.WithCancel(context.Background()) - t.Cleanup(cancelFunc) - - upsyncerVWConfig := rest.CopyConfig(server.BaseConfig(t)) - framework.Eventually(t, func() (found bool, _ string) { - var err error - upsyncerVWConfig.Host, found, err = framework.VirtualWorkspaceURL(ctx, kcpClusterClient, ws, syncer.GetUpsyncerVirtualWorkspaceURLs()) - require.NoError(t, err) - return found, "Upsyncer virtual workspace URL not found" - }, wait.ForeverTestTimeout, time.Millisecond*100, "Upsyncer virtual workspace URL not found") - kubelikeSyncerVWClient, err := kcpkubernetesclientset.NewForConfig(upsyncerVWConfig) - require.NoError(t, err) - - logWithTimestampf(t, "Creating a persistentvolume in the kubelike source cluster...") - pv := &corev1.PersistentVolume{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pv", - Labels: map[string]string{ - "state.workload.kcp.io/" + syncTargetKey: "Upsync", - }, - }, - Spec: corev1.PersistentVolumeSpec{ - AccessModes: []corev1.PersistentVolumeAccessMode{ - corev1.ReadWriteOnce, - }, - Capacity: corev1.ResourceList{ - corev1.ResourceStorage: resource.MustParse("1Gi"), - }, - PersistentVolumeSource: corev1.PersistentVolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/tmp/data", - }, - }, - }, - } - logWithTimestampf(t, "Creating PV %s through direct kube client ...", pv.Name) - _, err = kubeClusterClient.CoreV1().PersistentVolumes().Cluster(path).Create(ctx, pv, metav1.CreateOptions{}) - require.NoError(t, err) - - logWithTimestampf(t, "Getting PV test-pv through upsyncer virtual workspace...") - pv, err = kubelikeSyncerVWClient.CoreV1().PersistentVolumes().Cluster(clusterName.Path()).Get(ctx, "test-pv", metav1.GetOptions{}) - require.NoError(t, err) - - // Changing the label to something else, should fail. 
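// The upsyncer virtual workspace only accepts writes for objects it owns,
// i.e. objects carrying the state.workload.kcp.io/<syncTargetKey> label with
// the value "Upsync" (see the creation case above, which fails without it).
// Rewriting that label through this endpoint is therefore expected to be rejected.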
- pv.Labels["state.workload.kcp.io/"+syncTargetKey] = "notupsync" - pv.Spec.PersistentVolumeSource.HostPath.Path = "/tmp/data/changed" - - logWithTimestampf(t, "Updating PV test-pv through upsyncer virtual workspace...") - _, err = kubelikeSyncerVWClient.CoreV1().PersistentVolumes().Cluster(clusterName.Path()).Update(ctx, pv, metav1.UpdateOptions{}) - require.Error(t, err) - - logWithTimestampf(t, "Ensure PV test-pv is not changed...") - pv, err = kubelikeSyncerVWClient.CoreV1().PersistentVolumes().Cluster(clusterName.Path()).Get(ctx, "test-pv", metav1.GetOptions{}) - require.NoError(t, err) - - require.Equal(t, pv.Spec.PersistentVolumeSource.HostPath.Path, "/tmp/data") - }, - }, - { - name: "Delete a persistentvolume in kcp through upsyncer virtual workspace", - work: func(t *testing.T, syncer *framework.StartedSyncerFixture, clusterName logicalcluster.Name, ws *tenancyv1alpha1.Workspace, path logicalcluster.Path, syncTargetKey string) { - t.Helper() - - ctx, cancelFunc := context.WithCancel(context.Background()) - t.Cleanup(cancelFunc) - - upsyncerVWConfig := rest.CopyConfig(server.BaseConfig(t)) - framework.Eventually(t, func() (found bool, _ string) { - var err error - upsyncerVWConfig.Host, found, err = framework.VirtualWorkspaceURL(ctx, kcpClusterClient, ws, syncer.GetUpsyncerVirtualWorkspaceURLs()) - require.NoError(t, err) - return found, "Upsyncer virtual workspace URL not found" - }, wait.ForeverTestTimeout, time.Millisecond*100, "Upsyncer virtual workspace URL not found") - kubelikeSyncerVWClient, err := kcpkubernetesclientset.NewForConfig(upsyncerVWConfig) - require.NoError(t, err) - - logWithTimestampf(t, "Creating a persistentvolume in the kubelike source cluster...") - pv := &corev1.PersistentVolume{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pv", - Labels: map[string]string{ - "state.workload.kcp.io/" + syncTargetKey: "Upsync", - }, - }, - Spec: corev1.PersistentVolumeSpec{ - AccessModes: []corev1.PersistentVolumeAccessMode{ - corev1.ReadWriteOnce, - }, - Capacity: corev1.ResourceList{ - corev1.ResourceStorage: resource.MustParse("1Gi"), - }, - PersistentVolumeSource: corev1.PersistentVolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/tmp/data", - }, - }, - }, - } - logWithTimestampf(t, "Creating PV %s through direct kube client ...", pv.Name) - _, err = kubeClusterClient.CoreV1().PersistentVolumes().Cluster(path).Create(ctx, pv, metav1.CreateOptions{}) - require.NoError(t, err) - - logWithTimestampf(t, "Deleting PV test-pv3 through upsyncer virtual workspace...") - err = kubelikeSyncerVWClient.CoreV1().PersistentVolumes().Cluster(clusterName.Path()).Delete(ctx, "test-pv", metav1.DeleteOptions{}) - require.NoError(t, err) - }, - }, - } - - for _, testCase := range testCases { - testCase := testCase - t.Run(testCase.name, func(t *testing.T) { - t.Parallel() - framework.Suite(t, "transparent-multi-cluster") - - ctx, cancelFunc := context.WithCancel(context.Background()) - t.Cleanup(cancelFunc) - - orgPath, _ := framework.NewOrganizationFixture(t, server, framework.TODO_WithoutMultiShardSupport()) - - upsyncerPath, upsyncerWS := framework.NewWorkspaceFixture(t, server, orgPath, framework.WithName("upsyncer"), framework.TODO_WithoutMultiShardSupport()) - upsyncerClusterName := logicalcluster.Name(upsyncerWS.Spec.Cluster) - - logWithTimestampf(t, "Deploying syncer into workspace %s", upsyncerPath) - upsyncer := framework.NewSyncerFixture(t, server, upsyncerPath, - framework.WithSyncTargetName("upsyncer"), - framework.WithExtraResources("persistentvolumes"), - 
framework.WithAPIExports(""), - framework.WithDownstreamPreparation(func(config *rest.Config, isFakePCluster bool) { - if !isFakePCluster { - // Only need to install services,ingresses and persistentvolumes in a logical cluster - return - } - sinkCrdClient, err := apiextensionsclientset.NewForConfig(config) - require.NoError(t, err, "failed to create apiextensions client") - logWithTimestampf(t, "Installing test CRDs into sink cluster...") - kubefixtures.Create(t, sinkCrdClient.ApiextensionsV1().CustomResourceDefinitions(), - metav1.GroupResource{Group: "core.k8s.io", Resource: "persistentvolumes"}, - ) - require.NoError(t, err) - }), - ).CreateSyncTargetAndApplyToDownstream(t).StartAPIImporter(t).StartHeartBeat(t) - - logWithTimestampf(t, "Bind upsyncer workspace") - framework.NewBindCompute(t, upsyncerPath, server, - framework.WithAPIExportsWorkloadBindOption(upsyncerPath.Join(workloadv1alpha1.ImportedAPISExportName).String()), - ).Bind(t) - - logWithTimestampf(t, "Waiting for the persistentvolumes crd to be imported and available in the upsyncer source cluster...") - framework.Eventually(t, func() (bool, string) { - _, err := kubeClusterClient.CoreV1().PersistentVolumes().Cluster(upsyncerPath).List(ctx, metav1.ListOptions{}) - if err != nil { - return false, err.Error() - } - return true, "" - }, wait.ForeverTestTimeout, time.Millisecond*100) - - upsyncerVWConfig := rest.CopyConfig(server.BaseConfig(t)) - framework.Eventually(t, func() (found bool, _ string) { - var err error - upsyncerVWConfig.Host, found, err = framework.VirtualWorkspaceURL(ctx, kcpClusterClient, upsyncerWS, upsyncer.GetUpsyncerVirtualWorkspaceURLs()) - require.NoError(t, err) - return found, "Upsyncer virtual workspace URL not found" - }, wait.ForeverTestTimeout, time.Millisecond*100, "Upsyncer virtual workspace URL not found") - kubelikeUpsyncerVWClient, err := kcpkubernetesclientset.NewForConfig(upsyncerVWConfig) - require.NoError(t, err) - - logWithTimestampf(t, "Waiting for the persistentvolumes to be available in the upsyncer virtual workspace...") - framework.Eventually(t, func() (bool, string) { - _, err := kubelikeUpsyncerVWClient.CoreV1().PersistentVolumes().Cluster(upsyncerClusterName.Path()).List(ctx, metav1.ListOptions{}) - if err != nil { - return false, err.Error() - } - return true, "" - }, wait.ForeverTestTimeout, time.Millisecond*100) - - syncTargetKey := upsyncer.ToSyncTargetKey() - - logWithTimestampf(t, "Starting test...") - testCase.work(t, upsyncer, upsyncerClusterName, upsyncerWS, upsyncerPath, syncTargetKey) - }) - } -} - -type ByGroupVersion []*metav1.APIResourceList - -func (a ByGroupVersion) Len() int { return len(a) } -func (a ByGroupVersion) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a ByGroupVersion) Less(i, j int) bool { return a[i].GroupVersion < a[j].GroupVersion } - -type ByName []metav1.APIResource - -func (a ByName) Len() int { return len(a) } -func (a ByName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a ByName) Less(i, j int) bool { return a[i].Name < a[j].Name } - -func sortAPIResourceList(list []*metav1.APIResourceList) []*metav1.APIResourceList { - sort.Sort(ByGroupVersion(list)) - for _, resource := range list { - sort.Sort(ByName(resource.APIResources)) - } - return list -} From a222f7706024a76cefebaec9c2dab354ed6e77c0 Mon Sep 17 00:00:00 2001 From: Christoph Mewes Date: Wed, 24 May 2023 15:09:08 +0200 Subject: [PATCH 04/15] refactor TestRootAPIExportAuthorizers --- test/e2e/virtual/apiexport/authorizer_test.go | 39 +++++++++---------- 1 file changed, 18 
insertions(+), 21 deletions(-) diff --git a/test/e2e/virtual/apiexport/authorizer_test.go b/test/e2e/virtual/apiexport/authorizer_test.go index c1eb9c60cfb..18cd5d7e464 100644 --- a/test/e2e/virtual/apiexport/authorizer_test.go +++ b/test/e2e/virtual/apiexport/authorizer_test.go @@ -51,11 +51,9 @@ import ( "github.com/kcp-dev/kcp/sdk/apis/apis" apisv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1" "github.com/kcp-dev/kcp/sdk/apis/core" - "github.com/kcp-dev/kcp/sdk/apis/scheduling" - schedulingv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/scheduling/v1alpha1" + "github.com/kcp-dev/kcp/sdk/apis/tenancy" tenancyv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1" "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/util/conditions" - workloadv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/workload/v1alpha1" kcpclientset "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster" "github.com/kcp-dev/kcp/test/e2e/fixtures/wildwest/apis/wildwest" "github.com/kcp-dev/kcp/test/e2e/framework" @@ -584,14 +582,14 @@ func TestRootAPIExportAuthorizers(t *testing.T) { err = helpers.CreateResourceFromFS(ctx, serviceDynamicClusterClient.Cluster(servicePath), mapper, nil, "apiresourceschema_cowboys.yaml", testFiles) require.NoError(t, err) - t.Logf("Get the root scheduling APIExport's identity hash") + t.Logf("Get the root tenancy APIExport's identity hash") framework.EventuallyCondition(t, func() (conditions.Getter, error) { - return kcpClient.Cluster(core.RootCluster.Path()).ApisV1alpha1().APIExports().Get(ctx, "scheduling.kcp.io", metav1.GetOptions{}) + return kcpClient.Cluster(core.RootCluster.Path()).ApisV1alpha1().APIExports().Get(ctx, "tenancy.kcp.io", metav1.GetOptions{}) }, framework.Is(apisv1alpha1.APIExportIdentityValid)) - schedulingAPIExport, err := kcpClient.Cluster(core.RootCluster.Path()).ApisV1alpha1().APIExports().Get(ctx, "scheduling.kcp.io", metav1.GetOptions{}) + tenancyAPIExport, err := kcpClient.Cluster(core.RootCluster.Path()).ApisV1alpha1().APIExports().Get(ctx, "tenancy.kcp.io", metav1.GetOptions{}) require.NoError(t, err) - identityHash := schedulingAPIExport.Status.IdentityHash + identityHash := tenancyAPIExport.Status.IdentityHash require.NotNil(t, identityHash) t.Logf("Create an APIExport for APIResourceSchema in service provider %q", servicePath) @@ -603,7 +601,7 @@ func TestRootAPIExportAuthorizers(t *testing.T) { LatestResourceSchemas: []string{"today.cowboys.wildwest.dev"}, PermissionClaims: []apisv1alpha1.PermissionClaim{ { - GroupResource: apisv1alpha1.GroupResource{Group: scheduling.GroupName, Resource: "placements"}, + GroupResource: apisv1alpha1.GroupResource{Group: tenancy.GroupName, Resource: "workspacetypes"}, IdentityHash: identityHash, All: true, }, @@ -640,7 +638,7 @@ func TestRootAPIExportAuthorizers(t *testing.T) { PermissionClaims: []apisv1alpha1.AcceptablePermissionClaim{ { PermissionClaim: apisv1alpha1.PermissionClaim{ - GroupResource: apisv1alpha1.GroupResource{Group: scheduling.GroupName, Resource: "placements"}, + GroupResource: apisv1alpha1.GroupResource{Group: tenancy.GroupName, Resource: "workspacetypes"}, IdentityHash: identityHash, All: true, }, @@ -666,18 +664,17 @@ func TestRootAPIExportAuthorizers(t *testing.T) { require.NoError(t, err) t.Logf("Verify that service user can create a claimed resource in user workspace") - placement := &unstructured.Unstructured{ + workspaceType := &unstructured.Unstructured{ Object: map[string]interface{}{ - "apiVersion": schedulingv1alpha1.SchemeGroupVersion.String(), - "kind": "Placement", + 
"apiVersion": tenancyv1alpha1.SchemeGroupVersion.String(), + "kind": "WorkspaceType", "metadata": map[string]interface{}{ "name": "default", }, "spec": map[string]interface{}{ - "locationResource": map[string]interface{}{ - "group": workloadv1alpha1.SchemeGroupVersion.Group, - "resource": "synctargets", - "version": workloadv1alpha1.SchemeGroupVersion.Version, + "defaultChildWorkspaceType": map[string]interface{}{ + "name": "universal", + "path": "root", }, }, }, @@ -685,13 +682,13 @@ func TestRootAPIExportAuthorizers(t *testing.T) { framework.Eventually(t, func() (bool, string) { _, err = serviceDynamicVWClient.Cluster(userClusterName.Path()). - Resource(schedulingv1alpha1.SchemeGroupVersion.WithResource("placements")). - Create(ctx, placement, metav1.CreateOptions{}) - return err == nil, fmt.Sprintf("error creating placement: %v", err) - }, wait.ForeverTestTimeout, time.Millisecond*100, "error creating placement") + Resource(tenancyv1alpha1.SchemeGroupVersion.WithResource("workspacetypes")). + Create(ctx, workspaceType, metav1.CreateOptions{}) + return err == nil, fmt.Sprintf("error creating workspacetype: %v", err) + }, wait.ForeverTestTimeout, time.Millisecond*100, "error creating workspacetype") t.Logf("Verify that consumer user can get the created resource in user workspace") - _, err = userKcpClient.Cluster(userClusterName.Path()).SchedulingV1alpha1().Placements().Get(ctx, placement.GetName(), metav1.GetOptions{}) + _, err = userKcpClient.Cluster(userClusterName.Path()).TenancyV1alpha1().WorkspaceTypes().Get(ctx, workspaceType.GetName(), metav1.GetOptions{}) require.NoError(t, err) } From 7884c019c449df5592262d0925b2a4fcb931e640 Mon Sep 17 00:00:00 2001 From: Christoph Mewes Date: Wed, 24 May 2023 15:15:58 +0200 Subject: [PATCH 05/15] refactor TestProtectedAPIFromServiceExports --- test/e2e/apibinding/apibinding_protected_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/test/e2e/apibinding/apibinding_protected_test.go b/test/e2e/apibinding/apibinding_protected_test.go index 57c8eb55ee9..e1127c6a2fc 100644 --- a/test/e2e/apibinding/apibinding_protected_test.go +++ b/test/e2e/apibinding/apibinding_protected_test.go @@ -33,8 +33,8 @@ import ( "github.com/kcp-dev/kcp/config/helpers" apisv1alpha1 "github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1" + "github.com/kcp-dev/kcp/sdk/apis/tenancy" "github.com/kcp-dev/kcp/sdk/apis/third_party/conditions/util/conditions" - "github.com/kcp-dev/kcp/sdk/apis/workload" kcpclientset "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster" "github.com/kcp-dev/kcp/test/e2e/framework" ) @@ -136,12 +136,12 @@ func TestProtectedAPIFromServiceExports(t *testing.T) { require.NoError(t, err, "failed to construct kcp cluster client for server") t.Logf("Get tenancy APIExport from root workspace") - workloadsRoot, err := kcpClusterClient.ApisV1alpha1().APIExports().Cluster(rootWorkspace).Get(ctx, workload.GroupName, metav1.GetOptions{}) + tenanciesRoot, err := kcpClusterClient.ApisV1alpha1().APIExports().Cluster(rootWorkspace).Get(ctx, tenancy.GroupName, metav1.GetOptions{}) require.NoError(t, err) t.Logf("Construct VirtualWorkspace client") //nolint:staticcheck // SA1019 VirtualWorkspaces is deprecated but not removed yet - vwURL := workloadsRoot.Status.VirtualWorkspaces[0].URL + vwURL := tenanciesRoot.Status.VirtualWorkspaces[0].URL cfgVW := server.RootShardSystemMasterBaseConfig(t) cfgVW.Host = vwURL @@ -149,6 +149,6 @@ func TestProtectedAPIFromServiceExports(t *testing.T) { require.NoError(t, err) t.Logf("Make sure we can 
access tenancy API from VirtualWorkspace") - _, err = vwClient.WorkloadV1alpha1().SyncTargets().List(ctx, metav1.ListOptions{}) + _, err = vwClient.TenancyV1alpha1().Workspaces().List(ctx, metav1.ListOptions{}) require.NoError(t, err) } From 4f526ca1e8ba4d42f5bfc7c9631da8b78b35491d Mon Sep 17 00:00:00 2001 From: Christoph Mewes Date: Wed, 24 May 2023 15:23:12 +0200 Subject: [PATCH 06/15] keep feature gate boilerplate without tripping up the linter --- pkg/features/kcp_features.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/features/kcp_features.go b/pkg/features/kcp_features.go index 7999c5e5fb2..c269d650c01 100644 --- a/pkg/features/kcp_features.go +++ b/pkg/features/kcp_features.go @@ -30,6 +30,7 @@ import ( logsapi "k8s.io/component-base/logs/api/v1" ) +//nolint:gocritic const ( // Every feature gate should add method here following this template: // From 95ae2a46c4b66b3448375b7b088c6b6cc1c3e4db Mon Sep 17 00:00:00 2001 From: Christoph Mewes Date: Wed, 24 May 2023 15:54:58 +0200 Subject: [PATCH 07/15] go mod tidy --- go.mod | 12 +----------- go.sum | 28 ---------------------------- 2 files changed, 1 insertion(+), 39 deletions(-) diff --git a/go.mod b/go.mod index a2c47c0dff3..eb07ab03404 100644 --- a/go.mod +++ b/go.mod @@ -5,9 +5,6 @@ go 1.19 require ( github.com/MakeNowJust/heredoc v1.0.0 github.com/abiosoft/lineprefix v0.1.4 - github.com/aojea/rwconn v0.1.1 - github.com/coredns/caddy v1.1.1 - github.com/coredns/coredns v1.9.3 github.com/davecgh/go-spew v1.1.1 github.com/egymgmbh/go-prefix-writer v0.0.0-20180609083313-7326ea162eca github.com/emicklei/go-restful/v3 v3.10.2 @@ -22,7 +19,6 @@ require ( github.com/kcp-dev/kcp/sdk v0.0.0-00010101000000-000000000000 github.com/kcp-dev/logicalcluster/v3 v3.0.4 github.com/martinlindhe/base36 v1.1.1 - github.com/miekg/dns v1.1.50 github.com/muesli/reflow v0.1.0 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 github.com/prometheus/client_golang v1.15.0 @@ -33,7 +29,6 @@ require ( go.etcd.io/etcd/client/pkg/v3 v3.5.5 go.etcd.io/etcd/server/v3 v3.5.5 go.uber.org/multierr v1.7.0 - golang.org/x/net v0.9.0 gopkg.in/square/go-jose.v2 v2.2.2 gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.26.3 @@ -56,7 +51,6 @@ require ( github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/NYTimes/gziphandler v1.1.1 // indirect github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 // indirect - github.com/apparentlymart/go-cidr v1.1.0 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect @@ -65,12 +59,9 @@ require ( github.com/coreos/go-oidc v2.1.0+incompatible // indirect github.com/coreos/go-semver v0.3.0 // indirect github.com/coreos/go-systemd/v22 v22.3.2 // indirect - github.com/dnstap/golang-dnstap v0.4.0 // indirect github.com/docker/distribution v2.8.1+incompatible // indirect github.com/dustin/go-humanize v1.0.0 // indirect - github.com/farsightsec/golang-framestream v0.3.0 // indirect github.com/felixge/httpsnoop v1.0.3 // indirect - github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 // indirect github.com/form3tech-oss/jwt-go v3.2.3+incompatible // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/go-errors/errors v1.0.1 // indirect @@ -92,7 +83,6 @@ require ( github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect - 
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 // indirect github.com/imdario/mergo v0.3.12 // indirect github.com/inconshreveable/mousetrap v1.0.1 // indirect github.com/jonboulle/clockwork v0.2.2 // indirect @@ -113,7 +103,6 @@ require ( github.com/onsi/gomega v1.27.4 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/selinux v1.10.0 // indirect - github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect @@ -148,6 +137,7 @@ require ( go.uber.org/zap v1.19.0 // indirect golang.org/x/crypto v0.1.0 // indirect golang.org/x/mod v0.9.0 // indirect + golang.org/x/net v0.9.0 // indirect golang.org/x/oauth2 v0.7.0 // indirect golang.org/x/sync v0.1.0 // indirect golang.org/x/sys v0.7.0 // indirect diff --git a/go.sum b/go.sum index c5f46a04896..e15d7b37512 100644 --- a/go.sum +++ b/go.sum @@ -55,10 +55,6 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5 github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 h1:yL7+Jz0jTC6yykIK/Wh74gnTJnrGr5AyrNMXuA0gves= github.com/antlr/antlr4/runtime/Go/antlr v1.4.10/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= -github.com/aojea/rwconn v0.1.1 h1:vsYyhoQghQ5HH98QE+xmNwnKsTm8GxWjpvxGft6s7q8= -github.com/aojea/rwconn v0.1.1/go.mod h1:LUO0QX1YNsA51BR48slR87GsvEMiTTOWNdC6aoG+BTA= -github.com/apparentlymart/go-cidr v1.1.0 h1:2mAhrMoF+nhXqxTzSZMUzDHkLjmIHC+Zzn4tdgBZjnU= -github.com/apparentlymart/go-cidr v1.1.0/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= @@ -103,10 +99,6 @@ github.com/cockroachdb/errors v1.2.4 h1:Lap807SXTH5tri2TivECb/4abUkMZC9zRoLarvcK github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f h1:o/kfcElHqOiXqcou5a3rIlMc7oJbMQkeLk0VQJ7zgqY= github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= -github.com/coredns/caddy v1.1.1 h1:2eYKZT7i6yxIfGP3qLJoJ7HAsDJqYB+X68g4NYjSrE0= -github.com/coredns/caddy v1.1.1/go.mod h1:A6ntJQlAWuQfFlsd9hvigKbo2WS0VUs2l1e2F+BawD4= -github.com/coredns/coredns v1.9.3 h1:eYvVtY9n6qZhHGtQaiOu6ku87Mz9kkSP/6RvFZ48VtA= -github.com/coredns/coredns v1.9.3/go.mod h1:AS3cwa8gzBsPEqNI6p6LAVNaF7/0SgjjlQQ9JSrzncQ= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-oidc v2.1.0+incompatible h1:sdJrfw8akMnCuUlaZU3tE/uYXFgfqom8DBE9so9EBsM= @@ -127,8 +119,6 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod 
h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/dnstap/golang-dnstap v0.4.0 h1:KRHBoURygdGtBjDI2w4HifJfMAhhOqDuktAokaSa234= -github.com/dnstap/golang-dnstap v0.4.0/go.mod h1:FqsSdH58NAmkAvKcpyxht7i4FoBjKu8E4JUPt8ipSUs= github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= @@ -148,16 +138,12 @@ github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go. github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/farsightsec/golang-framestream v0.3.0 h1:/spFQHucTle/ZIPkYqrfshQqPe2VQEzesH243TjIwqA= -github.com/farsightsec/golang-framestream v0.3.0/go.mod h1:eNde4IQyEiA5br02AouhEHCu3p3UzrCdFR4LuQHklMI= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.12.0 h1:mRhaKNwANqRgUBGKmnI5ZxEk7QXmjQeCcuYFMX2bfcc= github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0= -github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ= -github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/form3tech-oss/jwt-go v3.2.3+incompatible h1:7ZaBxOI7TMoYBfyA3cQHErNNyAWIKUMIwqxEtgHOs5c= github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= @@ -272,7 +258,6 @@ github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJY github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -293,8 +278,6 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0= github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= -github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 h1:MJG/KsmcqMwFAkh8mTnAwhyKoB+sTAnY4CACC110tbU= -github.com/grpc-ecosystem/grpc-opentracing 
v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -414,9 +397,6 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5 github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= -github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA= -github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -455,8 +435,6 @@ github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3I github.com/opencontainers/selinux v1.10.0 h1:rAiKF8hTcgLI3w0DHm6i0ylVVcOrlgR1kK99DRLDhyU= github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= -github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= @@ -694,7 +672,6 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -712,7 +689,6 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod 
h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= @@ -753,7 +729,6 @@ golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -789,7 +764,6 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= @@ -838,7 +812,6 @@ golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -863,7 +836,6 @@ golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= golang.org/x/xerrors 
v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From c05c053ae25fda43f7758840aa8950de904701ff Mon Sep 17 00:00:00 2001 From: Christoph Mewes Date: Wed, 31 May 2023 23:59:01 +0200 Subject: [PATCH 08/15] remove workload-related reconciler e2e tests --- test/e2e/reconciler/cache/replication_test.go | 182 ------------------ 1 file changed, 182 deletions(-) diff --git a/test/e2e/reconciler/cache/replication_test.go b/test/e2e/reconciler/cache/replication_test.go index 046ef871259..9ebe3319053 100644 --- a/test/e2e/reconciler/cache/replication_test.go +++ b/test/e2e/reconciler/cache/replication_test.go @@ -65,10 +65,6 @@ var scenarios = []testScenario{ {"TestReplicateAPIResourceSchemaNegative", replicateAPIResourceSchemaNegativeScenario}, {"TestReplicateWorkspaceType", replicateWorkspaceTypeScenario}, {"TestReplicateWorkspaceTypeNegative", replicateWorkspaceTypeNegativeScenario}, - {"TestReplicateWorkloadsClusterRole", replicateWorkloadsClusterRoleScenario}, - {"TestReplicateWorkloadsClusterRoleNegative", replicateWorkloadsClusterRoleNegativeScenario}, - {"TestReplicateWorkloadsClusterRoleBinding", replicateWorkloadsClusterRoleBindingScenario}, - {"TestReplicateWorkloadsClusterRoleBindingNegative", replicateWorkloadsClusterRoleBindingNegativeScenario}, } // disruptiveScenarios contains a list of scenarios that will be run in a private environment @@ -746,181 +742,3 @@ func createCacheClientConfigForEnvironment(ctx context.Context, t *testing.T, kc require.NoError(t, err) return cacheServerRestConfig } - -// replicateWorkloadsClusterRoleScenario tests if a ClusterRole related to workloads API is propagated to the cache server. -// The test exercises creation, modification and removal of the ClusterRole object. -func replicateWorkloadsClusterRoleScenario(ctx context.Context, t *testing.T, server framework.RunningServer, kcpShardClusterDynamicClient kcpdynamic.ClusterInterface, cacheKcpClusterDynamicClient kcpdynamic.ClusterInterface) { - t.Helper() - replicateResource(ctx, - t, - server, - kcpShardClusterDynamicClient, - cacheKcpClusterDynamicClient, - "", - "ClusterRole", - rbacv1.SchemeGroupVersion.WithResource("clusterroles"), - &rbacv1.ClusterRole{ - ObjectMeta: metav1.ObjectMeta{ - Name: withPseudoRandomSuffix("syncer"), - }, - Rules: []rbacv1.PolicyRule{ - { - Verbs: []string{"sync"}, - APIGroups: []string{"workload.kcp.io"}, - Resources: []string{"synctargets"}, - ResourceNames: []string{"asynctarget"}, - }, - }, - }, - nil, - ) -} - -// replicateWorkloadsClusterRoleNegativeScenario checks if modified or even deleted cached ClusterRole (related to workloads API) will be reconciled to match the original object. 
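-// In other words, the cached copy is expected to be overwritten with the
-// shard's original object on the next reconciliation, not to keep local edits.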
-func replicateWorkloadsClusterRoleNegativeScenario(ctx context.Context, t *testing.T, server framework.RunningServer, kcpShardClusterDynamicClient kcpdynamic.ClusterInterface, cacheKcpClusterDynamicClient kcpdynamic.ClusterInterface) { - t.Helper() - replicateResourceNegative( - ctx, - t, - server, - kcpShardClusterDynamicClient, - cacheKcpClusterDynamicClient, - "", - "ClusterRole", - rbacv1.SchemeGroupVersion.WithResource("clusterroles"), - &rbacv1.ClusterRole{ - ObjectMeta: metav1.ObjectMeta{ - Name: withPseudoRandomSuffix("syncer"), - }, - Rules: []rbacv1.PolicyRule{ - { - Verbs: []string{"sync"}, - APIGroups: []string{"workload.kcp.io"}, - Resources: []string{"synctargets"}, - ResourceNames: []string{"asynctarget"}, - }, - }, - }, - nil, - ) -} - -// replicateWorkloadsClusterRoleBindingScenario tests if a ClusterRoleBinding related to workloads API is propagated to the cache server. -// The test exercises creation, modification and removal of the ClusterRoleBinding object. -func replicateWorkloadsClusterRoleBindingScenario(ctx context.Context, t *testing.T, server framework.RunningServer, kcpShardClusterDynamicClient kcpdynamic.ClusterInterface, cacheKcpClusterDynamicClient kcpdynamic.ClusterInterface) { - t.Helper() - - clusterRole := &rbacv1.ClusterRole{ - ObjectMeta: metav1.ObjectMeta{ - Name: withPseudoRandomSuffix("syncer"), - }, - Rules: []rbacv1.PolicyRule{ - { - Verbs: []string{"sync"}, - APIGroups: []string{"workload.kcp.io"}, - Resources: []string{"synctargets"}, - ResourceNames: []string{"asynctarget"}, - }, - }, - } - - orgPath, _ := framework.NewOrganizationFixture(t, server) - _, ws := framework.NewWorkspaceFixture(t, server, orgPath, framework.WithRootShard()) - clusterName := logicalcluster.Name(ws.Spec.Cluster) - - t.Logf("Create additional ClusterRole %s on the root shard for replication", clusterRole.Name) - clusterRoleGVR := rbacv1.SchemeGroupVersion.WithResource("clusterroles") - clusterRoleUnstr, err := toUnstructured(clusterRole, "ClusterRole", clusterRoleGVR) - require.NoError(t, err) - _, err = kcpShardClusterDynamicClient.Resource(clusterRoleGVR).Cluster(clusterName.Path()).Create(ctx, clusterRoleUnstr, metav1.CreateOptions{}) - require.NoError(t, err) - - replicateResource(ctx, - t, - server, - kcpShardClusterDynamicClient, - cacheKcpClusterDynamicClient, - clusterName, - "ClusterRoleBinding", - rbacv1.SchemeGroupVersion.WithResource("clusterrolebindings"), - &rbacv1.ClusterRoleBinding{ - ObjectMeta: metav1.ObjectMeta{ - Name: withPseudoRandomSuffix("syncer"), - }, - RoleRef: rbacv1.RoleRef{ - APIGroup: rbacv1.SchemeGroupVersion.Group, - Kind: "ClusterRole", - Name: clusterRole.Name, - }, - Subjects: []rbacv1.Subject{ - { - Kind: "ServiceAccount", - APIGroup: "", - Name: "kcp-syncer-0000", - Namespace: "kcp-syncer-namespace", - }, - }, - }, - nil, - ) -} - -// replicateWorkloadsClusterRoleNegativeScenario checks if modified or even deleted cached ClusterRole (related to workloads API) will be reconciled to match the original object. 
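-// (This negative scenario exercises the ClusterRoleBinding variant of the same check.)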
-func replicateWorkloadsClusterRoleBindingNegativeScenario(ctx context.Context, t *testing.T, server framework.RunningServer, kcpShardClusterDynamicClient kcpdynamic.ClusterInterface, cacheKcpClusterDynamicClient kcpdynamic.ClusterInterface) { - t.Helper() - - clusterRole := &rbacv1.ClusterRole{ - ObjectMeta: metav1.ObjectMeta{ - Name: withPseudoRandomSuffix("syncer"), - }, - Rules: []rbacv1.PolicyRule{ - { - Verbs: []string{"sync"}, - APIGroups: []string{"workload.kcp.io"}, - Resources: []string{"synctargets"}, - ResourceNames: []string{"asynctarget"}, - }, - }, - } - - orgPath, _ := framework.NewOrganizationFixture(t, server) - _, ws := framework.NewWorkspaceFixture(t, server, orgPath, framework.WithRootShard()) - clusterName := logicalcluster.Name(ws.Spec.Cluster) - - t.Logf("Create additional ClusterRole %s on the root shard for replication", clusterRole.Name) - clusterRoleGVR := rbacv1.SchemeGroupVersion.WithResource("clusterroles") - clusterRoleUnstr, err := toUnstructured(clusterRole, "ClusterRole", clusterRoleGVR) - require.NoError(t, err) - _, err = kcpShardClusterDynamicClient.Resource(clusterRoleGVR).Cluster(clusterName.Path()).Create(ctx, clusterRoleUnstr, metav1.CreateOptions{}) - require.NoError(t, err) - - replicateResourceNegative( - ctx, - t, - server, - kcpShardClusterDynamicClient, - cacheKcpClusterDynamicClient, - clusterName, - "ClusterRoleBinding", - rbacv1.SchemeGroupVersion.WithResource("clusterrolebindings"), - &rbacv1.ClusterRoleBinding{ - ObjectMeta: metav1.ObjectMeta{ - Name: withPseudoRandomSuffix("syncer"), - }, - RoleRef: rbacv1.RoleRef{ - APIGroup: rbacv1.SchemeGroupVersion.Group, - Kind: "ClusterRole", - Name: clusterRole.Name, - }, - Subjects: []rbacv1.Subject{ - { - Kind: "ServiceAccount", - APIGroup: "", - Name: "kcp-syncer-0000", - Namespace: "kcp-syncer-namespace", - }, - }, - }, - nil, - ) -} From 10c3a90a75aad6c3a9f35cc40b77cee15b02a7f0 Mon Sep 17 00:00:00 2001 From: Christoph Mewes Date: Thu, 1 Jun 2023 00:46:34 +0200 Subject: [PATCH 09/15] remove transparent-multi-cluster test suite concept / github actions --- .github/workflows/ci.yaml | 130 ----------------------------------- test/e2e/framework/config.go | 2 +- 2 files changed, 1 insertion(+), 131 deletions(-) delete mode 100644 .github/workflows/ci.yaml diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml deleted file mode 100644 index d4661e85174..00000000000 --- a/.github/workflows/ci.yaml +++ /dev/null @@ -1,130 +0,0 @@ -name: CI - -on: - push: - branches: - - main - - 'release-*' - pull_request: - branches: - - main - - 'release-*' - paths-ignore: - - "docs/**" - - "**/*.md" - - ".github/ISSUE_TEMPLATE/*" - - ".goreleaser.yaml" - - ".github/workflows/docs-gen-and-push.yaml" - -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - -# NOTE!!! -# -# If you add a job here that is a REQUIRED status check in GitHub, you MUST add a job with the same name to ci-docs-only.yaml -# -# NOTE!!! - -env: - # etcd uses fsync by default, disable it for running on github actions to avoid disk contention - # xref: https://github.com/kcp-dev/kcp/pull/435/commits/064a517747d69c2cd8f7f8b4a595ad909e595c89 - UNSAFE_E2E_HACK_DISABLE_ETCD_FSYNC: true - # always enable the Kubernetes cache mutation detector when running in CI, this guarantees to catch - # potentiall issues with controllers not properly copying objects before mutating them. 
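- # (When enabled, client-go's cache mutation detector panics as soon as an
- # object served from a shared informer cache is modified in place, so such
- # bugs fail the CI run immediately.)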
- KUBE_CACHE_MUTATION_DETECTOR: true - -jobs: - e2e-shared-server: - name: e2e-shared-server - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - uses: actions/setup-go@v4 - with: - go-version: v1.19 - cache: true - - run: make build - - # Install kind with a local registry - - uses: container-tools/kind-action@v2 - name: Kubernetes KinD Cluster w/local registry - with: - version: v0.17.0 - config: test/e2e/kind/config-calico.yaml - - # Install Calico - - run: |- - kubectl apply -f https://raw.githubusercontent.com/projectcalico/calico/release-v3.24/manifests/calico.yaml - kubectl -n kube-system set env daemonset/calico-node FELIX_IGNORELOOSERPF=true - - # Install ko - - run: |- - go install github.com/google/ko@latest - - # wait for Calico to be ready - - run: |- - kubectl wait pods -n kube-system -l k8s-app=calico-node --for=condition=Ready --timeout=90s - - - run: |- - LOG_DIR=/tmp/e2e/shared-server/artifacts ARTIFACT_DIR=/tmp/e2e COUNT=2 SUITES=transparent-multi-cluster:requires-kind \ - ./hack/run-with-prometheus.sh make test-e2e-shared - - - uses: cytopia/upload-artifact-retry-action@v0.1.7 - if: ${{ always() }} - with: - name: e2e-shared-server - path: /tmp/e2e/**/artifacts/ - - - uses: cytopia/upload-artifact-retry-action@v0.1.7 - if: ${{ always() }} - with: - name: e2e-shared-server-metrics - path: /tmp/e2e/**/metrics/ - - e2e-sharded: - name: e2e-sharded - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - uses: actions/setup-go@v4 - with: - go-version: v1.19 - cache: true - - run: make build - - # Install kind with a local registry - - uses: container-tools/kind-action@v2 - name: Kubernetes KinD Cluster w/local registry - with: - version: v0.17.0 - config: test/e2e/kind/config-calico.yaml - - # Install Calico - - run: |- - kubectl apply -f https://raw.githubusercontent.com/projectcalico/calico/release-v3.24/manifests/calico.yaml - kubectl -n kube-system set env daemonset/calico-node FELIX_IGNORELOOSERPF=true - - # Install ko - - run: |- - go install github.com/google/ko@latest - - # wait for Calico to be ready - - run: |- - kubectl wait pods -n kube-system -l k8s-app=calico-node --for=condition=Ready --timeout=90s - - - run: |- - LOG_DIR=/tmp/e2e/sharded/artifacts ARTIFACT_DIR=/tmp/e2e COUNT=2 SUITES=transparent-multi-cluster:requires-kind \ - ./hack/run-with-prometheus.sh make test-e2e-sharded - - - uses: cytopia/upload-artifact-retry-action@v0.1.7 - if: ${{ always() }} - with: - name: e2e-sharded - path: /tmp/e2e/**/artifacts/ - - - uses: cytopia/upload-artifact-retry-action@v0.1.7 - if: ${{ always() }} - with: - name: e2e-sharded-metrics - path: /tmp/e2e/**/metrics/ diff --git a/test/e2e/framework/config.go b/test/e2e/framework/config.go index c516d9abd8b..0bdbe868260 100644 --- a/test/e2e/framework/config.go +++ b/test/e2e/framework/config.go @@ -103,7 +103,7 @@ func registerFlags(c *testConfig) { flag.StringVar(&c.syncerImage, "syncer-image", "", "The syncer image to use with the pcluster. Requires --pcluster-kubeconfig") flag.StringVar(&c.kcpTestImage, "kcp-test-image", "", "The test image to use with the pcluster. 
Requires --pcluster-kubeconfig") flag.BoolVar(&c.useDefaultKCPServer, "use-default-kcp-server", false, "Whether to use server configuration from .kcp/admin.kubeconfig.") - flag.StringVar(&c.suites, "suites", "control-plane,transparent-multi-cluster,transparent-multi-cluster:requires-kind", "A comma-delimited list of suites to run.") + flag.StringVar(&c.suites, "suites", "control-plane", "A comma-delimited list of suites to run.") } // WriteLogicalClusterConfig creates a logical cluster config for the given config and From a9e37326f9fcf57d9205bb6e13948454890c84db Mon Sep 17 00:00:00 2001 From: Christoph Mewes Date: Thu, 1 Jun 2023 01:19:08 +0200 Subject: [PATCH 10/15] remove e2e-shared and e2e-sharded make targets --- Makefile | 56 -------------------------------------------------------- 1 file changed, 56 deletions(-) diff --git a/Makefile b/Makefile index 6ae9ac006fe..1b4cc4b73b2 100644 --- a/Makefile +++ b/Makefile @@ -123,10 +123,8 @@ build-all: .PHONY: build-kind-images build-kind-images-ko: require-ko - $(eval SYNCER_IMAGE=$(shell KO_DOCKER_REPO=kind.local KIND_CLUSTER_NAME=$(KIND_CLUSTER_NAME) ko build --platform=linux/$(ARCH) ./cmd/syncer)) $(eval TEST_IMAGE=$(shell KO_DOCKER_REPO=kind.local KIND_CLUSTER_NAME=$(KIND_CLUSTER_NAME) ko build --platform=linux/$(ARCH) ./test/e2e/fixtures/kcp-test-image)) build-kind-images: build-kind-images-ko - @test -n "$(SYNCER_IMAGE)" && (echo $(SYNCER_IMAGE) pushed to "$(KIND_CLUSTER_NAME)" kind cluster) || (echo Failed to create syncer image and/or to push it to "$(KIND_CLUSTER_NAME)" kind cluster; exit 1) @test -n "$(TEST_IMAGE)" && (echo $(TEST_IMAGE) pushed to "$(KIND_CLUSTER_NAME)" kind cluster) || (echo Failed to create test image and and/or to push it to "$(KIND_CLUSTER_NAME)" kind cluster; exit 1) install: WHAT ?= ./cmd/... @@ -278,31 +276,6 @@ test-e2e: build-all UNSAFE_E2E_HACK_DISABLE_ETCD_FSYNC=true NO_GORUN=1 GOOS=$(OS) GOARCH=$(ARCH) \ $(GO_TEST) -race $(COUNT_ARG) $(PARALLELISM_ARG) $(WHAT) $(TEST_ARGS) $(COMPLETE_SUITES_ARG) -.PHONY: test-e2e-shared -ifdef USE_GOTESTSUM -test-e2e-shared: $(GOTESTSUM) -endif -test-e2e-shared: TEST_ARGS ?= -test-e2e-shared: WHAT ?= ./test/e2e... -test-e2e-shared: WORK_DIR ?= . -ifdef ARTIFACT_DIR -test-e2e-shared: LOG_DIR ?= $(ARTIFACT_DIR)/kcp -else -test-e2e-shared: LOG_DIR ?= $(WORK_DIR)/.kcp -endif -test-e2e-shared: require-kind build-all build-kind-images - mkdir -p "$(LOG_DIR)" "$(WORK_DIR)/.kcp" - kind get kubeconfig > "$(WORK_DIR)/.kcp/kind.kubeconfig" - rm -f "$(WORK_DIR)/.kcp/ready-to-test" - UNSAFE_E2E_HACK_DISABLE_ETCD_FSYNC=true NO_GORUN=1 \ - ./bin/test-server --quiet --log-file-path="$(LOG_DIR)/kcp.log" $(TEST_SERVER_ARGS) 2>&1 & PID=$$! && echo "PID $$PID" && \ - trap 'kill -TERM $$PID' TERM INT EXIT && \ - while [ ! 
-f "$(WORK_DIR)/.kcp/ready-to-test" ]; do sleep 1; done && \ - echo 'Starting test(s)' && \ - NO_GORUN=1 GOOS=$(OS) GOARCH=$(ARCH) \ - $(GO_TEST) -race $(COUNT_ARG) $(PARALLELISM_ARG) $(WHAT) $(TEST_ARGS) \ - -args --use-default-kcp-server --syncer-image="$(SYNCER_IMAGE)" --kcp-test-image="$(TEST_IMAGE)" --pcluster-kubeconfig="$(abspath $(WORK_DIR)/.kcp/kind.kubeconfig)" $(SUITES_ARG) \ - $(if $(value WAIT),|| { echo "Terminated with $$?"; wait "$$PID"; },) .PHONY: test-e2e-shared-minimal ifdef USE_GOTESTSUM @@ -329,35 +302,6 @@ test-e2e-shared-minimal: build-all -args --use-default-kcp-server $(SUITES_ARG) \ $(if $(value WAIT),|| { echo "Terminated with $$?"; wait "$$PID"; },) -.PHONY: test-e2e-sharded -ifdef USE_GOTESTSUM -test-e2e-sharded: $(GOTESTSUM) -endif -test-e2e-sharded: TEST_ARGS ?= -test-e2e-sharded: WHAT ?= ./test/e2e... -test-e2e-sharded: WORK_DIR ?= . -test-e2e-sharded: SHARDS ?= 2 -ifdef ARTIFACT_DIR -test-e2e-sharded: LOG_DIR ?= $(ARTIFACT_DIR)/kcp -else -test-e2e-sharded: LOG_DIR ?= $(WORK_DIR)/.kcp -endif -test-e2e-sharded: require-kind build-all build-kind-images - mkdir -p "$(LOG_DIR)" "$(WORK_DIR)/.kcp" - kind get kubeconfig > "$(WORK_DIR)/.kcp/kind.kubeconfig" - rm -f "$(WORK_DIR)/.kcp/ready-to-test" - UNSAFE_E2E_HACK_DISABLE_ETCD_FSYNC=true NO_GORUN=1 \ - ./bin/sharded-test-server --quiet --v=2 --log-dir-path="$(LOG_DIR)" --work-dir-path="$(WORK_DIR)" --shard-run-virtual-workspaces=false $(TEST_SERVER_ARGS) --number-of-shards=$(SHARDS) --cache-synthetic-delay=500ms 2>&1 & PID=$$!; echo "PID $$PID" && \ - trap 'kill -TERM $$PID' TERM INT EXIT && \ - while [ ! -f "$(WORK_DIR)/.kcp/ready-to-test" ]; do sleep 1; done && \ - echo 'Starting test(s)' && \ - NO_GORUN=1 GOOS=$(OS) GOARCH=$(ARCH) \ - $(GO_TEST) -race $(COUNT_ARG) $(PARALLELISM_ARG) $(WHAT) $(TEST_ARGS) \ - -args --use-default-kcp-server --shard-kubeconfigs=root=$(PWD)/.kcp-0/admin.kubeconfig$(shell if [ $(SHARDS) -gt 1 ]; then seq 1 $$[$(SHARDS) - 1]; fi | while read n; do echo -n ",shard-$$n=$(PWD)/.kcp-$$n/admin.kubeconfig"; done) \ - $(SUITES_ARG) \ - --syncer-image="$(SYNCER_IMAGE)" --kcp-test-image="$(TEST_IMAGE)" --pcluster-kubeconfig="$(abspath $(WORK_DIR)/.kcp/kind.kubeconfig)" \ - $(if $(value WAIT),|| { echo "Terminated with $$?"; wait "$$PID"; },) - .PHONY: test-e2e-sharded-minimal ifdef USE_GOTESTSUM test-e2e-sharded-minimal: $(GOTESTSUM) From 62fae7401c449c80a7b1ec62854cf4df37d0f8cf Mon Sep 17 00:00:00 2001 From: Christoph Mewes Date: Thu, 1 Jun 2023 01:20:34 +0200 Subject: [PATCH 11/15] remove unused -syncer-image flag from e2e framework --- test/e2e/framework/config.go | 6 ------ 1 file changed, 6 deletions(-) diff --git a/test/e2e/framework/config.go b/test/e2e/framework/config.go index 0bdbe868260..e2adfa83953 100644 --- a/test/e2e/framework/config.go +++ b/test/e2e/framework/config.go @@ -43,7 +43,6 @@ func init() { } type testConfig struct { - syncerImage string kcpTestImage string pclusterKubeconfig string kcpKubeconfig string @@ -54,10 +53,6 @@ type testConfig struct { var TestConfig *testConfig -func (c *testConfig) SyncerImage() string { - return c.syncerImage -} - func (c *testConfig) KCPTestImage() string { return c.kcpTestImage } @@ -100,7 +95,6 @@ func registerFlags(c *testConfig) { flag.StringVar(&c.kcpKubeconfig, "kcp-kubeconfig", "", "Path to the kubeconfig for a kcp server.") flag.Var(cliflag.NewMapStringString(&c.shardKubeconfigs), "shard-kubeconfigs", "Paths to the kubeconfigs for a kcp shard server in the format =. 
If unset, kcp-kubeconfig is used.") flag.StringVar(&c.pclusterKubeconfig, "pcluster-kubeconfig", "", "Path to the kubeconfig for a kubernetes cluster to sync to. Requires --syncer-image.") - flag.StringVar(&c.syncerImage, "syncer-image", "", "The syncer image to use with the pcluster. Requires --pcluster-kubeconfig") flag.StringVar(&c.kcpTestImage, "kcp-test-image", "", "The test image to use with the pcluster. Requires --pcluster-kubeconfig") flag.BoolVar(&c.useDefaultKCPServer, "use-default-kcp-server", false, "Whether to use server configuration from .kcp/admin.kubeconfig.") flag.StringVar(&c.suites, "suites", "control-plane", "A comma-delimited list of suites to run.") From 0ee79b9bc5cb8a79903d6b6eb855b28afb7637d8 Mon Sep 17 00:00:00 2001 From: Christoph Mewes Date: Thu, 1 Jun 2023 12:41:07 +0200 Subject: [PATCH 12/15] remove tmc-related logging constants and tmc server controller handling --- tmc/pkg/logging/constants.go | 40 --------------- tmc/pkg/server/controllers.go | 72 --------------------------- tmc/pkg/server/options/controllers.go | 53 -------------------- tmc/pkg/server/options/options.go | 25 +++------- tmc/pkg/server/server.go | 12 +---- 5 files changed, 8 insertions(+), 194 deletions(-) delete mode 100644 tmc/pkg/logging/constants.go delete mode 100644 tmc/pkg/server/controllers.go delete mode 100644 tmc/pkg/server/options/controllers.go diff --git a/tmc/pkg/logging/constants.go b/tmc/pkg/logging/constants.go deleted file mode 100644 index a5f65dbb831..00000000000 --- a/tmc/pkg/logging/constants.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package logging supplies common constants to ensure consistent use of structured logs. -package logging - -const ( - // SyncTargetKeyPrefix is the prefix used for all the keys related to a SyncTarget. - SyncTargetKeyPrefix = "syncTarget." - - // SyncTargetWorkspace is used to specify a workspace when a log is related to a SyncTarget. - SyncTargetWorkspace = SyncTargetKeyPrefix + "workspace" - // SyncTargetNamespace is used to specify a namespace when a log is related to a SyncTarget. - SyncTargetNamespace = SyncTargetKeyPrefix + "namespace" - // SyncTargetName is used to specify a name when a log is related to a SyncTarget. - SyncTargetName = SyncTargetKeyPrefix + "name" - // SyncTargetKey is used to specify the obfuscated key of a SyncTarget as used in the Syncer labels and annotations. - SyncTargetKey = SyncTargetKeyPrefix + "key" - - // DownstreamKeyPrefix is the prefix used for all the keys related to a downstream object. - DownstreamKeyPrefix = "downstream." - - // DownstreamNamespace is used to specify a namespace when a log is related to a downstream object. - DownstreamNamespace = DownstreamKeyPrefix + "namespace" - // DownstreamName is used to specify a name when a log is related to a downstream object. 
- DownstreamName = DownstreamKeyPrefix + "name" -) diff --git a/tmc/pkg/server/controllers.go b/tmc/pkg/server/controllers.go deleted file mode 100644 index 1a639b6750e..00000000000 --- a/tmc/pkg/server/controllers.go +++ /dev/null @@ -1,72 +0,0 @@ -/* -Copyright 2023 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package server - -import ( - "context" - "fmt" - _ "net/http/pprof" - - kcpapiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/kcp/clientset/versioned" - genericapiserver "k8s.io/apiserver/pkg/server" - "k8s.io/client-go/rest" - "k8s.io/klog/v2" - - "github.com/kcp-dev/kcp/pkg/reconciler/apis/apiresource" - kcpclientset "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster" -) - -func postStartHookName(controllerName string) string { - return fmt.Sprintf("kcp-tmc-start-%s", controllerName) -} - -func (s *Server) installApiResourceController(ctx context.Context, config *rest.Config) error { - config = rest.CopyConfig(config) - config = rest.AddUserAgent(config, apiresource.ControllerName) - - crdClusterClient, err := kcpapiextensionsclientset.NewForConfig(config) - if err != nil { - return err - } - kcpClusterClient, err := kcpclientset.NewForConfig(config) - if err != nil { - return err - } - - c, err := apiresource.NewController( - crdClusterClient, - kcpClusterClient, - s.Core.KcpSharedInformerFactory.Apiresource().V1alpha1().NegotiatedAPIResources(), - s.Core.KcpSharedInformerFactory.Apiresource().V1alpha1().APIResourceImports(), - s.Core.ApiExtensionsSharedInformerFactory.Apiextensions().V1().CustomResourceDefinitions(), - ) - if err != nil { - return err - } - - return s.Core.AddPostStartHook(postStartHookName(apiresource.ControllerName), func(hookContext genericapiserver.PostStartHookContext) error { - logger := klog.FromContext(ctx).WithValues("postStartHook", postStartHookName(apiresource.ControllerName)) - if err := s.Core.WaitForSync(hookContext.StopCh); err != nil { - logger.Error(err, "failed to finish post-start-hook") - return nil // don't klog.Fatal. This only happens when context is cancelled. - } - - go c.Start(ctx, s.Options.Controllers.ApiResource.NumThreads) - - return nil - }) -} diff --git a/tmc/pkg/server/options/controllers.go b/tmc/pkg/server/options/controllers.go deleted file mode 100644 index 900c5bc8d3b..00000000000 --- a/tmc/pkg/server/options/controllers.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package options - -import ( - "github.com/spf13/pflag" - - apiresource "github.com/kcp-dev/kcp/pkg/reconciler/apis/apiresource/options" -) - -type Controllers struct { - ApiResource ApiResourceController -} - -type ApiResourceController = apiresource.Options - -func NewTmcControllers() *Controllers { - return &Controllers{ - ApiResource: *apiresource.NewOptions(), - } -} - -func (c *Controllers) AddFlags(fs *pflag.FlagSet) { - c.ApiResource.AddFlags(fs) -} - -func (c *Controllers) Complete(rootDir string) error { - return nil -} - -func (c *Controllers) Validate() []error { - var errs []error - - if err := c.ApiResource.Validate(); err != nil { - errs = append(errs, err) - } - - return errs -} diff --git a/tmc/pkg/server/options/options.go b/tmc/pkg/server/options/options.go index 3092afc5e7e..0943efb66ff 100644 --- a/tmc/pkg/server/options/options.go +++ b/tmc/pkg/server/options/options.go @@ -23,8 +23,7 @@ import ( ) type Options struct { - Core kcpcoreoptions.Options - TmcControllers Controllers + Core kcpcoreoptions.Options Extra ExtraOptions } @@ -33,8 +32,7 @@ type ExtraOptions struct { } type completedOptions struct { - Core kcpcoreoptions.CompletedOptions - Controllers Controllers + Core kcpcoreoptions.CompletedOptions Extra ExtraOptions } @@ -46,8 +44,7 @@ type CompletedOptions struct { // NewOptions creates a new Options with default parameters. func NewOptions(rootDir string) *Options { o := &Options{ - Core: *kcpcoreoptions.NewOptions(rootDir), - TmcControllers: *NewTmcControllers(), + Core: *kcpcoreoptions.NewOptions(rootDir), Extra: ExtraOptions{}, } @@ -57,16 +54,10 @@ func NewOptions(rootDir string) *Options { func (o *Options) AddFlags(fss *cliflag.NamedFlagSets) { o.Core.AddFlags(fss) - o.TmcControllers.AddFlags(fss.FlagSet("KCP Controllers")) } func (o *CompletedOptions) Validate() []error { - var errs []error - - errs = append(errs, o.Core.Validate()...) - errs = append(errs, o.Controllers.Validate()...) - - return errs + return o.Core.Validate() } func (o *Options) Complete(rootDir string) (*CompletedOptions, error) { @@ -74,15 +65,11 @@ func (o *Options) Complete(rootDir string) (*CompletedOptions, error) { if err != nil { return nil, err } - if err := o.TmcControllers.Complete(rootDir); err != nil { - return nil, err - } return &CompletedOptions{ completedOptions: &completedOptions{ - Core: *core, - Controllers: o.TmcControllers, - Extra: o.Extra, + Core: *core, + Extra: o.Extra, }, }, nil } diff --git a/tmc/pkg/server/server.go b/tmc/pkg/server/server.go index ad51483654a..91e262d348b 100644 --- a/tmc/pkg/server/server.go +++ b/tmc/pkg/server/server.go @@ -22,7 +22,6 @@ import ( "k8s.io/apimachinery/pkg/util/sets" genericapiserver "k8s.io/apiserver/pkg/server" - "k8s.io/client-go/rest" "k8s.io/klog/v2" configrootcompute "github.com/kcp-dev/kcp/config/rootcompute" @@ -55,9 +54,7 @@ func (s *Server) Run(ctx context.Context) error { logger := klog.FromContext(ctx).WithValues("component", "kcp") ctx = klog.NewContext(ctx, logger) - controllerConfig := rest.CopyConfig(s.Core.IdentityConfig) - - enabled := sets.New[string](s.Options.Core.Controllers.IndividuallyEnabled...) + enabled := sets.New(s.Options.Core.Controllers.IndividuallyEnabled...) 
if len(enabled) > 0 { logger.WithValues("controllers", enabled).Info("starting controllers individually") } @@ -76,7 +73,7 @@ func (s *Server) Run(ctx context.Context) error { if err := configrootcompute.Bootstrap(goContext(hookContext), s.Core.BootstrapApiExtensionsClusterClient, s.Core.BootstrapDynamicClusterClient, - sets.New[string](s.Core.Options.Extra.BatteriesIncluded...), + sets.New(s.Core.Options.Extra.BatteriesIncluded...), ); err != nil { logger.Error(err, "failed to bootstrap root compute workspace") return nil // don't klog.Fatal. This only happens when context is cancelled. @@ -87,11 +84,6 @@ func (s *Server) Run(ctx context.Context) error { }); err != nil { return err } - - // TODO(marun) Consider enabling each controller via a separate flag - if err := s.installApiResourceController(ctx, controllerConfig); err != nil { - return err - } } return s.Core.Run(ctx) From 11b6f4d45e155a4f9df01e059dd287ae4f8729ff Mon Sep 17 00:00:00 2001 From: Christoph Mewes Date: Thu, 1 Jun 2023 12:45:01 +0200 Subject: [PATCH 13/15] remove kcp-test-image code --- .github/workflows/kcp-test-image.yaml | 26 ------- Makefile | 6 -- test/e2e/fixtures/kcp-test-image/icc-test.go | 79 -------------------- test/e2e/framework/config.go | 12 --- 4 files changed, 123 deletions(-) delete mode 100644 .github/workflows/kcp-test-image.yaml delete mode 100644 test/e2e/fixtures/kcp-test-image/icc-test.go diff --git a/.github/workflows/kcp-test-image.yaml b/.github/workflows/kcp-test-image.yaml deleted file mode 100644 index dea0b3ea18d..00000000000 --- a/.github/workflows/kcp-test-image.yaml +++ /dev/null @@ -1,26 +0,0 @@ -name: Build in-cluster configuration test Image - -permissions: - packages: write - -on: - push: - branches: - - main - - 'release-*' - tags: - - 'v*' - -jobs: - syncer-image: - name: Build in-cluster configuration test Image - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - uses: actions/setup-go@v4 - with: - go-version: v1.19 - - # Build and push the kcp test image, tagged with the commit SHA and the branch name. - - uses: imjasonh/setup-ko@v0.6 - - run: ko publish -B --platform all ./test/e2e/fixtures/kcp-test-image -t $(git rev-parse --short "$GITHUB_SHA"),${{ github.ref_name }} diff --git a/Makefile b/Makefile index 1b4cc4b73b2..1686cb2cb84 100644 --- a/Makefile +++ b/Makefile @@ -121,12 +121,6 @@ build: require-jq require-go require-git verify-go-versions ## Build the project build-all: GOOS=$(OS) GOARCH=$(ARCH) $(MAKE) build WHAT='./cmd/...' -.PHONY: build-kind-images -build-kind-images-ko: require-ko - $(eval TEST_IMAGE=$(shell KO_DOCKER_REPO=kind.local KIND_CLUSTER_NAME=$(KIND_CLUSTER_NAME) ko build --platform=linux/$(ARCH) ./test/e2e/fixtures/kcp-test-image)) -build-kind-images: build-kind-images-ko - @test -n "$(TEST_IMAGE)" && (echo $(TEST_IMAGE) pushed to "$(KIND_CLUSTER_NAME)" kind cluster) || (echo Failed to create test image and and/or to push it to "$(KIND_CLUSTER_NAME)" kind cluster; exit 1) - install: WHAT ?= ./cmd/... install: require-jq require-go require-git verify-go-versions ## Install the project GOOS=$(OS) GOARCH=$(ARCH) CGO_ENABLED=0 go install -ldflags="$(LDFLAGS)" $(WHAT) diff --git a/test/e2e/fixtures/kcp-test-image/icc-test.go b/test/e2e/fixtures/kcp-test-image/icc-test.go deleted file mode 100644 index 58043a55beb..00000000000 --- a/test/e2e/fixtures/kcp-test-image/icc-test.go +++ /dev/null @@ -1,79 +0,0 @@ -/* -Copyright 2022 The KCP Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -import ( - "context" - "errors" - "log" - "os" - "os/signal" - "strings" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - kubernetesclient "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" -) - -func main() { - ctx, _ := signal.NotifyContext(context.Background(), os.Interrupt) - configMapName := os.Getenv("CONFIGMAP_NAME") - - if configMapName == "" { - log.Panicln("ENV variable CONFIGMAP_NAME not specified") - } - - namespace, err := DetectNamespace() - if err != nil { - log.Panicln("no namespace detected", err) - } - - config, err := rest.InClusterConfig() - if err != nil { - log.Panicln("failed to create in-cluster config", err) - } - - clientset, err := kubernetesclient.NewForConfig(config) - if err != nil { - log.Panicln("failed to create clientset with in-cluster config", config, err) - } - - configMap := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: configMapName, - }, - } - _, err = clientset.CoreV1().ConfigMaps(namespace).Create(ctx, configMap, metav1.CreateOptions{}) - if err != nil { - log.Panicln("failed to create configmap", err) - } - - log.Printf("configmap %s created. Going to sleep.\n", configMapName) - - <-ctx.Done() -} - -func DetectNamespace() (string, error) { - if namespaceData, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace"); err == nil { - if namespace := strings.TrimSpace(string(namespaceData)); len(namespace) > 0 { - return namespace, nil - } - return "", err - } - return "", errors.New("failed to detect in-cluster namespace") -} diff --git a/test/e2e/framework/config.go b/test/e2e/framework/config.go index e2adfa83953..ffd11538c97 100644 --- a/test/e2e/framework/config.go +++ b/test/e2e/framework/config.go @@ -43,8 +43,6 @@ func init() { } type testConfig struct { - kcpTestImage string - pclusterKubeconfig string kcpKubeconfig string shardKubeconfigs map[string]string useDefaultKCPServer bool @@ -53,14 +51,6 @@ type testConfig struct { var TestConfig *testConfig -func (c *testConfig) KCPTestImage() string { - return c.kcpTestImage -} - -func (c *testConfig) PClusterKubeconfig() string { - return c.pclusterKubeconfig -} - func (c *testConfig) KCPKubeconfig() string { // TODO(marun) How to validate before use given that the testing package is calling flags.Parse()? if c.useDefaultKCPServer && len(c.kcpKubeconfig) > 0 { @@ -94,8 +84,6 @@ func init() { func registerFlags(c *testConfig) { flag.StringVar(&c.kcpKubeconfig, "kcp-kubeconfig", "", "Path to the kubeconfig for a kcp server.") flag.Var(cliflag.NewMapStringString(&c.shardKubeconfigs), "shard-kubeconfigs", "Paths to the kubeconfigs for a kcp shard server in the format =. If unset, kcp-kubeconfig is used.") - flag.StringVar(&c.pclusterKubeconfig, "pcluster-kubeconfig", "", "Path to the kubeconfig for a kubernetes cluster to sync to. Requires --syncer-image.") - flag.StringVar(&c.kcpTestImage, "kcp-test-image", "", "The test image to use with the pcluster. 
Requires --pcluster-kubeconfig") flag.BoolVar(&c.useDefaultKCPServer, "use-default-kcp-server", false, "Whether to use server configuration from .kcp/admin.kubeconfig.") flag.StringVar(&c.suites, "suites", "control-plane", "A comma-delimited list of suites to run.") } From 3e63afb602e41ddd6f73a174a0f60ccb6aa7dfd5 Mon Sep 17 00:00:00 2001 From: Christoph Mewes Date: Thu, 1 Jun 2023 12:50:34 +0200 Subject: [PATCH 14/15] remove last remnants of pkg/syncer --- pkg/syncer/shared/cleaner.go | 27 ------- pkg/syncer/shared/helpers.go | 91 ----------------------- pkg/syncer/shared/helpers_test.go | 82 --------------------- pkg/syncer/shared/namespace.go | 108 ---------------------------- pkg/syncer/shared/namespace_test.go | 101 -------------------------- 5 files changed, 409 deletions(-) delete mode 100644 pkg/syncer/shared/cleaner.go delete mode 100644 pkg/syncer/shared/helpers.go delete mode 100644 pkg/syncer/shared/helpers_test.go delete mode 100644 pkg/syncer/shared/namespace.go delete mode 100644 pkg/syncer/shared/namespace_test.go diff --git a/pkg/syncer/shared/cleaner.go b/pkg/syncer/shared/cleaner.go deleted file mode 100644 index 381be51a5dd..00000000000 --- a/pkg/syncer/shared/cleaner.go +++ /dev/null @@ -1,27 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package shared - -// Cleaner is an interface for cleaning up resources. -type Cleaner interface { - // PlanCleaning adds the key to the list of keys to be cleaned up. - // If the resource was already planned for cleaning the previous timestamp is kept. - PlanCleaning(key string) - // CancelCleaning removes the key from the list of keys to be cleaned up. - // If it wasn't planned for deletion, it does nothing. - CancelCleaning(key string) -} diff --git a/pkg/syncer/shared/helpers.go b/pkg/syncer/shared/helpers.go deleted file mode 100644 index 8c9a094d485..00000000000 --- a/pkg/syncer/shared/helpers.go +++ /dev/null @@ -1,91 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package shared - -import ( - "crypto/sha256" - "encoding/json" - "fmt" - "math/big" - "strings" - - "github.com/kcp-dev/logicalcluster/v3" - "github.com/martinlindhe/base36" - - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/sets" -) - -// SyncableClusterScopedResources holds a set of cluster-wide GVR that are allowed to be synced. 
-var SyncableClusterScopedResources = sets.New[string](schema.GroupVersionResource{Group: "", Version: "v1", Resource: "persistentvolumes"}.String()) - -// GetUpstreamResourceName returns the name with which the resource is known upstream. -func GetUpstreamResourceName(downstreamResourceGVR schema.GroupVersionResource, downstreamResourceName string) string { - configMapGVR := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "configmaps"} - secretGVR := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "secrets"} - - if downstreamResourceGVR == configMapGVR && downstreamResourceName == "kcp-root-ca.crt" { - return "kube-root-ca.crt" - } - if downstreamResourceGVR == secretGVR && strings.HasPrefix(downstreamResourceName, "kcp-default-token") { - return strings.TrimPrefix(downstreamResourceName, "kcp-") - } - return downstreamResourceName -} - -// GetDNSID returns a unique ID for DNS object derived from the sync target name, its UID and workspace. It's -// a valid DNS segment and can be used as namespace or object names. -func GetDNSID(clusterName logicalcluster.Name, syncTargetUID types.UID, syncTargetName string) string { - syncerHash := sha256.Sum224([]byte(syncTargetUID)) - uid36hash := strings.ToLower(base36.EncodeBytes(syncerHash[:])) - workspaceHash := sha256.Sum224([]byte(clusterName.String())) - workspace36hash := strings.ToLower(base36.EncodeBytes(workspaceHash[:])) - - return fmt.Sprintf("kcp-dns-%s-%s-%s", syncTargetName, uid36hash[:8], workspace36hash[:8]) -} - -// GetTenantID encodes the KCP tenant to which the namespace designated by the given -// NamespaceLocator belongs. It is based on the NamespaceLocator, but with an empty -// namespace value. The value will be the same for all downstream namespaces originating -// from the same KCP workspace / SyncTarget. -// The encoding is repeatable. -func GetTenantID(l NamespaceLocator) (string, error) { - clusterWideLocator := NamespaceLocator{ - SyncTarget: l.SyncTarget, - ClusterName: l.ClusterName, - } - - b, err := json.Marshal(clusterWideLocator) - if err != nil { - return "", err - } - - hash := sha256.Sum224(b) - var i big.Int - i.SetBytes(hash[:]) - return i.Text(62), nil -} - -func ContainsGVR(gvrs []schema.GroupVersionResource, gvr schema.GroupVersionResource) bool { - for _, item := range gvrs { - if gvr == item { - return true - } - } - return false -} diff --git a/pkg/syncer/shared/helpers_test.go b/pkg/syncer/shared/helpers_test.go deleted file mode 100644 index 01bf197cb34..00000000000 --- a/pkg/syncer/shared/helpers_test.go +++ /dev/null @@ -1,82 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package shared - -import ( - "testing" - - "k8s.io/apimachinery/pkg/runtime/schema" -) - -func TestGetUpstreamResourceName(t *testing.T) { - tests := []struct { - name string - gvr schema.GroupVersionResource - resource string - want string - }{ - { - name: "kcp-root-ca.crt configmap, should be translated to kube-root-ca.crt", - gvr: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "configmaps"}, - resource: "kcp-root-ca.crt", - want: "kube-root-ca.crt", - }, - { - name: "not kcp-root-ca.crt configmap, should not be translated", - gvr: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "configmaps"}, - resource: "my-configmap", - want: "my-configmap", - }, - { - name: "a default token secret with kcp prefix, should be translated", - gvr: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "secrets"}, - resource: "kcp-default-token-1234", - want: "default-token-1234", - }, - { - name: "a non default token secret without kcp prefix, should not be translated", - gvr: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "secrets"}, - resource: "my-super-secret", - want: "my-super-secret", - }, - { - name: "a different GVR than configmap or secret, should not be translated", - gvr: schema.GroupVersionResource{Group: "random", Version: "v1", Resource: "another"}, - resource: "kcp-foo", - want: "kcp-foo", - }, - { - name: "a configmap with a kcp prefix, shouldn't be translated", - gvr: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "configmaps"}, - resource: "kcp-default-token-1234", - want: "kcp-default-token-1234", - }, - { - name: "invalid GVR, should not be translated", - gvr: schema.GroupVersionResource{Group: "", Version: "", Resource: ""}, - resource: "kcp-foo", - want: "kcp-foo", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := GetUpstreamResourceName(tt.gvr, tt.resource); got != tt.want { - t.Errorf("GetUpstreamResourceName() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/pkg/syncer/shared/namespace.go b/pkg/syncer/shared/namespace.go deleted file mode 100644 index acc114a46b7..00000000000 --- a/pkg/syncer/shared/namespace.go +++ /dev/null @@ -1,108 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package shared - -import ( - "crypto/sha256" - "encoding/json" - "fmt" - "strings" - - "github.com/kcp-dev/logicalcluster/v3" - "github.com/martinlindhe/base36" - - "k8s.io/apimachinery/pkg/types" -) - -const ( - NamespaceLocatorAnnotation = "kcp.io/namespace-locator" - TenantIDLabel = "kcp.io/tenant-id" -) - -// NamespaceLocator stores a logical cluster and namespace and is used -// as the source for the mapped namespace name in a physical cluster. 
-type NamespaceLocator struct { - SyncTarget SyncTargetLocator `json:"syncTarget"` - ClusterName logicalcluster.Name `json:"cluster,omitempty"` - Namespace string `json:"namespace"` -} - -type SyncTargetLocator struct { - ClusterName string `json:"cluster,omitempty"` - DeprecatedPath string `json:"path,omitempty"` - Name string `json:"name"` - UID types.UID `json:"uid"` -} - -func NewNamespaceLocator(workspace, syncTargetClusterName logicalcluster.Name, syncTargetUID types.UID, syncTargetName, upstreamNamespace string) NamespaceLocator { - return NamespaceLocator{ - SyncTarget: SyncTargetLocator{ - ClusterName: syncTargetClusterName.String(), - Name: syncTargetName, - UID: syncTargetUID, - }, - ClusterName: workspace, - Namespace: upstreamNamespace, - } -} - -func NewNamespaceLocatorV060(workspace, syncTargetClusterName logicalcluster.Name, syncTargetUID types.UID, syncTargetName, upstreamNamespace string) NamespaceLocator { - return NamespaceLocator{ - SyncTarget: SyncTargetLocator{ - DeprecatedPath: syncTargetClusterName.String(), - Name: syncTargetName, - UID: syncTargetUID, - }, - ClusterName: workspace, - Namespace: upstreamNamespace, - } -} - -func LocatorFromAnnotations(annotations map[string]string) (*NamespaceLocator, bool, error) { - annotation, ok := annotations[NamespaceLocatorAnnotation] - if !ok { - return nil, false, nil - } - var locator NamespaceLocator - if err := json.Unmarshal([]byte(annotation), &locator); err != nil { - return nil, false, err - } - - // get us from v0.6.0 locators (using syncTarget.path) to v0.6.1+ (using syncTarget.workspace) - if locator.SyncTarget.ClusterName == "" { - locator.SyncTarget.ClusterName = locator.SyncTarget.DeprecatedPath - } - locator.SyncTarget.DeprecatedPath = "" - - return &locator, true, nil -} - -// PhysicalClusterNamespaceName encodes the NamespaceLocator into a new -// namespace name for use on a physical cluster. The encoding is repeatable. -func PhysicalClusterNamespaceName(l NamespaceLocator) (string, error) { - b, err := json.Marshal(l) - if err != nil { - return "", err - } - // hash the marshalled locator - hash := sha256.Sum224(b) - // convert the hash to base36 (alphanumeric) to decrease collision probabilities - base36hash := strings.ToLower(base36.EncodeBytes(hash[:])) - // use 12 chars of the base36hash, should be enough to avoid collisions and - // keep the namespaces short enough. - return fmt.Sprintf("kcp-%s", base36hash[:12]), nil -} diff --git a/pkg/syncer/shared/namespace_test.go b/pkg/syncer/shared/namespace_test.go deleted file mode 100644 index 165fbce77e5..00000000000 --- a/pkg/syncer/shared/namespace_test.go +++ /dev/null @@ -1,101 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package shared - -import ( - "reflect" - "strings" - "testing" - - "github.com/kcp-dev/logicalcluster/v3" -) - -func TestLocatorFromAnnotations(t *testing.T) { - tests := []struct { - name string - annotations map[string]string - want *NamespaceLocator - wantFound bool - wantErrs []string - }{ - { - name: "no annotation", - wantFound: false, - }, - { - name: "garbage", - annotations: map[string]string{ - NamespaceLocatorAnnotation: "garbage", - }, - wantErrs: []string{"invalid character"}, - }, - { - name: "happy case", - annotations: map[string]string{ - NamespaceLocatorAnnotation: `{"syncTarget":{"cluster":"test-workspace","name":"test-name","uid":"test-uid"},"cluster":"test-workspace","namespace":"test-namespace"}`, - }, - want: &NamespaceLocator{ - SyncTarget: SyncTargetLocator{ - ClusterName: "test-workspace", - Name: "test-name", - UID: "test-uid", - }, - ClusterName: logicalcluster.Name("test-workspace"), - Namespace: "test-namespace", - }, - wantFound: true, - }, - { - name: "format up to v0.6.0", - annotations: map[string]string{ - NamespaceLocatorAnnotation: `{"syncTarget":{"path":"test-workspace","name":"test-name","uid":"test-uid"},"cluster":"test-workspace","namespace":"test-namespace"}`, - }, - want: &NamespaceLocator{ - SyncTarget: SyncTargetLocator{ - ClusterName: "test-workspace", - Name: "test-name", - UID: "test-uid", - }, - ClusterName: logicalcluster.Name("test-workspace"), - Namespace: "test-namespace", - }, - wantFound: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, gotFound, err := LocatorFromAnnotations(tt.annotations) - if (err != nil) != (len(tt.wantErrs) > 0) { - t.Errorf("LocatorFromAnnotations() error = %q, wantErrs %v", err.Error(), tt.wantErrs) - return - } else if err != nil { - for _, wantErr := range tt.wantErrs { - if !strings.Contains(err.Error(), wantErr) { - t.Errorf("LocatorFromAnnotations() error = %q, wantErrs %q", err.Error(), wantErr) - return - } - } - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("LocatorFromAnnotations() got = %v, want %v", got, tt.want) - } - if gotFound != tt.wantFound { - t.Errorf("LocatorFromAnnotations() gotFound = %v, want %v", gotFound, tt.wantFound) - } - }) - } -} From c2b3247ecf907d3f265724d8793c3b354acf5b1d Mon Sep 17 00:00:00 2001 From: Christoph Mewes Date: Thu, 1 Jun 2023 13:00:18 +0200 Subject: [PATCH 15/15] now that tmc/ is just an empty wrapper around pkg/server, get rid of it entirely --- cmd/kcp/kcp.go | 16 ++--- cmd/kcp/options/options.go | 8 +-- test/e2e/framework/kcp.go | 6 +- tmc/pkg/server/config.go | 77 ---------------------- tmc/pkg/server/options/options.go | 75 ---------------------- tmc/pkg/server/server.go | 102 ------------------------------ 6 files changed, 15 insertions(+), 269 deletions(-) delete mode 100644 tmc/pkg/server/config.go delete mode 100644 tmc/pkg/server/options/options.go delete mode 100644 tmc/pkg/server/server.go diff --git a/cmd/kcp/kcp.go b/cmd/kcp/kcp.go index b15b652aef3..0f57063d12b 100644 --- a/cmd/kcp/kcp.go +++ b/cmd/kcp/kcp.go @@ -42,7 +42,7 @@ import ( "github.com/kcp-dev/kcp/pkg/cmd/help" "github.com/kcp-dev/kcp/pkg/embeddedetcd" kcpfeatures "github.com/kcp-dev/kcp/pkg/features" - tmcserver "github.com/kcp-dev/kcp/tmc/pkg/server" + "github.com/kcp-dev/kcp/pkg/server" ) func main() { @@ -83,7 +83,7 @@ func main() { } serverOptions := options.NewOptions(rootDir) - serverOptions.Server.Core.GenericControlPlane.Logs.Verbosity = logsapiv1.VerbosityLevel(2) + serverOptions.Server.GenericControlPlane.Logs.Verbosity = 
logsapiv1.VerbosityLevel(2) startCmd := &cobra.Command{ Use: "start", @@ -105,7 +105,7 @@ func main() { }, RunE: func(cmd *cobra.Command, args []string) error { // run as early as possible to avoid races later when some components (e.g. grpc) start early using klog - if err := logsapiv1.ValidateAndApply(serverOptions.Server.Core.GenericControlPlane.Logs, kcpfeatures.DefaultFeatureGate); err != nil { + if err := logsapiv1.ValidateAndApply(serverOptions.Server.GenericControlPlane.Logs, kcpfeatures.DefaultFeatureGate); err != nil { return err } @@ -119,9 +119,9 @@ func main() { } logger := klog.FromContext(cmd.Context()) - logger.Info("running with selected batteries", "batteries", strings.Join(completed.Server.Core.Extra.BatteriesIncluded, ",")) + logger.Info("running with selected batteries", "batteries", strings.Join(completed.Server.Extra.BatteriesIncluded, ",")) - config, err := tmcserver.NewConfig(completed.Server) + config, err := server.NewConfig(completed.Server) if err != nil { return err } @@ -134,13 +134,13 @@ func main() { ctx := genericapiserver.SetupSignalContext() // the etcd server must be up before NewServer because storage decorators access it right away - if completedConfig.Core.EmbeddedEtcd.Config != nil { - if err := embeddedetcd.NewServer(completedConfig.Core.EmbeddedEtcd).Run(ctx); err != nil { + if completedConfig.EmbeddedEtcd.Config != nil { + if err := embeddedetcd.NewServer(completedConfig.EmbeddedEtcd).Run(ctx); err != nil { return err } } - s, err := tmcserver.NewServer(completedConfig) + s, err := server.NewServer(completedConfig) if err != nil { return err } diff --git a/cmd/kcp/options/options.go b/cmd/kcp/options/options.go index c839b292c46..71a26e21c3c 100644 --- a/cmd/kcp/options/options.go +++ b/cmd/kcp/options/options.go @@ -22,14 +22,14 @@ import ( cliflag "k8s.io/component-base/cli/flag" kcpcoreoptions "github.com/kcp-dev/kcp/cmd/kcp-core/options" - tmcserveroptions "github.com/kcp-dev/kcp/tmc/pkg/server/options" + serveroptions "github.com/kcp-dev/kcp/pkg/server/options" ) type Options struct { Output io.Writer Generic kcpcoreoptions.GenericOptions - Server tmcserveroptions.Options + Server serveroptions.Options Extra ExtraOptions } @@ -39,7 +39,7 @@ func NewOptions(rootDir string) *Options { opts := &Options{ Output: nil, - Server: *tmcserveroptions.NewOptions(rootDir), + Server: *serveroptions.NewOptions(rootDir), Generic: *kcpcoreoptions.NewGeneric(rootDir), Extra: ExtraOptions{}, } @@ -51,7 +51,7 @@ type completedOptions struct { Output io.Writer Generic kcpcoreoptions.GenericOptions - Server tmcserveroptions.CompletedOptions + Server serveroptions.CompletedOptions Extra ExtraOptions } diff --git a/test/e2e/framework/kcp.go b/test/e2e/framework/kcp.go index f1935b6142b..564a12837b0 100644 --- a/test/e2e/framework/kcp.go +++ b/test/e2e/framework/kcp.go @@ -61,10 +61,10 @@ import ( kcpoptions "github.com/kcp-dev/kcp/cmd/kcp/options" "github.com/kcp-dev/kcp/cmd/sharded-test-server/third_party/library-go/crypto" "github.com/kcp-dev/kcp/pkg/embeddedetcd" + "github.com/kcp-dev/kcp/pkg/server" corev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1" kcpclientset "github.com/kcp-dev/kcp/sdk/client/clientset/versioned/cluster" kubefixtures "github.com/kcp-dev/kcp/test/e2e/fixtures/kube" - "github.com/kcp-dev/kcp/tmc/pkg/server" ) // TestServerArgs returns the set of kcp args used to start a test @@ -664,8 +664,8 @@ func (c *kcpServer) Run(opts ...RunOption) error { } // the etcd server must be up before NewServer because storage decorators access it 
right away - if completedConfig.Core.EmbeddedEtcd.Config != nil { - if err := embeddedetcd.NewServer(completedConfig.Core.EmbeddedEtcd).Run(ctx); err != nil { + if completedConfig.EmbeddedEtcd.Config != nil { + if err := embeddedetcd.NewServer(completedConfig.EmbeddedEtcd).Run(ctx); err != nil { return err } } diff --git a/tmc/pkg/server/config.go b/tmc/pkg/server/config.go deleted file mode 100644 index 9746dc738a6..00000000000 --- a/tmc/pkg/server/config.go +++ /dev/null @@ -1,77 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package server - -import ( - _ "net/http/pprof" - - coreserver "github.com/kcp-dev/kcp/pkg/server" - "github.com/kcp-dev/kcp/tmc/pkg/server/options" -) - -type Config struct { - Options options.CompletedOptions - - Core *coreserver.Config - - ExtraConfig -} - -type ExtraConfig struct { -} - -type completedConfig struct { - Options options.CompletedOptions - Core coreserver.CompletedConfig - - ExtraConfig -} - -type CompletedConfig struct { - // Embed a private pointer that cannot be instantiated outside of this package. - *completedConfig -} - -// Complete fills in any fields not set that are required to have valid data. It's mutating the receiver. -func (c *Config) Complete() (CompletedConfig, error) { - core, err := c.Core.Complete() - if err != nil { - return CompletedConfig{}, err - } - - return CompletedConfig{&completedConfig{ - Options: c.Options, - Core: core, - - ExtraConfig: c.ExtraConfig, - }}, nil -} - -func NewConfig(opts options.CompletedOptions) (*Config, error) { - core, err := coreserver.NewConfig(opts.Core) - if err != nil { - return nil, err - } - - c := &Config{ - Options: opts, - Core: core, - ExtraConfig: ExtraConfig{}, - } - - return c, nil -} diff --git a/tmc/pkg/server/options/options.go b/tmc/pkg/server/options/options.go deleted file mode 100644 index 0943efb66ff..00000000000 --- a/tmc/pkg/server/options/options.go +++ /dev/null @@ -1,75 +0,0 @@ -/* -Copyright 2022 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package options - -import ( - cliflag "k8s.io/component-base/cli/flag" - - kcpcoreoptions "github.com/kcp-dev/kcp/pkg/server/options" -) - -type Options struct { - Core kcpcoreoptions.Options - - Extra ExtraOptions -} - -type ExtraOptions struct { -} - -type completedOptions struct { - Core kcpcoreoptions.CompletedOptions - - Extra ExtraOptions -} - -type CompletedOptions struct { - *completedOptions -} - -// NewOptions creates a new Options with default parameters. 
-func NewOptions(rootDir string) *Options { - o := &Options{ - Core: *kcpcoreoptions.NewOptions(rootDir), - - Extra: ExtraOptions{}, - } - - return o -} - -func (o *Options) AddFlags(fss *cliflag.NamedFlagSets) { - o.Core.AddFlags(fss) -} - -func (o *CompletedOptions) Validate() []error { - return o.Core.Validate() -} - -func (o *Options) Complete(rootDir string) (*CompletedOptions, error) { - core, err := o.Core.Complete(rootDir) - if err != nil { - return nil, err - } - - return &CompletedOptions{ - completedOptions: &completedOptions{ - Core: *core, - Extra: o.Extra, - }, - }, nil -} diff --git a/tmc/pkg/server/server.go b/tmc/pkg/server/server.go deleted file mode 100644 index 91e262d348b..00000000000 --- a/tmc/pkg/server/server.go +++ /dev/null @@ -1,102 +0,0 @@ -/* -Copyright 2021 The KCP Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package server - -import ( - "context" - _ "net/http/pprof" - - "k8s.io/apimachinery/pkg/util/sets" - genericapiserver "k8s.io/apiserver/pkg/server" - "k8s.io/klog/v2" - - configrootcompute "github.com/kcp-dev/kcp/config/rootcompute" - coreserver "github.com/kcp-dev/kcp/pkg/server" - corev1alpha1 "github.com/kcp-dev/kcp/sdk/apis/core/v1alpha1" -) - -type Server struct { - CompletedConfig - - Core *coreserver.Server -} - -func NewServer(c CompletedConfig) (*Server, error) { - core, err := coreserver.NewServer(c.Core) - if err != nil { - return nil, err - } - - s := &Server{ - CompletedConfig: c, - - Core: core, - } - - return s, nil -} - -func (s *Server) Run(ctx context.Context) error { - logger := klog.FromContext(ctx).WithValues("component", "kcp") - ctx = klog.NewContext(ctx, logger) - - enabled := sets.New(s.Options.Core.Controllers.IndividuallyEnabled...) - if len(enabled) > 0 { - logger.WithValues("controllers", enabled).Info("starting controllers individually") - } - - if s.Options.Core.Controllers.EnableAll || enabled.Has("cluster") { - // bootstrap root compute workspace - computeBoostrapHookName := "rootComputeBoostrap" - if err := s.Core.AddPostStartHook(computeBoostrapHookName, func(hookContext genericapiserver.PostStartHookContext) error { - logger := logger.WithValues("postStartHook", computeBoostrapHookName) - if s.Core.Options.Extra.ShardName == corev1alpha1.RootShard { - // the root ws is only present on the root shard - logger.Info("waiting to bootstrap root compute workspace until root phase1 is complete") - s.Core.WaitForPhase1Finished() - - logger.Info("starting bootstrapping root compute workspace") - if err := configrootcompute.Bootstrap(goContext(hookContext), - s.Core.BootstrapApiExtensionsClusterClient, - s.Core.BootstrapDynamicClusterClient, - sets.New(s.Core.Options.Extra.BatteriesIncluded...), - ); err != nil { - logger.Error(err, "failed to bootstrap root compute workspace") - return nil // don't klog.Fatal. This only happens when context is cancelled. 
- } - logger.Info("finished bootstrapping root compute workspace") - } - return nil - }); err != nil { - return err - } - } - - return s.Core.Run(ctx) -} - -// goContext turns the PostStartHookContext into a context.Context for use in routines that may or may not -// run inside of a post-start-hook. The k8s APIServer wrote the post-start-hook context code before contexts -// were part of the Go stdlib. -func goContext(parent genericapiserver.PostStartHookContext) context.Context { - ctx, cancel := context.WithCancel(context.Background()) - go func(done <-chan struct{}) { - <-done - cancel() - }(parent.StopCh) - return ctx -}
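
The goContext helper deleted in the last hunk above bridges the pre-context Kubernetes post-start-hook API (a bare stop channel) to a context.Context. Below is a minimal, self-contained sketch of the same pattern, assuming only the standard library; the channel stands in for genericapiserver.PostStartHookContext.StopCh, and the function and variable names are illustrative, not kcp API.

package main

import (
	"context"
	"fmt"
)

// stopChToContext returns a context that is cancelled once done is closed,
// mirroring what the removed goContext did with a PostStartHookContext.
func stopChToContext(done <-chan struct{}) context.Context {
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		<-done // block until the stop signal arrives
		cancel()
	}()
	return ctx
}

func main() {
	stopCh := make(chan struct{})
	ctx := stopChToContext(stopCh)

	close(stopCh)                        // simulate the apiserver shutting down
	<-ctx.Done()                         // unblocks shortly after stopCh is closed
	fmt.Println("cancelled:", ctx.Err()) // context.Canceled
}

The design choice matches the deleted file: hooks only receive a channel, so a single goroutine converts the one-shot close signal into context cancellation that downstream code can consume.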
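
On the same note, the pkg/syncer/shared helpers removed in PATCH 14/15 (PhysicalClusterNamespaceName, GetTenantID, GetDNSID) all relied on one deterministic encoding: marshal a locator struct to JSON, hash it with SHA-224, base36-encode the digest, and keep a short prefix. Here is a self-contained sketch of that scheme, assuming the same github.com/martinlindhe/base36 module the deleted code imported; the two-field locator below is a simplified stand-in for the deleted NamespaceLocator type (the real one also carried SyncTarget identity).

package main

import (
	"crypto/sha256"
	"encoding/json"
	"fmt"
	"strings"

	"github.com/martinlindhe/base36"
)

// locator is an illustrative stand-in for the deleted NamespaceLocator.
type locator struct {
	Cluster   string `json:"cluster"`
	Namespace string `json:"namespace"`
}

// downstreamNamespaceName derives a stable namespace name for use on a
// physical cluster: JSON marshaling is deterministic for a fixed struct,
// so equal locators always produce the same name.
func downstreamNamespaceName(l locator) (string, error) {
	b, err := json.Marshal(l)
	if err != nil {
		return "", err
	}
	hash := sha256.Sum224(b)
	// base36 keeps the result alphanumeric, i.e. a valid DNS label segment;
	// twelve characters keep names short while making collisions unlikely.
	base36hash := strings.ToLower(base36.EncodeBytes(hash[:]))
	return "kcp-" + base36hash[:12], nil
}

func main() {
	name, _ := downstreamNamespaceName(locator{Cluster: "root:compute", Namespace: "default"})
	fmt.Println(name) // stable across runs for the same input
}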