Migrating config entries into custom resources #419

Merged: 1 commit, Jan 25, 2021
4 changes: 4 additions & 0 deletions CHANGELOG.md
@@ -28,6 +28,10 @@ BREAKING CHANGES
protocol: "http"
```

FEATURES:
* CRDs: support the annotation `consul.hashicorp.com/migrate-entry` on custom resources,
allowing an existing config entry to be migrated onto a Kubernetes custom resource (see the sketch below). [[GH-419](https://github.com/hashicorp/consul-k8s/pull/419)]

## 0.23.0 (January 22, 2021)

BUG FIXES:
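As a quick illustration of the feature described in the changelog entry above, a resource opting into migration carries the annotation alongside a spec mirroring the existing Consul entry. This is a minimal sketch using the repo's own types; the name `web`, the namespace, and the protocol value are placeholder assumptions, not values from this PR:

```go
// Sketch: a ServiceDefaults resource opting into migration of an existing
// Consul config entry. "web" and "http" are example values; the spec must
// match the entry already in Consul or the migration will fail.
svcDefaults := &v1alpha1.ServiceDefaults{
	ObjectMeta: metav1.ObjectMeta{
		Name:      "web", // must equal the existing Consul config entry's name
		Namespace: "default",
		Annotations: map[string]string{
			common.MigrateEntryKey: common.MigrateEntryTrue, // "consul.hashicorp.com/migrate-entry": "true"
		},
	},
	Spec: v1alpha1.ServiceDefaultsSpec{
		Protocol: "http",
	},
}
```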
8 changes: 5 additions & 3 deletions api/common/common.go
@@ -15,7 +15,9 @@ const (
DefaultConsulNamespace string = "default"
WildcardNamespace string = "*"

SourceKey string = "external-source"
DatacenterKey string = "consul.hashicorp.com/source-datacenter"
SourceValue string = "kubernetes"
SourceKey string = "external-source"
DatacenterKey string = "consul.hashicorp.com/source-datacenter"
MigrateEntryKey string = "consul.hashicorp.com/migrate-entry"
MigrateEntryTrue string = "true"
SourceValue string = "kubernetes"
)
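The first two keys are stamped into the `Meta` of every config entry the controller writes; the new pair gates migration. A hedged sketch of what managed-entry metadata looks like, built from the constants above (the datacenter value `dc1` is an example, not from this diff):

```go
// Metadata on a Kubernetes-managed Consul config entry (illustrative sketch;
// "dc1" stands in for the controller's configured datacenter name).
meta := map[string]string{
	common.SourceKey:     common.SourceValue, // "external-source": "kubernetes"
	common.DatacenterKey: "dc1",              // "consul.hashicorp.com/source-datacenter": "dc1"
}
```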
54 changes: 53 additions & 1 deletion controller/configentry_controller.go
@@ -2,6 +2,7 @@ package controller

import (
"context"
"encoding/json"
"fmt"
"strings"

@@ -21,6 +22,7 @@ const (
FinalizerName = "finalizers.consul.hashicorp.com"
ConsulAgentError = "ConsulAgentError"
ExternallyManagedConfigError = "ExternallyManagedConfigError"
MigrationFailedError = "MigrationFailedError"
)

// Controller is implemented by CRD-specific controllers. It is used by
@@ -191,11 +193,30 @@ func (r *ConfigEntryController) ReconcileEntry(
// Check if the config entry is managed by our datacenter.
// Do not process resource if the entry was not created within our datacenter
// as it was created in a different cluster which will be managing that config entry.
requiresMigration := false
if entry.GetMeta()[common.DatacenterKey] != r.DatacenterName {
return r.syncFailed(ctx, logger, crdCtrl, configEntry, ExternallyManagedConfigError, fmt.Errorf("config entry managed in different datacenter: %q", entry.GetMeta()[common.DatacenterKey]))

// Note that there is a special case where we will migrate a config entry
// that wasn't created by the controller if it has the migrate-entry annotation set to true.
// This functionality exists to help folks who are upgrading from older Helm
// chart versions where they previously created config entries themselves but
// now want to manage them through custom resources.
if configEntry.GetObjectMeta().Annotations[common.MigrateEntryKey] != common.MigrateEntryTrue {
return r.syncFailed(ctx, logger, crdCtrl, configEntry, ExternallyManagedConfigError, fmt.Errorf("config entry managed in different datacenter: %q", entry.GetMeta()[common.DatacenterKey]))
}

requiresMigration = true
}

if !configEntry.MatchesConsul(entry) {
if requiresMigration {
// If we're migrating this config entry but the custom resource
// doesn't match what's currently in Consul, we error out so that
// we don't accidentally overwrite the existing entry.
return r.syncFailed(ctx, logger, crdCtrl, configEntry, MigrationFailedError,
r.nonMatchingMigrationError(configEntry, entry))
}

logger.Info("config entry does not match consul", "modify-index", entry.GetModifyIndex())
_, writeMeta, err := r.ConsulClient.ConfigEntries().Set(consulEntry, &capi.WriteOptions{
Namespace: r.consulNamespace(consulEntry, configEntry.ConsulMirroringNS(), configEntry.ConsulGlobalResource()),
@@ -206,6 +227,20 @@ }
}
logger.Info("config entry updated", "request-time", writeMeta.RequestTime)
return r.syncSuccessful(ctx, crdCtrl, configEntry)
} else if requiresMigration && entry.GetMeta()[common.DatacenterKey] != r.DatacenterName {
// If we get here then we're doing a migration and the entry in Consul
// matches the entry in Kubernetes. We just need to update the metadata
// of the entry in Consul to say that it's now managed by Kubernetes.
logger.Info("migrating config entry to be managed by Kubernetes")
_, writeMeta, err := r.ConsulClient.ConfigEntries().Set(consulEntry, &capi.WriteOptions{
Namespace: r.consulNamespace(consulEntry, configEntry.ConsulMirroringNS(), configEntry.ConsulGlobalResource()),
})
if err != nil {
return r.syncUnknownWithError(ctx, logger, crdCtrl, configEntry, ConsulAgentError,
fmt.Errorf("updating config entry in consul: %w", err))
}
logger.Info("config entry migrated", "request-time", writeMeta.RequestTime)
return r.syncSuccessful(ctx, crdCtrl, configEntry)
} else if configEntry.SyncedConditionStatus() != corev1.ConditionTrue {
return r.syncSuccessful(ctx, crdCtrl, configEntry)
}
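Condensed, the branches added above form a small decision table. The sketch below is illustrative only, not code from this PR; the three booleans stand in for the datacenter-ownership check, the migrate-entry annotation check, and `MatchesConsul`:

```go
// Illustrative summary of ReconcileEntry's migration branches (not from the PR).
func reconcileOutcome(ownedByUs, wantsMigrate, matches bool) string {
	switch {
	case !ownedByUs && !wantsMigrate:
		return "ExternallyManagedConfigError: leave the foreign entry alone"
	case !ownedByUs && !matches:
		return "MigrationFailedError: never overwrite a non-matching entry"
	case !ownedByUs:
		return "migrate: rewrite the entry so its metadata marks it Kubernetes-managed"
	case !matches:
		return "update: push the Kubernetes spec to Consul"
	default:
		return "synced: nothing to write"
	}
}
```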
@@ -271,6 +306,23 @@ func (r *ConfigEntryController) syncUnknownWithError(ctx context.Context,
return ctrl.Result{}, err
}

// nonMatchingMigrationError returns an error that indicates the migration failed
// because the config entries did not match.
func (r *ConfigEntryController) nonMatchingMigrationError(kubeEntry common.ConfigEntryResource, consulEntry capi.ConfigEntry) error {
// We marshal into JSON to include in the error message so users will know
// which fields aren't matching.
kubeJSON, err := json.Marshal(kubeEntry.ToConsul(r.DatacenterName))
if err != nil {
return fmt.Errorf("migration failed: unable to marshal Kubernetes resource: %s", err)
}
consulJSON, err := json.Marshal(consulEntry)
if err != nil {
return fmt.Errorf("migration failed: unable to marshal Consul resource: %s", err)
}

return fmt.Errorf("migration failed: Kubernetes resource does not match existing Consul config entry: consul=%s, kube=%s", consulJSON, kubeJSON)
}

func isNotFoundErr(err error) bool {
return err != nil && strings.Contains(err.Error(), "404")
}
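For reference, the error built by `nonMatchingMigrationError` above embeds both JSON documents, so the mismatched fields are visible at a glance. A self-contained illustration with hypothetical field values:

```go
// Hypothetical mismatch: Consul has protocol "http", Kubernetes wants "tcp".
kubeJSON, _ := json.Marshal(map[string]string{"Protocol": "tcp"})
consulJSON, _ := json.Marshal(map[string]string{"Protocol": "http"})
err := fmt.Errorf("migration failed: Kubernetes resource does not match existing Consul config entry: consul=%s, kube=%s", consulJSON, kubeJSON)
// err.Error() contains: consul={"Protocol":"http"}, kube={"Protocol":"tcp"}
```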
136 changes: 136 additions & 0 deletions controller/configentry_controller_test.go
@@ -2701,3 +2701,139 @@ func TestConfigEntryControllers_updatesStatusWhenDeleteFails(t *testing.T) {
}
require.True(t, cmp.Equal(syncCondition, expectedCondition, cmpopts.IgnoreFields(v1alpha1.Condition{}, "LastTransitionTime")))
}

// Test that if the resource already exists in Consul and the Kube resource
// has the "migrate-entry" annotation, the Kube resource is allowed to sync
// to Consul; if the two don't match, the migration must fail.
func TestConfigEntryController_Migration(t *testing.T) {
kubeNS := "default"
protocol := "http"
cfgEntryName := "service"

cases := map[string]struct {
KubeResource v1alpha1.ServiceDefaults
ConsulResource capi.ServiceConfigEntry
ExpErr string
}{
"identical resources should be migrated successfully": {
KubeResource: v1alpha1.ServiceDefaults{
ObjectMeta: metav1.ObjectMeta{
Name: cfgEntryName,
Namespace: kubeNS,
Annotations: map[string]string{
common.MigrateEntryKey: "true",
},
},
Spec: v1alpha1.ServiceDefaultsSpec{
Protocol: protocol,
},
},
ConsulResource: capi.ServiceConfigEntry{
Kind: capi.ServiceDefaults,
Name: cfgEntryName,
Protocol: protocol,
},
},
"different resources (protocol) should not be migrated": {
KubeResource: v1alpha1.ServiceDefaults{
ObjectMeta: metav1.ObjectMeta{
Name: cfgEntryName,
Namespace: kubeNS,
Annotations: map[string]string{
common.MigrateEntryKey: "true",
},
},
Spec: v1alpha1.ServiceDefaultsSpec{
Protocol: "tcp",
},
},
ConsulResource: capi.ServiceConfigEntry{
Kind: capi.ServiceDefaults,
Name: cfgEntryName,
Protocol: protocol,
},
ExpErr: "migration failed: Kubernetes resource does not match existing Consul config entry",
},
}

for name, c := range cases {
t.Run(name, func(t *testing.T) {
ctx := context.Background()

s := runtime.NewScheme()
s.AddKnownTypes(v1alpha1.GroupVersion, &v1alpha1.ServiceDefaults{})

client := fake.NewFakeClientWithScheme(s, &c.KubeResource)
consul, err := testutil.NewTestServerConfigT(t, nil)
require.NoError(t, err)
defer consul.Stop()

consul.WaitForServiceIntentions(t)
consulClient, err := capi.NewClient(&capi.Config{
Address: consul.HTTPAddr,
})
require.NoError(t, err)

// Create the service-defaults in Consul.
success, _, err := consulClient.ConfigEntries().Set(&c.ConsulResource, nil)
require.NoError(t, err)
require.True(t, success, "config entry was not created")

// Set up the reconciler.
logger := logrtest.TestLogger{T: t}
svcDefaultsReconciler := ServiceDefaultsController{
Client: client,
Log: logger,
ConfigEntryController: &ConfigEntryController{
ConsulClient: consulClient,
DatacenterName: datacenterName,
},
}

defaultsNamespacedName := types.NamespacedName{
Namespace: kubeNS,
Name: cfgEntryName,
}

// Trigger the reconciler.
resp, err := svcDefaultsReconciler.Reconcile(ctrl.Request{NamespacedName: defaultsNamespacedName})
if c.ExpErr != "" {
require.Error(t, err)
require.Contains(t, err.Error(), c.ExpErr)
} else {
require.NoError(t, err)
require.False(t, resp.Requeue)
}

entryAfterReconcile := &v1alpha1.ServiceDefaults{}
err = client.Get(ctx, defaultsNamespacedName, entryAfterReconcile)
require.NoError(t, err)

syncCondition := entryAfterReconcile.GetCondition(v1alpha1.ConditionSynced)
if c.ExpErr != "" {
// Ensure the status of the resource is migration failed.
require.Equal(t, corev1.ConditionFalse, syncCondition.Status)
require.Equal(t, MigrationFailedError, syncCondition.Reason)
require.Contains(t, syncCondition.Message, c.ExpErr)

// Check that the Consul resource hasn't changed.
entry, _, err := consulClient.ConfigEntries().Get(capi.ServiceDefaults, cfgEntryName, nil)
require.NoError(t, err)
require.NotContains(t, entry.GetMeta(), common.DatacenterKey)
require.Equal(t, protocol, entry.(*capi.ServiceConfigEntry).Protocol)
} else {
// Ensure the status of the resource is synced.
expectedCondition := &v1alpha1.Condition{
Type: v1alpha1.ConditionSynced,
Status: corev1.ConditionTrue,
}
require.True(t, cmp.Equal(syncCondition, expectedCondition, cmpopts.IgnoreFields(v1alpha1.Condition{}, "LastTransitionTime")))

// Ensure the Consul resource has the expected metadata.
entry, _, err := consulClient.ConfigEntries().Get(capi.ServiceDefaults, cfgEntryName, nil)
require.NoError(t, err)
require.Contains(t, entry.GetMeta(), common.DatacenterKey)
require.Equal(t, "datacenter", entry.GetMeta()[common.DatacenterKey])
}
})
}
}
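After a successful migration the test asserts that the entry in Consul carries the datacenter metadata; the same check can be run by hand against the Consul API. A hedged usage sketch mirroring the assertions above (`"service"` is the test's entry name):

```go
// Read the migrated entry back and inspect the metadata the controller set.
entry, _, err := consulClient.ConfigEntries().Get(capi.ServiceDefaults, "service", nil)
if err == nil {
	fmt.Println(entry.GetMeta()[common.DatacenterKey]) // e.g. "datacenter" in this test
}
```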