From 3a6d5dbaa923e5572f263a43f9773c3c415ba2df Mon Sep 17 00:00:00 2001
From: Dat Dao
Date: Wed, 11 Dec 2024 18:49:20 +0700
Subject: [PATCH 1/7] internal/controller: support custom atlas.hcl

---
 api/v1alpha1/atlasmigration_types.go          |   3 +-
 api/v1alpha1/atlasschema_types.go             |   3 +-
 api/v1alpha1/project_config.go                | 101 +++++++
 api/v1alpha1/target.go                        |  26 +-
 api/v1alpha1/types_test.go                    |   3 -
 api/v1alpha1/zz_generated.deepcopy.go         |  66 +++++
 charts/atlas-operator/templates/crds/crd.yaml | 212 +++++++++++++-
 .../bases/db.atlasgo.io_atlasmigrations.yaml  | 106 ++++++-
 .../crd/bases/db.atlasgo.io_atlasschemas.yaml | 106 +++++++
 .../controller/atlasmigration_controller.go   |  73 ++++-
 .../atlasmigration_controller_test.go         | 100 +++++++
 internal/controller/atlasschema_controller.go |  89 +++++-
 .../controller/atlasschema_controller_test.go |  55 ++++
 internal/controller/common.go                 |  81 ++++++
 internal/controller/common_test.go            | 114 ++++++++
 test/e2e/testscript/custom-config.txtar       | 273 ++++++++++++++++++
 16 files changed, 1369 insertions(+), 42 deletions(-)
 create mode 100644 api/v1alpha1/project_config.go
 create mode 100644 internal/controller/common_test.go
 create mode 100644 test/e2e/testscript/custom-config.txtar

diff --git a/api/v1alpha1/atlasmigration_types.go b/api/v1alpha1/atlasmigration_types.go
index feb44e3..4fd7623 100644
--- a/api/v1alpha1/atlasmigration_types.go
+++ b/api/v1alpha1/atlasmigration_types.go
@@ -61,7 +61,8 @@ type (
 	}
 	// AtlasMigrationSpec defines the desired state of AtlasMigration
 	AtlasMigrationSpec struct {
-		TargetSpec `json:",inline"`
+		TargetSpec        `json:",inline"`
+		ProjectConfigSpec `json:",inline"`
 		// EnvName sets the environment name used for reporting runs to Atlas Cloud.
 		EnvName string `json:"envName,omitempty"`
 		// Cloud defines the Atlas Cloud configuration.
diff --git a/api/v1alpha1/atlasschema_types.go b/api/v1alpha1/atlasschema_types.go
index 2ef3ab0..cf379ef 100644
--- a/api/v1alpha1/atlasschema_types.go
+++ b/api/v1alpha1/atlasschema_types.go
@@ -71,7 +71,8 @@ type (
 	}
 	// AtlasSchemaSpec defines the desired state of AtlasSchema
 	AtlasSchemaSpec struct {
-		TargetSpec `json:",inline"`
+		TargetSpec        `json:",inline"`
+		ProjectConfigSpec `json:",inline"`
 		// Desired Schema of the target.
 		Schema Schema `json:"schema,omitempty"`
 		// Cloud defines the Atlas Cloud configuration.
diff --git a/api/v1alpha1/project_config.go b/api/v1alpha1/project_config.go
new file mode 100644
index 0000000..62b858a
--- /dev/null
+++ b/api/v1alpha1/project_config.go
@@ -0,0 +1,101 @@
+// Copyright 2023 The Atlas Operator Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1alpha1
+
+import (
+	"context"
+	"fmt"
+
+	"ariga.io/atlas-go-sdk/atlasexec"
+	corev1 "k8s.io/api/core/v1"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+type (
+	// ProjectConfigSpec defines the project configuration.
+	ProjectConfigSpec struct {
+		// Config defines the project configuration.
+		// Should be a valid HCL string (the content of an atlas.hcl file).
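+		// A minimal illustrative example (the variable and env names are
+		// user-defined):
+		//
+		//	variable "db_url" {
+		//	  type = string
+		//	}
+		//	env "k8s" {
+		//	  url = var.db_url
+		//	}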
+		Config string `json:"config,omitempty"`
+		// ConfigFrom defines the reference to the secret key that contains the project configuration.
+		ConfigFrom Secret `json:"configFrom,omitempty"`
+		// EnvName defines the environment name defined in the project configuration.
+		// If not defined, the default environment "k8s" will be used.
+		EnvName string `json:"envName,omitempty"`
+		// Vars defines the input variables for the project configuration.
+		Vars []Variable `json:"vars,omitempty"`
+	}
+	// Variable defines an input variable for the project configuration, set directly or resolved from a secret/configmap reference.
+	Variable struct {
+		Key       string    `json:"key,omitempty"`
+		Value     string    `json:"value,omitempty"`
+		ValueFrom ValueFrom `json:"valueFrom,omitempty"`
+	}
+	// ValueFrom defines the reference to the secret key that contains the value.
+	ValueFrom struct {
+		// SecretKeyRef defines the secret key reference to use for the value.
+		SecretKeyRef *corev1.SecretKeySelector `json:"secretKeyRef,omitempty"`
+		// ConfigMapKeyRef defines the configmap key reference to use for the value.
+		ConfigMapKeyRef *corev1.ConfigMapKeySelector `json:"configMapKeyRef,omitempty"`
+	}
+)
+
+// GetConfig returns the project configuration.
+// The configuration is resolved from the secret reference.
+func (s ProjectConfigSpec) GetConfig(ctx context.Context, r client.Reader, ns string) (string, error) {
+	if s.Config != "" {
+		return s.Config, nil
+	}
+	if s.ConfigFrom.SecretKeyRef != nil {
+		return getSecretValue(ctx, r, ns, s.ConfigFrom.SecretKeyRef)
+	}
+	return "", nil
+}
+
+// GetVars returns the input variables for the project configuration.
+// The variables are resolved from the secret or configmap reference.
+func (s ProjectConfigSpec) GetVars(ctx context.Context, r client.Reader, namespace string) (atlasexec.Vars2, error) {
+	vars := make(map[string]any)
+	for _, variable := range s.Vars {
+		var (
+			value string
+			err   error
+		)
+		value = variable.Value
+		if variable.ValueFrom.SecretKeyRef != nil {
+			if value, err = getSecretValue(ctx, r, namespace, variable.ValueFrom.SecretKeyRef); err != nil {
+				return nil, err
+			}
+		}
+		if variable.ValueFrom.ConfigMapKeyRef != nil {
+			if value, err = getConfigMapValue(ctx, r, namespace, variable.ValueFrom.ConfigMapKeyRef); err != nil {
+				return nil, err
+			}
+		}
+		// Variables sharing a key are grouped into a slice, as required by
+		// the list(string) input type when generating the Atlas command.
+		switch v := vars[variable.Key].(type) {
+		case nil:
+			vars[variable.Key] = value
+		case []string:
+			vars[variable.Key] = append(v, value)
+		case string:
+			vars[variable.Key] = []string{v, value}
+		default:
+			return nil, fmt.Errorf("invalid variable type for %q", variable.Key)
+		}
+	}
+	return vars, nil
+}
diff --git a/api/v1alpha1/target.go b/api/v1alpha1/target.go
index 7d08677..f5cfee7 100644
--- a/api/v1alpha1/target.go
+++ b/api/v1alpha1/target.go
@@ -66,7 +66,7 @@ type (
 // DatabaseURL returns the database url.
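 // The URL is resolved in order of precedence: URLFrom, URL, then Credentials.
 // It returns nil without an error when no target database is defined;
 // callers are expected to handle the nil case.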
func (s TargetSpec) DatabaseURL(ctx context.Context, r client.Reader, ns string) (*url.URL, error) { if s.URLFrom.SecretKeyRef != nil { - val, err := getSecrectValue(ctx, r, ns, s.URLFrom.SecretKeyRef) + val, err := getSecretValue(ctx, r, ns, s.URLFrom.SecretKeyRef) if err != nil { return nil, err } @@ -76,21 +76,21 @@ func (s TargetSpec) DatabaseURL(ctx context.Context, r client.Reader, ns string) return url.Parse(s.URL) } if s.Credentials.UserFrom.SecretKeyRef != nil { - val, err := getSecrectValue(ctx, r, ns, s.Credentials.UserFrom.SecretKeyRef) + val, err := getSecretValue(ctx, r, ns, s.Credentials.UserFrom.SecretKeyRef) if err != nil { return nil, err } s.Credentials.User = val } if s.Credentials.PasswordFrom.SecretKeyRef != nil { - val, err := getSecrectValue(ctx, r, ns, s.Credentials.PasswordFrom.SecretKeyRef) + val, err := getSecretValue(ctx, r, ns, s.Credentials.PasswordFrom.SecretKeyRef) if err != nil { return nil, err } s.Credentials.Password = val } if s.Credentials.HostFrom.SecretKeyRef != nil { - val, err := getSecrectValue(ctx, r, ns, s.Credentials.HostFrom.SecretKeyRef) + val, err := getSecretValue(ctx, r, ns, s.Credentials.HostFrom.SecretKeyRef) if err != nil { return nil, err } @@ -99,7 +99,7 @@ func (s TargetSpec) DatabaseURL(ctx context.Context, r client.Reader, ns string) if s.Credentials.Host != "" { return s.Credentials.URL(), nil } - return nil, fmt.Errorf("no target database defined") + return nil, nil } // URL returns the URL for the database. @@ -133,7 +133,7 @@ func (c *Credentials) URL() *url.URL { return u } -func getSecrectValue( +func getSecretValue( ctx context.Context, r client.Reader, ns string, @@ -147,6 +147,20 @@ func getSecrectValue( return string(val.Data[ref.Key]), nil } +func getConfigMapValue( + ctx context.Context, + r client.Reader, + ns string, + ref *corev1.ConfigMapKeySelector, +) (string, error) { + val := &corev1.ConfigMap{} + err := r.Get(ctx, types.NamespacedName{Name: ref.Name, Namespace: ns}, val) + if err != nil { + return "", err + } + return string(val.Data[ref.Key]), nil +} + // Driver defines the database driver. 
type Driver string diff --git a/api/v1alpha1/types_test.go b/api/v1alpha1/types_test.go index 71aa4e7..80f094b 100644 --- a/api/v1alpha1/types_test.go +++ b/api/v1alpha1/types_test.go @@ -53,9 +53,6 @@ func TestTargetSpec_DatabaseURL(t *testing.T) { require.Equal(t, a, u.String()) } ) - // error - _, err := target.DatabaseURL(ctx, nil, "default") - require.ErrorContains(t, err, "no target database defined") // Should return the URL from the credentials target.Credentials = v1alpha1.Credentials{ diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index a7d2354..5dc360c 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -87,6 +87,7 @@ func (in *AtlasMigrationList) DeepCopyObject() runtime.Object { func (in *AtlasMigrationSpec) DeepCopyInto(out *AtlasMigrationSpec) { *out = *in in.TargetSpec.DeepCopyInto(&out.TargetSpec) + in.ProjectConfigSpec.DeepCopyInto(&out.ProjectConfigSpec) in.Cloud.DeepCopyInto(&out.Cloud) in.Dir.DeepCopyInto(&out.Dir) in.DevURLFrom.DeepCopyInto(&out.DevURLFrom) @@ -192,6 +193,7 @@ func (in *AtlasSchemaList) DeepCopyObject() runtime.Object { func (in *AtlasSchemaSpec) DeepCopyInto(out *AtlasSchemaSpec) { *out = *in in.TargetSpec.DeepCopyInto(&out.TargetSpec) + in.ProjectConfigSpec.DeepCopyInto(&out.ProjectConfigSpec) in.Schema.DeepCopyInto(&out.Schema) in.Cloud.DeepCopyInto(&out.Cloud) in.DevURLFrom.DeepCopyInto(&out.DevURLFrom) @@ -444,6 +446,29 @@ func (in *Policy) DeepCopy() *Policy { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectConfigSpec) DeepCopyInto(out *ProjectConfigSpec) { + *out = *in + in.ConfigFrom.DeepCopyInto(&out.ConfigFrom) + if in.Vars != nil { + in, out := &in.Vars, &out.Vars + *out = make([]Variable, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectConfigSpec. +func (in *ProjectConfigSpec) DeepCopy() *ProjectConfigSpec { + if in == nil { + return nil + } + out := new(ProjectConfigSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ProtectFlows) DeepCopyInto(out *ProtectFlows) { *out = *in @@ -570,3 +595,44 @@ func (in *TokenFrom) DeepCopy() *TokenFrom { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValueFrom) DeepCopyInto(out *ValueFrom) { + *out = *in + if in.SecretKeyRef != nil { + in, out := &in.SecretKeyRef, &out.SecretKeyRef + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.ConfigMapKeyRef != nil { + in, out := &in.ConfigMapKeyRef, &out.ConfigMapKeyRef + *out = new(corev1.ConfigMapKeySelector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValueFrom. +func (in *ValueFrom) DeepCopy() *ValueFrom { + if in == nil { + return nil + } + out := new(ValueFrom) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Variable) DeepCopyInto(out *Variable) { + *out = *in + in.ValueFrom.DeepCopyInto(&out.ValueFrom) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Variable. +func (in *Variable) DeepCopy() *Variable { + if in == nil { + return nil + } + out := new(Variable) + in.DeepCopyInto(out) + return out +} diff --git a/charts/atlas-operator/templates/crds/crd.yaml b/charts/atlas-operator/templates/crds/crd.yaml index 62ca8cb..9b442cb 100644 --- a/charts/atlas-operator/templates/crds/crd.yaml +++ b/charts/atlas-operator/templates/crds/crd.yaml @@ -87,6 +87,41 @@ spec: url: type: string type: object + config: + description: |- + Config defines the project configuration. + Should be a valid YAML string. + type: string + configFrom: + description: ConfigFrom defines the reference to the secret key that + contains the project configuration. + properties: + secretKeyRef: + description: SecretKeyRef defines the secret key reference to + use for the user. + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object credentials: description: |- Credentials defines the credentials to use when connecting to the database. @@ -264,8 +299,9 @@ spec: type: object type: object envName: - description: EnvName sets the environment name used for reporting - runs to Atlas Cloud. + description: |- + EnvName defines the environment name that defined in the project configuration. + If not defined, the default environment "k8s" will be used. type: string execOrder: default: linear @@ -329,6 +365,72 @@ spec: type: object x-kubernetes-map-type: atomic type: object + vars: + description: Vars defines the input variables for the project configuration. + items: + description: Variables defines the reference of secret/configmap + to the input variables for the project configuration. + properties: + key: + type: string + value: + type: string + valueFrom: + description: ValueFrom defines the reference to the secret key + that contains the value. + properties: + configMapKeyRef: + description: ConfigMapKeyRef defines the configmap key reference + to use for the value. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: SecretKeyRef defines the secret key reference + to use for the value. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. 
+ type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + type: object + type: array required: - dir type: object @@ -507,6 +609,41 @@ spec: x-kubernetes-map-type: atomic type: object type: object + config: + description: |- + Config defines the project configuration. + Should be a valid YAML string. + type: string + configFrom: + description: ConfigFrom defines the reference to the secret key that + contains the project configuration. + properties: + secretKeyRef: + description: SecretKeyRef defines the secret key reference to + use for the user. + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object credentials: description: |- Credentials defines the credentials to use when connecting to the database. @@ -651,6 +788,11 @@ spec: type: object x-kubernetes-map-type: atomic type: object + envName: + description: |- + EnvName defines the environment name that defined in the project configuration. + If not defined, the default environment "k8s" will be used. + type: string exclude: description: Exclude a list of glob patterns used to filter existing resources being taken into account. @@ -809,6 +951,72 @@ spec: type: object x-kubernetes-map-type: atomic type: object + vars: + description: Vars defines the input variables for the project configuration. + items: + description: Variables defines the reference of secret/configmap + to the input variables for the project configuration. + properties: + key: + type: string + value: + type: string + valueFrom: + description: ValueFrom defines the reference to the secret key + that contains the value. + properties: + configMapKeyRef: + description: ConfigMapKeyRef defines the configmap key reference + to use for the value. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: SecretKeyRef defines the secret key reference + to use for the value. + properties: + key: + description: The key of the secret to select from. 
Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + type: object + type: array type: object status: description: AtlasSchemaStatus defines the observed state of AtlasSchema diff --git a/config/crd/bases/db.atlasgo.io_atlasmigrations.yaml b/config/crd/bases/db.atlasgo.io_atlasmigrations.yaml index bffe266..d083760 100644 --- a/config/crd/bases/db.atlasgo.io_atlasmigrations.yaml +++ b/config/crd/bases/db.atlasgo.io_atlasmigrations.yaml @@ -101,6 +101,41 @@ spec: url: type: string type: object + config: + description: |- + Config defines the project configuration. + Should be a valid YAML string. + type: string + configFrom: + description: ConfigFrom defines the reference to the secret key that + contains the project configuration. + properties: + secretKeyRef: + description: SecretKeyRef defines the secret key reference to + use for the user. + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object credentials: description: |- Credentials defines the credentials to use when connecting to the database. @@ -278,8 +313,9 @@ spec: type: object type: object envName: - description: EnvName sets the environment name used for reporting - runs to Atlas Cloud. + description: |- + EnvName defines the environment name that defined in the project configuration. + If not defined, the default environment "k8s" will be used. type: string execOrder: default: linear @@ -343,6 +379,72 @@ spec: type: object x-kubernetes-map-type: atomic type: object + vars: + description: Vars defines the input variables for the project configuration. + items: + description: Variables defines the reference of secret/configmap + to the input variables for the project configuration. + properties: + key: + type: string + value: + type: string + valueFrom: + description: ValueFrom defines the reference to the secret key + that contains the value. + properties: + configMapKeyRef: + description: ConfigMapKeyRef defines the configmap key reference + to use for the value. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: SecretKeyRef defines the secret key reference + to use for the value. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + type: object + type: array required: - dir type: object diff --git a/config/crd/bases/db.atlasgo.io_atlasschemas.yaml b/config/crd/bases/db.atlasgo.io_atlasschemas.yaml index 218c7ab..50cca18 100644 --- a/config/crd/bases/db.atlasgo.io_atlasschemas.yaml +++ b/config/crd/bases/db.atlasgo.io_atlasschemas.yaml @@ -96,6 +96,41 @@ spec: x-kubernetes-map-type: atomic type: object type: object + config: + description: |- + Config defines the project configuration. + Should be a valid YAML string. + type: string + configFrom: + description: ConfigFrom defines the reference to the secret key that + contains the project configuration. + properties: + secretKeyRef: + description: SecretKeyRef defines the secret key reference to + use for the user. + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object credentials: description: |- Credentials defines the credentials to use when connecting to the database. @@ -240,6 +275,11 @@ spec: type: object x-kubernetes-map-type: atomic type: object + envName: + description: |- + EnvName defines the environment name that defined in the project configuration. + If not defined, the default environment "k8s" will be used. + type: string exclude: description: Exclude a list of glob patterns used to filter existing resources being taken into account. @@ -398,6 +438,72 @@ spec: type: object x-kubernetes-map-type: atomic type: object + vars: + description: Vars defines the input variables for the project configuration. + items: + description: Variables defines the reference of secret/configmap + to the input variables for the project configuration. + properties: + key: + type: string + value: + type: string + valueFrom: + description: ValueFrom defines the reference to the secret key + that contains the value. + properties: + configMapKeyRef: + description: ConfigMapKeyRef defines the configmap key reference + to use for the value. 
+ properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: SecretKeyRef defines the secret key reference + to use for the value. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + type: object + type: array type: object status: description: AtlasSchemaStatus defines the observed state of AtlasSchema diff --git a/internal/controller/atlasmigration_controller.go b/internal/controller/atlasmigration_controller.go index 5cac65d..7805626 100644 --- a/internal/controller/atlasmigration_controller.go +++ b/internal/controller/atlasmigration_controller.go @@ -41,6 +41,7 @@ import ( "ariga.io/atlas/sql/migrate" dbv1alpha1 "github.com/ariga/atlas-operator/api/v1alpha1" "github.com/ariga/atlas-operator/internal/controller/watch" + "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/hclwrite" "github.com/zclconf/go-cty/cty" ) @@ -77,6 +78,8 @@ type ( MigrateDown bool ObservedHash string RemoteDir *dbv1alpha1.Remote + Config string + Vars atlasexec.Vars2 } ) @@ -148,7 +151,7 @@ func (r *AtlasMigrationReconciler) Reconcile(ctx context.Context, req ctrl.Reque // TODO(giautm): Create DevDB and run linter for new migration // files before applying it to the target database. - if data.DevURL == "" { + if data.URL != nil && data.DevURL == "" { // The user has not specified an URL for dev-db, // spin up a dev-db and get the connection string. data.DevURL, err = r.devDB.devURL(ctx, res, *data.URL) @@ -264,7 +267,7 @@ func (r *AtlasMigrationReconciler) reconcile(ctx context.Context, data *migratio } log.Info("reconciling migration", "env", data.EnvName) // Check if there are any pending migration files - status, err := c.MigrateStatus(ctx, &atlasexec.MigrateStatusParams{Env: data.EnvName}) + status, err := c.MigrateStatus(ctx, &atlasexec.MigrateStatusParams{Env: data.EnvName, Vars: data.Vars}) if err != nil { res.SetNotReady("Migrating", err.Error()) if isChecksumErr(err) { @@ -291,6 +294,7 @@ func (r *AtlasMigrationReconciler) reconcile(ctx context.Context, data *migratio TriggerType: atlasexec.TriggerTypeKubernetes, TriggerVersion: dbv1alpha1.VersionFromContext(ctx), }, + Vars: data.Vars, } // Atlas needs all versions to be present in the directory // to downgrade to a specific version. 
@@ -364,6 +368,7 @@ func (r *AtlasMigrationReconciler) reconcile(ctx context.Context, data *migratio TriggerType: atlasexec.TriggerTypeKubernetes, TriggerVersion: dbv1alpha1.VersionFromContext(ctx), }, + Vars: data.Vars, }) if err != nil { res.SetNotReady("Migrating", err.Error()) @@ -421,12 +426,23 @@ func (r *AtlasMigrationReconciler) extractData(ctx context.Context, res *dbv1alp MigrateDown: false, } ) + data.Config, err = s.GetConfig(ctx, r, res.Namespace) + if err != nil { + return nil, transient(err) + } + hasConfig := data.Config != "" + if hasConfig && data.EnvName == "" { + return nil, errors.New("env name must be set when using custom atlas.hcl config") + } if env := s.EnvName; env != "" { data.EnvName = env } if data.URL, err = s.DatabaseURL(ctx, r, res.Namespace); err != nil { return nil, transient(err) } + if !hasConfig && data.URL == nil { + return nil, transient(errors.New("no target database defined")) + } switch d := s.Dir; { case d.Remote.Name != "": c := s.Cloud @@ -495,6 +511,10 @@ func (r *AtlasMigrationReconciler) extractData(ctx context.Context, res *dbv1alp if err != nil { return nil, err } + data.Vars, err = s.GetVars(ctx, r, res.Namespace) + if err != nil { + return nil, transient(err) + } return data, nil } @@ -516,7 +536,12 @@ func (r *AtlasMigrationReconciler) recordErrEvent(res *dbv1alpha1.AtlasMigration // Calculate the hash of the given data func hashMigrationData(d *migrationData) (string, error) { h := sha256.New() - h.Write([]byte(d.URL.String())) + if d.URL != nil { + h.Write([]byte(d.URL.String())) + } + if d.Config != "" { + h.Write([]byte(d.Config)) + } if c := d.Cloud; c != nil { h.Write([]byte(c.Token)) h.Write([]byte(c.URL)) @@ -552,26 +577,48 @@ func (d *migrationData) DirURL() string { // The template is used by the Atlas CLI to apply the migrations directory. // It also validates the data before rendering the template. 
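 // When a custom config is provided, it is used as the base file: the
 // generated blocks are merged into it, and generated attributes override
 // attributes of the same name in the custom config (see mergeBlocks).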
 func (d *migrationData) render(w io.Writer) error {
-	if d.URL == nil {
+	f := hclwrite.NewFile()
+	for _, b := range d.asBlocks() {
+		f.Body().AppendBlock(b)
+	}
+	// Merge the config block if it is set.
+	if d.Config != "" {
+		cfg, diags := hclwrite.ParseConfig([]byte(d.Config), "", hcl.InitialPos)
+		if diags.HasErrors() {
+			return diags
+		}
+		mergeBlocks(cfg.Body(), f.Body())
+		f = cfg
+	}
+	env := searchBlock(f.Body(), hclwrite.NewBlock("env", []string{d.EnvName}))
+	if env == nil {
+		return fmt.Errorf("env block %q is not found", d.EnvName)
+	}
+	b := env.Body()
+	if b.GetAttribute("url") == nil {
 		return errors.New("database URL is empty")
 	}
+	migrationBlock := searchBlock(b, hclwrite.NewBlock("migration", nil))
+	var dirAttr *hclwrite.Attribute
+	if migrationBlock != nil {
+		dirAttr = migrationBlock.Body().GetAttribute("dir")
+	}
 	switch {
 	case d.hasRemoteDir():
-		if d.Dir != nil {
+		// A dir attribute, if present, may only point to the remote directory.
+		if dirAttr != nil && !strings.Contains(string(dirAttr.Expr().BuildTokens(nil).Bytes()), d.DirURL()) {
 			return errors.New("cannot use both remote and local directory")
 		}
-		if d.Cloud.Token == "" {
-			return errors.New("Atlas Cloud token is empty")
+		// The token is expected in the atlas.cloud block of the merged file.
+		var cloud *hclwrite.Block
+		if atlasBlock := searchBlock(f.Body(), hclwrite.NewBlock("atlas", nil)); atlasBlock != nil {
+			cloud = searchBlock(atlasBlock.Body(), hclwrite.NewBlock("cloud", nil))
+		}
+		if cloud == nil || cloud.Body().GetAttribute("token") == nil {
+			return errors.New("Atlas Cloud token is empty")
 		}
-	case d.Dir != nil:
+	case dirAttr != nil:
 	default:
 		return errors.New("migration directory is empty")
 	}
-	f := hclwrite.NewFile()
-	fBody := f.Body()
-	for _, b := range d.asBlocks() {
-		fBody.AppendBlock(b)
-	}
 	if _, err := f.WriteTo(w); err != nil {
 		return err
 	}
diff --git a/internal/controller/atlasmigration_controller_test.go b/internal/controller/atlasmigration_controller_test.go
index bef7dce..26023e0 100644
--- a/internal/controller/atlasmigration_controller_test.go
+++ b/internal/controller/atlasmigration_controller_test.go
@@ -1030,6 +1030,32 @@ func TestBaselineTemplate(t *testing.T) {
 `, fileContent.String())
 }
 
+func TestCustomAtlasHCL(t *testing.T) {
+	migrate := &migrationData{
+		EnvName: defaultEnvName,
+		Config: `env "kubernetes" {
+  url = "sqlite://file2/?mode=memory"
+  dev = "sqlite://dev/?mode=memory"
+  migration {
+    dir = "file://migrations"
+    baseline = "20230412003626"
+  }
+}
+`,
+	}
+	var fileContent bytes.Buffer
+	require.NoError(t, migrate.render(&fileContent))
+	require.EqualValues(t, `env "kubernetes" {
+  url = "sqlite://file2/?mode=memory"
+  dev = "sqlite://dev/?mode=memory"
+  migration {
+    dir = "file://migrations"
+    baseline = "20230412003626"
+  }
+}
+`, fileContent.String())
+}
+
 func TestCloudTemplate(t *testing.T) {
 	migrate := &migrationData{
 		EnvName: defaultEnvName,
@@ -1064,6 +1090,80 @@ env "kubernetes" {
 `, fileContent.String())
 }
 
+func TestCustomAtlasHCL_CloudTemplate(t *testing.T) {
+	migrate := &migrationData{
+		EnvName: defaultEnvName,
+		RemoteDir: &dbv1alpha1.Remote{
+			Name: "my-remote-dir",
+			Tag:  "my-remote-tag",
+		},
+		Config: `atlas {
+  cloud {
+    token = "my-token"
+    url = "https://atlasgo.io/"
+    project = "my-project"
+  }
+}
+env "kubernetes" {
+  url = "sqlite://file2/?mode=memory"
+  dev = "sqlite://dev/?mode=memory"
+  migration {
+    dir = "atlas://my-remote-dir?tag=my-remote-tag"
+  }
+}`,
+	}
+	var fileContent bytes.Buffer
+	require.NoError(t, migrate.render(&fileContent))
+	require.EqualValues(t, `atlas {
+  cloud {
+    token = "my-token"
+    url = "https://atlasgo.io/"
+    project = "my-project"
+  }
+}
+env "kubernetes" {
+  url = "sqlite://file2/?mode=memory"
+  dev = "sqlite://dev/?mode=memory"
+  migration {
+    dir = "atlas://my-remote-dir?tag=my-remote-tag"
+  }
+}`, fileContent.String())
+}
+
+func TestCustomAtlasHCL_BaselineTemplate(t *testing.T) {
+	migrate := &migrationData{
+		EnvName: defaultEnvName,
+		URL:     must(url.Parse("sqlite://file2/?mode=memory")),
+		DevURL:  "sqlite://dev/?mode=memory",
+		Cloud: &Cloud{
+			URL:   "https://atlasgo.io/",
+			Repo:  "my-project",
+			Token: "my-token",
+		},
+		RemoteDir: &dbv1alpha1.Remote{
+			Name: "my-remote-dir",
+			Tag:  "my-remote-tag",
+		},
+	}
+	var fileContent bytes.Buffer
+	require.NoError(t, migrate.render(&fileContent))
+	require.EqualValues(t, `atlas {
+  cloud {
+    token = "my-token"
+    url = "https://atlasgo.io/"
+    project = "my-project"
+  }
+}
+env "kubernetes" {
+  url = "sqlite://file2/?mode=memory"
+  dev = "sqlite://dev/?mode=memory"
+  migration {
+    dir = "atlas://my-remote-dir?tag=my-remote-tag"
+  }
+}
+`, fileContent.String())
+}
+
 func TestMigrationWithDeploymentContext(t *testing.T) {
 	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		type (
diff --git a/internal/controller/atlasschema_controller.go b/internal/controller/atlasschema_controller.go
index 1be910b..c89bcbc 100644
--- a/internal/controller/atlasschema_controller.go
+++ b/internal/controller/atlasschema_controller.go
@@ -42,6 +42,7 @@ import (
 	"github.com/ariga/atlas-operator/api/v1alpha1"
 	dbv1alpha1 "github.com/ariga/atlas-operator/api/v1alpha1"
 	"github.com/ariga/atlas-operator/internal/controller/watch"
+	"github.com/hashicorp/hcl/v2"
 	"github.com/hashicorp/hcl/v2/hclwrite"
 	"github.com/zclconf/go-cty/cty"
 )
@@ -76,8 +77,9 @@ type (
 		TxMode  dbv1alpha1.TransactionMode
 		Desired *url.URL
 		Cloud   *Cloud
-
-		schema []byte
+		Config string
+		Vars   atlasexec.Vars2
+		schema []byte
 	}
 )
@@ -149,7 +151,7 @@ func (r *AtlasSchemaReconciler) Reconcile(ctx context.Context, req ctrl.Request)
 	// Starting area to handle the heavy jobs.
 	// Below this line is the main logic of the controller.
 	// ====================================================
-	if data.DevURL == "" {
+	if data.URL != nil && data.DevURL == "" {
 		// The user has not specified an URL for dev-db,
 		// spin up a dev-db and get the connection string.
data.DevURL, err = r.devDB.devURL(ctx, res, *data.URL) @@ -217,6 +219,7 @@ func (r *AtlasSchemaReconciler) Reconcile(ctx context.Context, req ctrl.Request) Env: data.EnvName, To: desiredURL, TxMode: string(data.TxMode), + Vars: data.Vars, } repo := data.repoURL() if repo == nil { @@ -237,6 +240,7 @@ func (r *AtlasSchemaReconciler) Reconcile(ctx context.Context, req ctrl.Request) Env: data.EnvName, URL: desiredURL, Format: `{{ .Hash | base64url }}`, + Vars: data.Vars, }) if err != nil { reason, msg := "SchemaPush", err.Error() @@ -253,6 +257,7 @@ func (r *AtlasSchemaReconciler) Reconcile(ctx context.Context, req ctrl.Request) Name: path.Join(repo.Host, repo.Path), Tag: fmt.Sprintf("operator-plan-%.8s", strings.ToLower(tag)), URL: []string{desiredURL}, + Vars: data.Vars, }) if err != nil { reason, msg := "SchemaPush", err.Error() @@ -274,6 +279,7 @@ func (r *AtlasSchemaReconciler) Reconcile(ctx context.Context, req ctrl.Request) From: []string{"env://url"}, To: []string{desiredURL}, Pending: true, + Vars: data.Vars, }) switch { case err != nil && strings.Contains(err.Error(), "no changes to be made"): @@ -308,6 +314,7 @@ func (r *AtlasSchemaReconciler) Reconcile(ctx context.Context, req ctrl.Request) Repo: repo.String(), From: []string{"env://url"}, To: []string{desiredURL}, + Vars: data.Vars, }); { case err != nil: reason, msg := "ListingPlans", err.Error() @@ -367,7 +374,7 @@ func (r *AtlasSchemaReconciler) Reconcile(ctx context.Context, req ctrl.Request) }); err != nil { return result(err) } - err = r.lint(ctx, wd, data, nil) + err = r.lint(ctx, wd, data, data.Vars) switch d := (*destructiveErr)(nil); { case errors.As(err, &d): reason, msg := d.FirstRun() @@ -396,6 +403,7 @@ func (r *AtlasSchemaReconciler) Reconcile(ctx context.Context, req ctrl.Request) To: desiredURL, TxMode: string(data.TxMode), AutoApprove: true, + Vars: data.Vars, }) // Run the linting policy. case data.shouldLint(): @@ -414,6 +422,7 @@ func (r *AtlasSchemaReconciler) Reconcile(ctx context.Context, req ctrl.Request) To: desiredURL, TxMode: string(data.TxMode), AutoApprove: true, + Vars: data.Vars, }) // No linting policy is set. 
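 	// Changes are applied directly with auto-approval.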
 	default:
@@ -422,6 +431,7 @@ func (r *AtlasSchemaReconciler) Reconcile(ctx context.Context, req ctrl.Request)
 			To:          desiredURL,
 			TxMode:      string(data.TxMode),
 			AutoApprove: true,
+			Vars:        data.Vars,
 		})
 	}
 	if err != nil {
@@ -511,6 +521,17 @@ func (r *AtlasSchemaReconciler) extractData(ctx context.Context, res *dbv1alpha1
 			TxMode:  s.TxMode,
 		}
 	)
+	data.Config, err = s.GetConfig(ctx, r, res.Namespace)
+	if err != nil {
+		return nil, transient(err)
+	}
+	hasConfig := data.Config != ""
+	if hasConfig && data.EnvName == "" {
+		return nil, errors.New("env name must be set when using custom atlas.hcl config")
+	}
+	if e := s.EnvName; e != "" {
+		data.EnvName = e
+	}
 	if s := c.TokenFrom.SecretKeyRef; s != nil {
 		token, err := getSecretValue(ctx, r, res.Namespace, s)
 		if err != nil {
@@ -529,6 +550,9 @@ func (r *AtlasSchemaReconciler) extractData(ctx context.Context, res *dbv1alpha1
 	if err != nil {
 		return nil, transient(err)
 	}
+	if !hasConfig && data.URL == nil {
+		return nil, transient(errors.New("no target database defined"))
+	}
 	data.Desired, data.schema, err = s.Schema.DesiredState(ctx, r, res.Namespace)
 	if err != nil {
 		return nil, transient(err)
@@ -541,6 +565,10 @@ func (r *AtlasSchemaReconciler) extractData(ctx context.Context, res *dbv1alpha1
 			return nil, err
 		}
 	}
+	data.Vars, err = s.GetVars(ctx, r, res.Namespace)
+	if err != nil {
+		return nil, transient(err)
+	}
 	return data, nil
 }
 
@@ -556,6 +584,26 @@ func (r *AtlasSchemaReconciler) recordErrEvent(res *dbv1alpha1.AtlasSchema, err
 func (d *managedData) shouldLint() bool {
 	p := d.Policy
 	if p == nil || p.Lint == nil || p.Lint.Destructive == nil {
+		// Check if the destructive policy is set to error in the custom config.
+		// This check won't work if the attribute value has expressions in it.
+		if d.Config != "" {
+			cfg, diags := hclwrite.ParseConfig([]byte(d.Config), "", hcl.InitialPos)
+			if diags.HasErrors() {
+				return false
+			}
+			env := searchBlock(cfg.Body(), hclwrite.NewBlock("env", []string{d.EnvName}))
+			if env == nil {
+				return false
+			}
+			lint := searchBlock(env.Body(), hclwrite.NewBlock("lint", nil))
+			if lint == nil {
+				return false
+			}
+			if v := searchBlock(lint.Body(), hclwrite.NewBlock("destructive", nil)); v != nil {
+				destructiveErr := v.Body().GetAttribute("error")
+				if destructiveErr != nil {
+					attrValue := string(destructiveErr.Expr().BuildTokens(nil).Bytes())
+					return strings.Contains(attrValue, "true")
+				}
+			}
+		}
 		return false
 	}
 	return p.Lint.Destructive.Error
@@ -595,22 +643,35 @@ func (d *managedData) repoURL() *url.URL {
 // The template is used by the Atlas CLI to apply the schema.
 // It also validates the data before rendering the template.
 func (d *managedData) render(w io.Writer) error {
-	if d.EnvName == "" {
-		return errors.New("env name is not set")
+	f := hclwrite.NewFile()
+	for _, b := range d.asBlocks() {
+		f.Body().AppendBlock(b)
 	}
-	if d.URL == nil {
-		return errors.New("database url is not set")
+	// Merge config into the atlas.hcl file.
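+	// The custom config serves as the base file; generated attributes
+	// override same-named attributes from the config during the merge.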
+	if d.Config != "" {
+		cfg, diags := hclwrite.ParseConfig([]byte(d.Config), "", hcl.InitialPos)
+		if diags.HasErrors() {
+			return diags
+		}
+		mergeBlocks(cfg.Body(), f.Body())
+		f = cfg
 	}
-	if d.DevURL == "" {
-		return errors.New("dev url is not set")
+	if d.EnvName == "" {
+		return errors.New("env name is not set")
 	}
 	if d.Desired == nil {
 		return errors.New("the desired state is not set")
 	}
-	f := hclwrite.NewFile()
-	fBody := f.Body()
-	for _, b := range d.asBlocks() {
-		fBody.AppendBlock(b)
+	env := searchBlock(f.Body(), hclwrite.NewBlock("env", []string{d.EnvName}))
+	if env == nil {
+		return fmt.Errorf("env block %q is not found", d.EnvName)
+	}
+	b := env.Body()
+	if b.GetAttribute("url") == nil {
+		return errors.New("database url is not set")
+	}
+	if b.GetAttribute("dev") == nil {
+		return errors.New("dev url is not set")
 	}
 	if _, err := f.WriteTo(w); err != nil {
 		return err
diff --git a/internal/controller/atlasschema_controller_test.go b/internal/controller/atlasschema_controller_test.go
index 0532b0a..943f453 100644
--- a/internal/controller/atlasschema_controller_test.go
+++ b/internal/controller/atlasschema_controller_test.go
@@ -453,6 +453,61 @@ func TestConfigTemplate(t *testing.T) {
 	require.EqualValues(t, expected, buf.String())
 }
 
+func TestCustomAtlasHCL_PolicyTemplate(t *testing.T) {
+	var buf bytes.Buffer
+	data := &managedData{
+		EnvName: defaultEnvName,
+		URL:     must(url.Parse("mysql://root:password@localhost:3306/test")),
+		DevURL:  "mysql://root:password@localhost:3306/dev",
+		Schemas: []string{"foo", "bar"},
+		Desired: must(url.Parse("file://schema.sql")),
+		Config: `
+env "kubernetes" {
+  diff {
+    concurrent_index {
+      create = true
+      drop = true
+    }
+    skip {
+      drop_schema = true
+      drop_table = true
+    }
+  }
+  lint {
+    destructive {
+      error = true
+    }
+  }
+}
+`,
+	}
+	err := data.render(&buf)
+	require.NoError(t, err)
+	expected := `
+env "kubernetes" {
+  diff {
+    concurrent_index {
+      create = true
+      drop = true
+    }
+    skip {
+      drop_schema = true
+      drop_table = true
+    }
+  }
+  lint {
+    destructive {
+      error = true
+    }
+  }
+  dev = "mysql://root:password@localhost:3306/dev"
+  schemas = ["foo", "bar"]
+  url = "mysql://root:password@localhost:3306/test"
+}
+`
+	require.EqualValues(t, expected, buf.String())
+}
+
 func conditionReconciling() *dbv1alpha1.AtlasSchema {
 	return &dbv1alpha1.AtlasSchema{
 		ObjectMeta: objmeta(),
diff --git a/internal/controller/common.go b/internal/controller/common.go
index bf208f8..4c1ba89 100644
--- a/internal/controller/common.go
+++ b/internal/controller/common.go
@@ -15,15 +15,21 @@
 package controller
 
 import (
+	"cmp"
 	"context"
 	"errors"
 	"fmt"
+	"iter"
+	"maps"
 	"regexp"
+	"slices"
 	"strings"
 	"time"
 
 	"ariga.io/atlas-go-sdk/atlasexec"
 	"ariga.io/atlas/sql/migrate"
+	"github.com/hashicorp/hcl/v2/hclsyntax"
+	"github.com/hashicorp/hcl/v2/hclwrite"
 	"github.com/zclconf/go-cty/cty"
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -194,3 +200,78 @@ func listStringVal(s []string) cty.Value {
 	}
 	return cty.ListVal(v)
 }
+
+// mergeBlocks merges the blocks from src into dst.
+func mergeBlocks(dst *hclwrite.Body, src *hclwrite.Body) {
+	for _, srcBlk := range src.Blocks() {
+		dstBlk := searchBlock(dst, srcBlk)
+		// If there is no block with the same type and name, append it.
+		if dstBlk == nil {
+			appendBlock(dst, srcBlk)
+			continue
+		}
+		mergeBlock(dstBlk, srcBlk)
+	}
+}
+
+// searchBlock searches for a block with the given type and name in the parent block.
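+// If no block matches the target's labels exactly, an unnamed block of the
+// same type is used as a fallback. It returns nil when nothing matches.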
+func searchBlock(parent *hclwrite.Body, target *hclwrite.Block) *hclwrite.Block {
+	blocks := parent.Blocks()
+	typBlocks := make([]*hclwrite.Block, 0, len(blocks))
+	for _, b := range blocks {
+		if b.Type() == target.Type() {
+			typBlocks = append(typBlocks, b)
+		}
+	}
+	if len(typBlocks) == 0 {
+		// No blocks of this type, return nil.
+		return nil
+	}
+	// Check if there is a block with the given name.
+	idx := slices.IndexFunc(typBlocks, func(b *hclwrite.Block) bool {
+		return slices.Compare(b.Labels(), target.Labels()) == 0
+	})
+	if idx == -1 {
+		// No block matched, check if there is an unnamed block.
+		idx = slices.IndexFunc(typBlocks, func(b *hclwrite.Block) bool {
+			return len(b.Labels()) == 0
+		})
+		if idx == -1 {
+			return nil
+		}
+	}
+	return typBlocks[idx]
+}
+
+// mergeBlock merges the source block into the destination block.
+func mergeBlock(dst, src *hclwrite.Block) {
+	for name, attr := range mapsSorted(src.Body().Attributes()) {
+		dst.Body().SetAttributeRaw(name, attr.Expr().BuildTokens(nil))
+	}
+	// Traverse to the nested blocks.
+	mergeBlocks(dst.Body(), src.Body())
+}
+
+// appendBlock appends a block to the body and ensures there is a newline before the block.
+// It returns the appended block.
+//
+// There is a bug in hclwrite that causes the block to be appended without a newline:
+// https://github.com/hashicorp/hcl/issues/687
+func appendBlock(body *hclwrite.Body, blk *hclwrite.Block) *hclwrite.Block {
+	t := body.BuildTokens(nil)
+	if len(t) == 0 || t[len(t)-1].Type != hclsyntax.TokenNewline {
+		body.AppendNewline()
+	}
+	return body.AppendBlock(blk)
+}
+
+// mapsSorted returns a sequence of key-value pairs sorted by key.
+func mapsSorted[K cmp.Ordered, V any](m map[K]V) iter.Seq2[K, V] {
+	return func(yield func(K, V) bool) {
+		for _, k := range slices.Sorted(maps.Keys(m)) {
+			if !yield(k, m[k]) {
+				return
+			}
+		}
+	}
+}
diff --git a/internal/controller/common_test.go b/internal/controller/common_test.go
new file mode 100644
index 0000000..918e85f
--- /dev/null
+++ b/internal/controller/common_test.go
@@ -0,0 +1,114 @@
+// Copyright 2023 The Atlas Operator Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package controller
+
+import (
+	"testing"
+
+	"github.com/hashicorp/hcl/v2"
+	"github.com/hashicorp/hcl/v2/hclwrite"
+)
+
+func Test_mergeBlocks(t *testing.T) {
+	type args struct {
+		dst string
+		src string
+	}
+	tests := []struct {
+		name     string
+		args     args
+		expected string
+	}{
+		{
+			name: "empty",
+			args: args{
+				dst: "",
+				src: "",
+			},
+			expected: "",
+		},
+		{
+			name: "dst empty",
+			args: args{
+				dst: "",
+				src: `env "example" {}`,
+			},
+			expected: `
+env "example" {}`,
+		},
+		{
+			name: "same block",
+			args: args{
+				dst: `env "example" {}`,
+				src: `env "example" {}`,
+			},
+			expected: `env "example" {}`,
+		},
+		{
+			name: "different block",
+			args: args{
+				dst: `env "example" {}`,
+				src: `env "example2" {}`,
+			},
+			expected: `env "example" {}
+env "example2" {}`,
+		},
+		{
+			name: "same block with different attributes",
+			args: args{
+				dst: `
+env "example" {
+  key = "value"
+}`,
+				src: `
+env "example" {
+  key2 = "value2"
+}`,
+			},
+			expected: `
+env "example" {
+  key = "value"
+  key2 = "value2"
+}`,
+		},
+		{
+			name: "same block with same attributes",
+			args: args{
+				dst: `
+env "example" {
+  key = "value"
+}`,
+				src: `
+env "example" {
+  key = "value2"
+}`,
+			},
+			expected: `
+env "example" {
+  key = "value2"
+}`,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			dst, _ := hclwrite.ParseConfig([]byte(tt.args.dst), "", hcl.InitialPos)
+			src, _ := hclwrite.ParseConfig([]byte(tt.args.src), "", hcl.InitialPos)
+			mergeBlocks(dst.Body(), src.Body())
+			if got := string(dst.Bytes()); got != tt.expected {
+				t.Errorf("mergeBlocks() = %v, want %v", got, tt.expected)
+			}
+		})
+	}
+}
diff --git a/test/e2e/testscript/custom-config.txtar b/test/e2e/testscript/custom-config.txtar
new file mode 100644
index 0000000..a302c6e
--- /dev/null
+++ b/test/e2e/testscript/custom-config.txtar
@@ -0,0 +1,273 @@
+env SCHEMA_DB_URL=postgres://root:pass@postgres.${NAMESPACE}:5432/postgres?sslmode=disable
+env SCHEMA_DB_DEV_URL=postgres://root:pass@postgres.${NAMESPACE}:5433/postgres?sslmode=disable
+env MIGRATE_DB_URL=postgres://root:pass@postgres.${NAMESPACE}:5434/postgres?sslmode=disable
+env MIGRATE_DB_DEV_URL=postgres://root:pass@postgres.${NAMESPACE}:5435/postgres?sslmode=disable
+kubectl apply -f database.yaml
+kubectl create secret generic schema-db-creds --from-literal=url=${SCHEMA_DB_URL}
+kubectl create configmap schema-db-dev-creds --from-literal=url=${SCHEMA_DB_DEV_URL}
+kubectl create secret generic migrate-db-creds --from-literal=url=${MIGRATE_DB_URL}
+kubectl create configmap migrate-db-dev-creds --from-literal=url=${MIGRATE_DB_DEV_URL}
+
+# Wait for the first pod to be created
+kubectl-wait-available deploy/postgres
+# Wait for the DB to be ready before creating the schema
+kubectl-wait-ready -l app=postgres pods
+
+# Create the schema
+kubectl apply -f schema.yaml
+kubectl wait --for=jsonpath='{.status.conditions[*].reason}'=Applied --timeout=500s AtlasSchemas/sample
+
+# Create the migration
+kubectl apply -f migration.yaml
+kubectl wait --for=jsonpath='{.status.conditions[*].reason}'=Applied --timeout=500s AtlasMigrations/sample
+
+# Patch the schema and migration to load their configs from secrets
+kubectl create secret generic schema-config --from-file=config.hcl
+kubectl create secret generic migration-config --from-file=config.hcl
+kubectl patch -f schema.yaml --type merge --patch-file patch-schema-config-from-configmap.yaml
+kubectl patch -f migration.yaml --type merge --patch-file patch-migration-config-from-configmap.yaml
+kubectl wait --for=jsonpath='{.status.conditions[*].reason}'=Applied --timeout=500s AtlasSchemas/sample
+kubectl wait --for=jsonpath='{.status.conditions[*].reason}'=Applied --timeout=500s AtlasMigrations/sample
+
+# Ensure the operator prioritizes the spec URL over the config
+kubectl patch -f schema.yaml --type merge --patch-file patch-invalid-url.yaml
+kubectl patch -f migration.yaml --type merge --patch-file patch-invalid-url.yaml
+kubectl wait --for=jsonpath='{.status.conditions[*].message}'='"Error: postgres: scanning system variables: pq: Could not detect default username. Please provide one explicitly"' --timeout=500s AtlasSchemas/sample
+kubectl wait --for=jsonpath='{.status.conditions[*].message}'='"Error: postgres: scanning system variables: pq: Could not detect default username. Please provide one explicitly"' --timeout=500s AtlasMigrations/sample
+
+-- schema.yaml --
+apiVersion: db.atlasgo.io/v1alpha1
+kind: AtlasSchema
+metadata:
+  name: sample
+spec:
+  envName: "test"
+  schema:
+    sql: |
+      create table users (
+        id int not null,
+        name varchar(255) not null,
+        email varchar(255) unique not null,
+        short_bio varchar(255) not null,
+        primary key (id)
+      );
+  vars:
+    - key: "db_url"
+      valueFrom:
+        secretKeyRef:
+          name: schema-db-creds
+          key: url
+    - key: "dev_db_url"
+      valueFrom:
+        configMapKeyRef:
+          name: schema-db-dev-creds
+          key: url
+  config: |
+    variable "db_url" {
+      type = string
+    }
+    variable "dev_db_url" {
+      type = string
+    }
+    env "test" {
+      url = var.db_url
+      dev = var.dev_db_url
+    }
+-- patch-schema-config-from-configmap.yaml --
+spec:
+  config:
+  configFrom:
+    secretKeyRef:
+      name: schema-config
+      key: config.hcl
+-- patch-migration-config-from-configmap.yaml --
+spec:
+  config:
+  configFrom:
+    secretKeyRef:
+      name: schema-config
+      key: config.hcl
+-- patch-invalid-url.yaml --
+spec:
+  url: "postgres://invalid"
+-- config.hcl --
+variable "db_url" {
+  type = string
+}
+variable "dev_db_url" {
+  type = string
+}
+env "test" {
+  url = var.db_url
+  dev = var.dev_db_url
+}
+-- migration.yaml --
+apiVersion: db.atlasgo.io/v1alpha1
+kind: AtlasMigration
+metadata:
+  name: sample
+spec:
+  envName: "test"
+  vars:
+    - key: "db_url"
+      valueFrom:
+        secretKeyRef:
+          name: migrate-db-creds
+          key: url
+    - key: "dev_db_url"
+      valueFrom:
+        configMapKeyRef:
+          name: migrate-db-dev-creds
+          key: url
+  config: |
+    variable "db_url" {
+      type = string
+    }
+    variable "dev_db_url" {
+      type = string
+    }
+    env "test" {
+      url = var.db_url
+      dev = var.dev_db_url
+    }
+  dir:
+    local:
+      20230316085611.sql: |
+        create sequence users_seq;
+        create table users (
+          id int not null default nextval ('users_seq'),
+          name varchar(255) not null,
+          email varchar(255) unique not null,
+          short_bio varchar(255) not null,
+          primary key (id)
+        );
+      atlas.sum: |
+        h1:FwM0ApKo8xhcZFrSlpa6dYjvi0fnDPo/aZSzajtbHLc=
+        20230316085611.sql h1:ldFr73m6ZQzNi8q9dVJsOU/ZHmkBo4Sax03AaL0VUUs=
+-- database.yaml --
+apiVersion: v1
+kind: Service
+metadata:
+  name: postgres
+spec:
+  selector:
+    app: postgres
+  ports:
+    - name: postgres
+      port: 5432
+      targetPort: postgres
+    - name: postgres-dev
+      port: 5433
+      targetPort: postgres-dev
+    - name: pg-migrate
+      port: 5434
+      targetPort: pg-migrate
+    - name: pg-migrate-dev
+      port: 5435
+      targetPort: pg-migrate-dev
+  type: ClusterIP
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: postgres
+spec:
+  selector:
+    matchLabels:
+      app: postgres
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: postgres
+    spec:
+      securityContext:
+        runAsNonRoot: true
+        runAsUser: 999
+      containers:
+        - name: postgres
+          image: postgres:15.4
+          securityContext:
+            allowPrivilegeEscalation: false
+            capabilities:
+              drop:
+                - all
+          env:
+            - name: POSTGRES_PASSWORD
+              value: pass
+            - name: POSTGRES_USER
+              value: root
+          ports:
+            - containerPort: 5432
+              name: postgres
+          startupProbe:
+            exec:
+              command: [ "pg_isready" ]
+            failureThreshold: 30
+            periodSeconds: 10
+        - name: postgres-dev
+          image: postgres:15.4
+          securityContext:
+            allowPrivilegeEscalation: false
+            capabilities:
+              drop:
+                - all
+          env:
+            - name: POSTGRES_PASSWORD
+              value: pass
+            - name: POSTGRES_USER
+              value: root
+            - name: PGPORT
+              value: "5433"
+          ports:
+            - containerPort: 5433
+              name: postgres-dev
+          startupProbe:
+            exec:
+              command: [ "pg_isready" ]
+            failureThreshold: 30
+            periodSeconds: 10
+        - name: pg-migrate
+          image: postgres:15.4
+          securityContext:
+            allowPrivilegeEscalation: false
+            capabilities:
+              drop:
+                - all
+          env:
+            - name: POSTGRES_PASSWORD
+              value: pass
+            - name: POSTGRES_USER
+              value: root
+            - name: PGPORT
+              value: "5434"
+          ports:
+            - containerPort: 5434
+              name: pg-migrate
+          startupProbe:
+            exec:
+              command: [ "pg_isready" ]
+            failureThreshold: 30
+            periodSeconds: 10
+        - name: pg-migrate-dev
+          image: postgres:15.4
+          securityContext:
+            allowPrivilegeEscalation: false
+            capabilities:
+              drop:
+                - all
+          env:
+            - name: POSTGRES_PASSWORD
+              value: pass
+            - name: POSTGRES_USER
+              value: root
+            - name: PGPORT
+              value: "5435"
+          ports:
+            - containerPort: 5435
+              name: pg-migrate-dev
+          startupProbe:
+            exec:
+              command: [ "pg_isready" ]
+            failureThreshold: 30
+            periodSeconds: 10

From e521224ae1ebbd442efb491f144848a92dc0541b Mon Sep 17 00:00:00 2001
From: Dat Dao
Date: Mon, 23 Dec 2024 11:00:25 +0700
Subject: [PATCH 2/7] block multi-target vs lint expression

---
 api/v1alpha1/project_config.go                | 23 +++++--
 .../controller/atlasmigration_controller.go   | 19 +++---
 .../atlasmigration_controller_test.go         | 18 ++++--
 internal/controller/atlasschema_controller.go | 62 +++++++++++++------
 .../controller/atlasschema_controller_test.go | 41 +++++++++++-
 5 files changed, 119 insertions(+), 44 deletions(-)

diff --git a/api/v1alpha1/project_config.go b/api/v1alpha1/project_config.go
index 62b858a..8c4447b 100644
--- a/api/v1alpha1/project_config.go
+++ b/api/v1alpha1/project_config.go
@@ -19,6 +19,8 @@ import (
 	"fmt"
 
 	"ariga.io/atlas-go-sdk/atlasexec"
+	"github.com/hashicorp/hcl/v2"
+	"github.com/hashicorp/hcl/v2/hclwrite"
 	corev1 "k8s.io/api/core/v1"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 )
@@ -54,14 +56,23 @@ type (
 
 // GetConfig returns the project configuration.
 // The configuration is resolved from the secret reference.
-func (s ProjectConfigSpec) GetConfig(ctx context.Context, r client.Reader, ns string) (string, error) {
-	if s.Config != "" {
-		return s.Config, nil
-	}
+func (s ProjectConfigSpec) GetConfig(ctx context.Context, r client.Reader, ns string) (*hclwrite.File, error) {
+	rawConfig := s.Config
 	if s.ConfigFrom.SecretKeyRef != nil {
-		return getSecretValue(ctx, r, ns, s.ConfigFrom.SecretKeyRef)
+		cfgFromSecret, err := getSecretValue(ctx, r, ns, s.ConfigFrom.SecretKeyRef)
+		if err != nil {
+			return nil, err
+		}
+		rawConfig = cfgFromSecret
+	}
+	if rawConfig == "" {
+		return nil, nil
+	}
+	config, diags := hclwrite.ParseConfig([]byte(rawConfig), "", hcl.InitialPos)
+	if diags.HasErrors() {
+		return nil, fmt.Errorf("failed to parse project configuration: %v", diags)
 	}
-	return "", nil
+	return config, nil
 }
 
 // GetVars returns the input variables for the project configuration.
diff --git a/internal/controller/atlasmigration_controller.go b/internal/controller/atlasmigration_controller.go index 7805626..a289ff2 100644 --- a/internal/controller/atlasmigration_controller.go +++ b/internal/controller/atlasmigration_controller.go @@ -41,7 +41,6 @@ import ( "ariga.io/atlas/sql/migrate" dbv1alpha1 "github.com/ariga/atlas-operator/api/v1alpha1" "github.com/ariga/atlas-operator/internal/controller/watch" - "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/hclwrite" "github.com/zclconf/go-cty/cty" ) @@ -78,7 +77,7 @@ type ( MigrateDown bool ObservedHash string RemoteDir *dbv1alpha1.Remote - Config string + Config *hclwrite.File Vars atlasexec.Vars2 } ) @@ -430,7 +429,7 @@ func (r *AtlasMigrationReconciler) extractData(ctx context.Context, res *dbv1alp if err != nil { return nil, transient(err) } - hasConfig := data.Config != "" + hasConfig := data.Config != nil if hasConfig && data.EnvName == "" { return nil, errors.New("env name must be set when using custom atlas.hcl config") } @@ -539,8 +538,8 @@ func hashMigrationData(d *migrationData) (string, error) { if d.URL != nil { h.Write([]byte(d.URL.String())) } - if d.Config != "" { - h.Write([]byte(d.Config)) + if d.Config != nil { + h.Write([]byte(d.Config.Bytes())) } if c := d.Cloud; c != nil { h.Write([]byte(c.Token)) @@ -582,13 +581,9 @@ func (d *migrationData) render(w io.Writer) error { f.Body().AppendBlock(b) } // Merge the config block if it is set - if d.Config != "" { - cfg, diags := hclwrite.ParseConfig([]byte(d.Config), "", hcl.InitialPos) - if diags.HasErrors() { - return diags - } - mergeBlocks(cfg.Body(), f.Body()) - f = cfg + if d.Config != nil { + mergeBlocks(d.Config.Body(), f.Body()) + f = d.Config } env := searchBlock(f.Body(), hclwrite.NewBlock("env", []string{d.EnvName})) if env == nil { diff --git a/internal/controller/atlasmigration_controller_test.go b/internal/controller/atlasmigration_controller_test.go index 26023e0..c86d529 100644 --- a/internal/controller/atlasmigration_controller_test.go +++ b/internal/controller/atlasmigration_controller_test.go @@ -33,6 +33,8 @@ import ( "ariga.io/atlas-go-sdk/atlasexec" "ariga.io/atlas/sql/migrate" + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclwrite" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -1033,7 +1035,7 @@ func TestBaselineTemplate(t *testing.T) { func TestCustomAtlasHCL(t *testing.T) { migrate := &migrationData{ EnvName: defaultEnvName, - Config: `env "kubernetes" { + Config: mustParseHCL(`env "kubernetes" { url = "sqlite://file2/?mode=memory" dev = "sqlite://dev/?mode=memory" migration { @@ -1041,7 +1043,7 @@ func TestCustomAtlasHCL(t *testing.T) { baseline = "20230412003626" } } -`, +`), } var fileContent bytes.Buffer require.NoError(t, migrate.render(&fileContent)) @@ -1097,7 +1099,7 @@ func TestCustomAtlasHCL_CloudTemplate(t *testing.T) { Name: "my-remote-dir", Tag: "my-remote-tag", }, - Config: `atlas { + Config: mustParseHCL(`atlas { cloud { token = "my-token" url = "https://atlasgo.io/" @@ -1110,7 +1112,7 @@ env "kubernetes" { migration { dir = "atlas://my-remote-dir?tag=my-remote-tag" } -}`, +}`), } var fileContent bytes.Buffer require.NoError(t, migrate.render(&fileContent)) @@ -1391,3 +1393,11 @@ func writeDir(t *testing.T, dir migrate.Dir, w io.Writer) { _, err = fmt.Fprintf(w, `{"data":{"dirState":{"content":%q}}}`, base64.StdEncoding.EncodeToString(arc)) require.NoError(t, err) } + +func mustParseHCL(content string) *hclwrite.File { + f, 
err := hclwrite.ParseConfig([]byte(content), "", hcl.InitialPos) + if err != nil { + panic(err) + } + return f +} diff --git a/internal/controller/atlasschema_controller.go b/internal/controller/atlasschema_controller.go index c89bcbc..88e7849 100644 --- a/internal/controller/atlasschema_controller.go +++ b/internal/controller/atlasschema_controller.go @@ -25,6 +25,7 @@ import ( "net/url" "path" "path/filepath" + "strconv" "strings" "time" @@ -42,7 +43,6 @@ import ( "github.com/ariga/atlas-operator/api/v1alpha1" dbv1alpha1 "github.com/ariga/atlas-operator/api/v1alpha1" "github.com/ariga/atlas-operator/internal/controller/watch" - "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/hclwrite" "github.com/zclconf/go-cty/cty" ) @@ -77,7 +77,7 @@ type ( TxMode dbv1alpha1.TransactionMode Desired *url.URL Cloud *Cloud - Config string + Config *hclwrite.File Vars atlasexec.Vars2 schema []byte } @@ -134,6 +134,10 @@ func (r *AtlasSchemaReconciler) Reconcile(ctx context.Context, req ctrl.Request) r.recordErrEvent(res, err) return result(err) } + if data.hasTargets() { + res.SetNotReady("MultipleTargets", "Multiple targets are not supported") + return ctrl.Result{}, nil + } hash, err := data.hash() if err != nil { res.SetNotReady("CalculatingHash", err.Error()) @@ -205,6 +209,12 @@ func (r *AtlasSchemaReconciler) Reconcile(ctx context.Context, req ctrl.Request) log.Info("the resource is connected to Atlas Cloud", "org", whoami.Org) } var reports []*atlasexec.SchemaApply + shouldLint, err := data.shouldLint() + if err != nil { + res.SetNotReady("LintPolicyError", err.Error()) + r.recordErrEvent(res, err) + return result(err) + } switch desiredURL := data.Desired.String(); { // The resource is connected to Atlas Cloud. case whoami != nil: @@ -406,7 +416,7 @@ func (r *AtlasSchemaReconciler) Reconcile(ctx context.Context, req ctrl.Request) Vars: data.Vars, }) // Run the linting policy. - case data.shouldLint(): + case shouldLint: if err = r.lint(ctx, wd, data, nil); err != nil { reason, msg := "LintPolicyError", err.Error() res.SetNotReady(reason, msg) @@ -525,7 +535,7 @@ func (r *AtlasSchemaReconciler) extractData(ctx context.Context, res *dbv1alpha1 if err != nil { return nil, transient(err) } - hasConfig := data.Config != "" + hasConfig := data.Config != nil if hasConfig && data.EnvName == "" { return nil, errors.New("env name must be set when using custom atlas.hcl config") } @@ -581,32 +591,48 @@ func (r *AtlasSchemaReconciler) recordErrEvent(res *dbv1alpha1.AtlasSchema, err } // ShouldLint returns true if the linting policy is set to error. -func (d *managedData) shouldLint() bool { +func (d *managedData) shouldLint() (bool, error) { p := d.Policy if p == nil || p.Lint == nil || p.Lint.Destructive == nil { // Check if the destructive policy is set to error in the custom config. // This check won't work if the attribute value has expressions in it. 
- if d.Config != "" { - cfg, _ := hclwrite.ParseConfig([]byte(d.Config), "", hcl.InitialPos) - env := searchBlock(cfg.Body(), hclwrite.NewBlock("env", []string{d.EnvName})) + if d.Config != nil { + env := searchBlock(d.Config.Body(), hclwrite.NewBlock("env", []string{d.EnvName})) if env == nil { - return false + return false, nil } lint := searchBlock(env.Body(), hclwrite.NewBlock("lint", nil)) if lint == nil { - return false + return false, nil } if v := searchBlock(lint.Body(), hclwrite.NewBlock("destructive", nil)); v != nil { destructiveErr := v.Body().GetAttribute("error") if destructiveErr != nil { - attrValue := string(destructiveErr.Expr().BuildTokens(nil).Bytes()) - return strings.Contains(attrValue, "true") + attrValue := strings.TrimSpace(string(destructiveErr.Expr().BuildTokens(nil).Bytes())) + attrValue = strings.Trim(attrValue, "\"") + b, err := strconv.ParseBool(attrValue) + if err != nil { + return b, errors.New("cannot determine the value of the destructive.error attribute") + } + return b, nil } } } + return false, nil + } + return p.Lint.Destructive.Error, nil +} + +// hasTargets returns true if the environment has multiple targets/ multi-tenancy. +func (d *managedData) hasTargets() bool { + if d.Config == nil { + return false + } + env := searchBlock(d.Config.Body(), hclwrite.NewBlock("env", []string{d.EnvName})) + if env == nil { return false } - return p.Lint.Destructive.Error + return env.Body().GetAttribute("for_each") != nil } // hash returns the sha256 hash of the desired. @@ -648,13 +674,9 @@ func (d *managedData) render(w io.Writer) error { f.Body().AppendBlock(b) } // Merge config into the atlas.hcl file. - if d.Config != "" { - cfg, diags := hclwrite.ParseConfig([]byte(d.Config), "", hcl.InitialPos) - if diags.HasErrors() { - return diags - } - mergeBlocks(cfg.Body(), f.Body()) - f = cfg + if d.Config != nil { + mergeBlocks(d.Config.Body(), f.Body()) + f = d.Config } if d.EnvName == "" { return errors.New("env name is not set") diff --git a/internal/controller/atlasschema_controller_test.go b/internal/controller/atlasschema_controller_test.go index 943f453..11c7671 100644 --- a/internal/controller/atlasschema_controller_test.go +++ b/internal/controller/atlasschema_controller_test.go @@ -202,6 +202,43 @@ func TestExtractData_CustomDevURL_Secret(t *testing.T) { require.EqualValues(t, "mysql://dev", data.DevURL) } +func TestExtractData_LintExpression(t *testing.T) { + sc := conditionReconciling() + sc.Spec.DevURL = "mysql://dev" + sc.Spec.EnvName = "kubernetes" + sc.Spec.Config = ` +env "kubernetes" { + lint { + destructive { + error = 1 == 1 + } + } +} + ` + tt := newTest(t) + data, err := tt.r.extractData(context.Background(), sc) + require.NoError(t, err) + _, err = data.shouldLint() + require.Error(t, err) + require.Contains(t, err.Error(), "cannot determine the value of the destructive.error attribute") +} + +func TestExtractData_MultiTargets(t *testing.T) { + sc := conditionReconciling() + sc.Spec.DevURL = "mysql://dev" + sc.Spec.EnvName = "kubernetes" + sc.Spec.Config = ` +env "kubernetes" { + for_each = ["foo", "bar"] +} + ` + tt := newTest(t) + data, err := tt.r.extractData(context.Background(), sc) + require.NoError(t, err) + hasTargets := data.hasTargets() + require.True(t, hasTargets) +} + func TestReconcile_Credentials_BadPassSecret(t *testing.T) { tt := newTest(t) sc := conditionReconciling() @@ -461,7 +498,7 @@ func TestCustomAtlasHCL_PolicyTemplate(t *testing.T) { DevURL: "mysql://root:password@localhost:3306/dev", Schemas: []string{"foo", 
"bar"}, Desired: must(url.Parse("file://schema.sql")), - Config: ` + Config: mustParseHCL(` env "kubernetes" { diff { concurrent_index { @@ -479,7 +516,7 @@ env "kubernetes" { } } } - `, + `), } err := data.render(&buf) require.NoError(t, err) From 3cd7c8e1480651deba1e6411fb3c2626a8d3bbef Mon Sep 17 00:00:00 2001 From: Dat Dao Date: Mon, 23 Dec 2024 11:02:31 +0700 Subject: [PATCH 3/7] naming stuff --- api/v1alpha1/project_config.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/api/v1alpha1/project_config.go b/api/v1alpha1/project_config.go index 8c4447b..508abd7 100644 --- a/api/v1alpha1/project_config.go +++ b/api/v1alpha1/project_config.go @@ -77,7 +77,7 @@ func (s ProjectConfigSpec) GetConfig(ctx context.Context, r client.Reader, ns st // GetVars returns the input variables for the project configuration. // The variables are resolved from the secret or configmap reference. -func (s ProjectConfigSpec) GetVars(ctx context.Context, r client.Reader, namespace string) (atlasexec.Vars2, error) { +func (s ProjectConfigSpec) GetVars(ctx context.Context, r client.Reader, ns string) (atlasexec.Vars2, error) { vars := make(map[string]any) for _, variable := range s.Vars { var ( @@ -86,12 +86,12 @@ func (s ProjectConfigSpec) GetVars(ctx context.Context, r client.Reader, namespa ) value = variable.Value if variable.ValueFrom.SecretKeyRef != nil { - if value, err = getSecretValue(ctx, r, namespace, variable.ValueFrom.SecretKeyRef); err != nil { + if value, err = getSecretValue(ctx, r, ns, variable.ValueFrom.SecretKeyRef); err != nil { return nil, err } } if variable.ValueFrom.ConfigMapKeyRef != nil { - if value, err = getConfigMapValue(ctx, r, namespace, variable.ValueFrom.ConfigMapKeyRef); err != nil { + if value, err = getConfigMapValue(ctx, r, ns, variable.ValueFrom.ConfigMapKeyRef); err != nil { return nil, err } } From 528651b0aa83c1bd2c864cb481ff4948330e3e2c Mon Sep 17 00:00:00 2001 From: Dat Dao Date: Tue, 24 Dec 2024 11:57:40 +0700 Subject: [PATCH 4/7] only use custom config when enabled --- README.md | 45 +++++++++++++++++++ .../atlas-operator/templates/deployment.yaml | 2 + charts/atlas-operator/values.yaml | 4 ++ cmd/main.go | 36 +++++++++++++-- config/customconfig/kustomization.yaml | 30 +++++++++++++ .../controller/atlasmigration_controller.go | 18 +++++++- .../atlasmigration_controller_test.go | 18 ++++++++ internal/controller/atlasschema_controller.go | 27 ++++++++--- .../controller/atlasschema_controller_test.go | 15 +++++++ internal/controller/testhelper.go | 10 +++-- skaffold.yaml | 1 + 11 files changed, 191 insertions(+), 15 deletions(-) create mode 100644 config/customconfig/kustomization.yaml diff --git a/README.md b/README.md index 5c792cc..ddaf335 100644 --- a/README.md +++ b/README.md @@ -63,6 +63,51 @@ To configure the operator, you can set the following values in the `values.yaml` - `prewarmDevDB`: The Operator always keeps devdb resources around to speed up the migration process. Set this to `false` to disable this feature. +- `allowCustomConfig`: Enable this to allow custom `atlas.hcl` configuration. To use this feature, you can set the `config` field in the `AtlasSchema` or `AtlasMigration` resource. 
+
+```yaml
+  spec:
+    envName: myenv
+    config: |
+      env "myenv" {}
+    # config from secretKeyRef
+    # configFrom:
+    #   secretKeyRef:
+    #     key: config
+    #     name: my-secret
+```
+
+To use variables in the `config` field:
+
+```yaml
+  spec:
+    envName: myenv
+    vars:
+      - key: db_url
+        value: "mysql://root"
+      # variables from secretKeyRef
+      # - key: db_url
+      #   valueFrom:
+      #     secretKeyRef:
+      #       key: db_url
+      #       name: my-secret
+      # variables from configMapKeyRef
+      # - key: db_url
+      #   valueFrom:
+      #     configMapKeyRef:
+      #       key: db_url
+      #       name: my-configmap
+    config: |
+      variable "db_url" {
+        type = string
+      }
+      env "myenv" {
+        url = var.db_url
+      }
+```
+
+> Note: Allowing custom configuration can elevate privileges of the operator. Use this feature with caution.
+
 - `extraEnvs`: Used to set environment variables for the operator
 
 ```yaml
diff --git a/charts/atlas-operator/templates/deployment.yaml b/charts/atlas-operator/templates/deployment.yaml
index b964666..c3b7c4b 100644
--- a/charts/atlas-operator/templates/deployment.yaml
+++ b/charts/atlas-operator/templates/deployment.yaml
@@ -55,6 +55,8 @@ spec:
         env:
         - name: PREWARM_DEVDB
           value: "{{ .Values.prewarmDevDB }}"
+        - name: ALLOW_CUSTOM_CONFIG
+          value: "{{ .Values.allowCustomConfig }}"
 {{- with .Values.extraEnvs }}
 {{- toYaml . | nindent 8 }}
 {{- end }}
diff --git a/charts/atlas-operator/values.yaml b/charts/atlas-operator/values.yaml
index 44a5e19..67e5c8d 100644
--- a/charts/atlas-operator/values.yaml
+++ b/charts/atlas-operator/values.yaml
@@ -48,6 +48,10 @@ affinity: {}
 # Set this to true to keep the devdb pods around.
 prewarmDevDB: true
 
+# Enable this to allow custom project configuration
+# Warning: This action will elevate the privileges of the operator
+allowCustomConfig: false
+
 # -- Additional environment variables to set
 extraEnvs: []
 # extraEnvs:
diff --git a/cmd/main.go b/cmd/main.go
index 1de3d0e..837892f 100644
--- a/cmd/main.go
+++ b/cmd/main.go
@@ -58,6 +58,8 @@ const (
 	vercheckURL = "https://vercheck.ariga.io"
 	// prewarmDevDB when disabled it deletes the devDB pods after the schema is created
 	prewarmDevDB = "PREWARM_DEVDB"
+	// allowsCustomConfig when enabled allows the use of custom atlas.hcl config
+	allowsCustomConfig = "ALLOW_CUSTOM_CONFIG"
 )
 
 func init() {
@@ -141,13 +143,24 @@ func main() {
 		os.Exit(1)
 	}
 	prewarmDevDB := getPrewarmDevDBEnv()
-	if err = controller.NewAtlasSchemaReconciler(mgr, controller.NewAtlasExec, prewarmDevDB).
-		SetupWithManager(mgr); err != nil {
+	allowCustomConfig := getAllowCustomConfigEnv()
+	// Setup controller for AtlasSchema
+	schemaController := controller.NewAtlasSchemaReconciler(mgr, prewarmDevDB)
+	schemaController.SetAtlasClient(controller.NewAtlasExec)
+	if allowCustomConfig {
+		schemaController.AllowCustomConfig()
+	}
+	if err := schemaController.SetupWithManager(mgr); err != nil {
 		setupLog.Error(err, "unable to create controller", "controller", "AtlasSchema")
 		os.Exit(1)
 	}
-	if err = controller.NewAtlasMigrationReconciler(mgr, controller.NewAtlasExec, prewarmDevDB).
- SetupWithManager(mgr); err != nil { + // Setup controller for AtlasMigration + migrationController := controller.NewAtlasMigrationReconciler(mgr, prewarmDevDB) + migrationController.SetAtlasClient(controller.NewAtlasExec) + if allowCustomConfig { + migrationController.AllowCustomConfig() + } + if err = migrationController.SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "AtlasMigration") os.Exit(1) } @@ -219,3 +232,18 @@ func getPrewarmDevDBEnv() bool { } return prewarmDevDB } + +// getAllowCustomConfigEnv returns the value of the env var ALLOW_CUSTOM_CONFIG. +// if the env var is not set, it returns false. +func getAllowCustomConfigEnv() bool { + env := os.Getenv(allowsCustomConfig) + if env == "" { + return false + } + allowsCustomConfig, err := strconv.ParseBool(env) + if err != nil { + setupLog.Error(err, "invalid value for env var ALLOW_CUSTOM_CONFIG, expected true or false") + os.Exit(1) + } + return allowsCustomConfig +} diff --git a/config/customconfig/kustomization.yaml b/config/customconfig/kustomization.yaml new file mode 100644 index 0000000..787dc7a --- /dev/null +++ b/config/customconfig/kustomization.yaml @@ -0,0 +1,30 @@ +# Copyright 2023 The Atlas Operator Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +namespace: atlas-operator-system +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ../sqlserver +patches: +- target: + kind: Deployment + namespace: system + name: controller-manager + patch: |- + - op: add + path: "/spec/template/spec/containers/0/env/-" + value: + name: ALLOW_CUSTOM_CONFIG + value: "true" diff --git a/internal/controller/atlasmigration_controller.go b/internal/controller/atlasmigration_controller.go index a289ff2..c6734cc 100644 --- a/internal/controller/atlasmigration_controller.go +++ b/internal/controller/atlasmigration_controller.go @@ -61,6 +61,8 @@ type ( secretWatcher *watch.ResourceWatcher recorder record.EventRecorder devDB *devDBReconciler + // AllowCustomConfig allows the controller to use custom atlas.hcl config. + allowCustomConfig bool } // migrationData is the data used to render the HCL template // that will be used for Atlas CLI @@ -82,12 +84,11 @@ type ( } ) -func NewAtlasMigrationReconciler(mgr Manager, atlas AtlasExecFn, prewarmDevDB bool) *AtlasMigrationReconciler { +func NewAtlasMigrationReconciler(mgr Manager, prewarmDevDB bool) *AtlasMigrationReconciler { r := mgr.GetEventRecorderFor("atlasmigration-controller") return &AtlasMigrationReconciler{ Client: mgr.GetClient(), scheme: mgr.GetScheme(), - atlasClient: atlas, configMapWatcher: watch.New(), secretWatcher: watch.New(), recorder: r, @@ -211,6 +212,16 @@ func (r *AtlasMigrationReconciler) SetupWithManager(mgr ctrl.Manager) error { Complete(r) } +// SetAtlasClient sets the Atlas client for the reconciler. +func (r *AtlasMigrationReconciler) SetAtlasClient(fn AtlasExecFn) { + r.atlasClient = fn +} + +// AllowCustomConfig allows the controller to use custom atlas.hcl config. 
+func (r *AtlasMigrationReconciler) AllowCustomConfig() { + r.allowCustomConfig = true +} + func (r *AtlasMigrationReconciler) watchRefs(res *dbv1alpha1.AtlasMigration) { if c := res.Spec.Dir.ConfigMapRef; c != nil { r.configMapWatcher.Watch( @@ -429,6 +440,9 @@ func (r *AtlasMigrationReconciler) extractData(ctx context.Context, res *dbv1alp if err != nil { return nil, transient(err) } + if !r.allowCustomConfig && data.Config != nil { + return nil, errors.New("install the operator with \"--set allowCustomConfig=true\" to use custom atlas.hcl config") + } hasConfig := data.Config != nil if hasConfig && data.EnvName == "" { return nil, errors.New("env name must be set when using custom atlas.hcl config") diff --git a/internal/controller/atlasmigration_controller_test.go b/internal/controller/atlasmigration_controller_test.go index c86d529..2f5514d 100644 --- a/internal/controller/atlasmigration_controller_test.go +++ b/internal/controller/atlasmigration_controller_test.go @@ -990,6 +990,23 @@ func TestWatcher_enabled(t *testing.T) { }, watched) } +func TestCustomConfig_disabled(t *testing.T) { + tt := migrationCliTest(t) + tt.r.allowCustomConfig = false + _, err := tt.r.extractData(context.Background(), &dbv1alpha1.AtlasMigration{ + ObjectMeta: migrationObjmeta(), + Spec: dbv1alpha1.AtlasMigrationSpec{ + TargetSpec: dbv1alpha1.TargetSpec{URL: tt.dburl}, + EnvName: "test", + ProjectConfigSpec: dbv1alpha1.ProjectConfigSpec{ + Config: `env "test" {}`, + }, + }, + }) + require.Error(t, err) + require.Contains(t, err.Error(), "install the operator with \"--set allowCustomConfig=true\" to use custom atlas.hcl config") +} + func TestDefaultTemplate(t *testing.T) { migrate := &migrationData{ EnvName: defaultEnvName, @@ -1276,6 +1293,7 @@ func newMigrationTest(t *testing.T) *migrationTest { scheme: scheme, recorder: r, }, + allowCustomConfig: true, }, } } diff --git a/internal/controller/atlasschema_controller.go b/internal/controller/atlasschema_controller.go index 88e7849..37b9e17 100644 --- a/internal/controller/atlasschema_controller.go +++ b/internal/controller/atlasschema_controller.go @@ -65,6 +65,8 @@ type ( secretWatcher *watch.ResourceWatcher recorder record.EventRecorder devDB *devDBReconciler + // AllowCustomConfig allows the controller to use custom atlas.hcl config. + allowCustomConfig bool } // managedData contains information about the managed database and its desired state. managedData struct { @@ -85,17 +87,17 @@ type ( const sqlLimitSize = 1024 -func NewAtlasSchemaReconciler(mgr Manager, atlas AtlasExecFn, prewarmDevDB bool) *AtlasSchemaReconciler { - r := mgr.GetEventRecorderFor("atlasschema-controller") - return &AtlasSchemaReconciler{ +func NewAtlasSchemaReconciler(mgr Manager, prewarmDevDB bool) *AtlasSchemaReconciler { + rec := mgr.GetEventRecorderFor("atlasschema-controller") + r := &AtlasSchemaReconciler{ Client: mgr.GetClient(), scheme: mgr.GetScheme(), - atlasClient: atlas, configMapWatcher: watch.New(), secretWatcher: watch.New(), - recorder: r, - devDB: newDevDB(mgr, r, prewarmDevDB), + recorder: rec, + devDB: newDevDB(mgr, rec, prewarmDevDB), } + return r } // Reconcile is part of the main kubernetes reconciliation loop which aims to @@ -517,6 +519,16 @@ func (r *AtlasSchemaReconciler) watchRefs(res *dbv1alpha1.AtlasSchema) { } } +// SetAtlasClient sets the Atlas client function. +func (r *AtlasSchemaReconciler) SetAtlasClient(fn AtlasExecFn) { + r.atlasClient = fn +} + +// AllowCustomConfig allows the controller to use custom atlas.hcl config. 
+func (r *AtlasSchemaReconciler) AllowCustomConfig() { + r.allowCustomConfig = true +} + // extractData extracts the info about the managed database and its desired state. func (r *AtlasSchemaReconciler) extractData(ctx context.Context, res *dbv1alpha1.AtlasSchema) (_ *managedData, err error) { var ( @@ -535,6 +547,9 @@ func (r *AtlasSchemaReconciler) extractData(ctx context.Context, res *dbv1alpha1 if err != nil { return nil, transient(err) } + if !r.allowCustomConfig && data.Config != nil { + return nil, errors.New("install the operator with \"--set allowCustomConfig=true\" to use custom atlas.hcl config") + } hasConfig := data.Config != nil if hasConfig && data.EnvName == "" { return nil, errors.New("env name must be set when using custom atlas.hcl config") diff --git a/internal/controller/atlasschema_controller_test.go b/internal/controller/atlasschema_controller_test.go index 11c7671..047b7ff 100644 --- a/internal/controller/atlasschema_controller_test.go +++ b/internal/controller/atlasschema_controller_test.go @@ -239,6 +239,20 @@ env "kubernetes" { require.True(t, hasTargets) } +func TestExtractData_DisabledCustomConfig(t *testing.T) { + sc := conditionReconciling() + sc.Spec.DevURL = "mysql://dev" + sc.Spec.EnvName = "kubernetes" + sc.Spec.Config = ` +env "kubernetes" {} + ` + tt := newTest(t) + tt.r.allowCustomConfig = false + _, err := tt.r.extractData(context.Background(), sc) + require.Error(t, err) + require.Contains(t, err.Error(), "install the operator with \"--set allowCustomConfig=true\" to use custom atlas.hcl config") +} + func TestReconcile_Credentials_BadPassSecret(t *testing.T) { tt := newTest(t) sc := conditionReconciling() @@ -635,6 +649,7 @@ func newTest(t *testing.T) *test { scheme: scheme, recorder: r, }, + allowCustomConfig: true, }, } } diff --git a/internal/controller/testhelper.go b/internal/controller/testhelper.go index 9c7c802..de7cad5 100644 --- a/internal/controller/testhelper.go +++ b/internal/controller/testhelper.go @@ -118,7 +118,10 @@ func (m *mockAtlasExec) MigrateStatus(context.Context, *atlasexec.MigrateStatusP } // newRunner returns a runner that can be used to test a reconcile.Reconciler. 
-func newRunner[T reconcile.Reconciler](fn func(Manager, AtlasExecFn, bool) T, modify func(*fake.ClientBuilder), mock *mockAtlasExec) (*helper, runner) { +func newRunner[T interface { + Reconcile(context.Context, reconcile.Request) (reconcile.Result, error) + SetAtlasClient(AtlasExecFn) +}](fn func(Manager, bool) T, modify func(*fake.ClientBuilder), mock *mockAtlasExec) (*helper, runner) { scheme := runtime.NewScheme() clientgoscheme.AddToScheme(scheme) dbv1alpha1.AddToScheme(scheme) @@ -132,12 +135,13 @@ func newRunner[T reconcile.Reconciler](fn func(Manager, AtlasExecFn, bool) T, mo client: c, recorder: r, scheme: scheme, - }, func(s string, c *Cloud) (AtlasExec, error) { + }, true) + a.SetAtlasClient(func(s string, c *Cloud) (AtlasExec, error) { if mock == nil { return NewAtlasExec(s, c) } return mock, nil - }, true) + }) h := &helper{client: c, recorder: r} return h, func(obj client.Object, fn check) { fn(a.Reconcile(context.Background(), request(obj))) diff --git a/skaffold.yaml b/skaffold.yaml index 697182d..7114f26 100644 --- a/skaffold.yaml +++ b/skaffold.yaml @@ -30,6 +30,7 @@ profiles: paths: - config/default - config/sqlserver + - config/customconfig - name: helm deploy: helm: From 5d14ec5e8f1f8b799138ca99905533e1ac994b18 Mon Sep 17 00:00:00 2001 From: Dat Dao Date: Tue, 24 Dec 2024 16:59:59 +0700 Subject: [PATCH 5/7] test/e2e/testscript: add multi-targets --- test/e2e/testscript/multi-tenancy.txtar | 154 ++++++++++++++++++++++++ 1 file changed, 154 insertions(+) create mode 100644 test/e2e/testscript/multi-tenancy.txtar diff --git a/test/e2e/testscript/multi-tenancy.txtar b/test/e2e/testscript/multi-tenancy.txtar new file mode 100644 index 0000000..5d0c226 --- /dev/null +++ b/test/e2e/testscript/multi-tenancy.txtar @@ -0,0 +1,154 @@ +env DB_URL=postgres://root:pass@postgres.${NAMESPACE}:5432/postgres?sslmode=disable +env DB_DEV_URL=postgres://root:pass@postgres.${NAMESPACE}:5433/postgres?sslmode=disable +kubectl apply -f database.yaml +kubectl create secret generic db-creds --from-literal=url=${DB_URL} +kubectl create configmap db-dev-creds --from-literal=url=${DB_DEV_URL} + +# Wait for the first pod created +kubectl-wait-available deploy/postgres +# Wait for the DB ready before creating the schema +kubectl-wait-ready -l app=postgres pods + +# Create the schema +kubectl exec -i deploy/postgres -- psql -U root -d postgres -c 'CREATE SCHEMA tenant_1;' +kubectl exec -i deploy/postgres -- psql -U root -d postgres -c 'CREATE SCHEMA tenant_2;' + +# Inspect the schema to ensure it's correct +atlas schema inspect -u ${DB_URL} +cmp stdout schema.hcl + +kubectl apply -f multi-tenancy.yaml +kubectl wait --for=jsonpath='{.status.conditions[*].reason}'=MultipleTargets --timeout=500s AtlasSchemas/multi-tenancy + +-- multi-tenancy.yaml -- +apiVersion: db.atlasgo.io/v1alpha1 +kind: AtlasSchema +metadata: + name: multi-tenancy +spec: + envName: "test" + schema: + sql: | + create table users ( + id int not null, + name varchar(255) not null, + email varchar(255) unique not null, + short_bio varchar(255) not null, + primary key (id) + ); + vars: + - key: "db_url" + valueFrom: + secretKeyRef: + name: db-creds + key: url + - key: "dev_db_url" + valueFrom: + configMapKeyRef: + name: db-dev-creds + key: url + config: | + variable "db_url" { + type = string + } + variable "dev_db_url" { + type = string + } + data "sql" "tenants" { + url = var.db_url + query = < Date: Thu, 26 Dec 2024 14:29:22 +0700 Subject: [PATCH 6/7] requires login to customize atlas.hcl --- 
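Note for reviewers: the gate added below probes the Atlas client with `WhoAmI` and only honors a custom `atlas.hcl` when the client is logged in to Atlas Cloud. A condensed, self-contained sketch of the pattern (the names `whoAmIer`, `fakeClient`, and `requireLoginForConfig` are illustrative, not identifiers from this patch):

```go
package main

import (
	"context"
	"errors"
	"fmt"

	"ariga.io/atlas-go-sdk/atlasexec"
)

// whoAmIer is the narrow slice of the Atlas client that the gate needs.
type whoAmIer interface {
	WhoAmI(context.Context) (*atlasexec.WhoAmI, error)
}

// requireLoginForConfig rejects a custom atlas.hcl config unless the
// client is authenticated against Atlas Cloud.
func requireLoginForConfig(ctx context.Context, c whoAmIer, hasConfig bool) error {
	whoami, err := c.WhoAmI(ctx)
	switch {
	case errors.Is(err, atlasexec.ErrRequireLogin):
		if hasConfig {
			return errors.New("login is required to use custom atlas.hcl config")
		}
		return nil // anonymous use without a custom config is still allowed
	case err != nil:
		return err
	default:
		fmt.Println("connected to Atlas Cloud, org:", whoami.Org)
		return nil
	}
}

// fakeClient simulates a logged-out client for the demonstration below.
type fakeClient struct{}

func (fakeClient) WhoAmI(context.Context) (*atlasexec.WhoAmI, error) {
	return nil, atlasexec.ErrRequireLogin
}

func main() {
	err := requireLoginForConfig(context.Background(), fakeClient{}, true)
	fmt.Println(err) // login is required to use custom atlas.hcl config
}
```

The controllers in this patch implement the same check inline in their reconcile loops, recording the error on the resource status via `SetNotReady` instead of returning it to a caller.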
.../controller/atlasmigration_controller.go | 44 ++++++++++---- .../atlasmigration_controller_test.go | 59 +------------------ internal/controller/atlasschema_controller.go | 6 ++ test/e2e/testscript/custom-config.txtar | 13 ++++ test/e2e/testscript/multi-tenancy.txtar | 7 +++ 5 files changed, 61 insertions(+), 68 deletions(-) diff --git a/internal/controller/atlasmigration_controller.go b/internal/controller/atlasmigration_controller.go index c6734cc..4123969 100644 --- a/internal/controller/atlasmigration_controller.go +++ b/internal/controller/atlasmigration_controller.go @@ -271,10 +271,27 @@ func (r *AtlasMigrationReconciler) reconcile(ctx context.Context, data *migratio return err } defer wd.Close() - c, err := r.atlasClient(wd.Path(), nil) + c, err := r.atlasClient(wd.Path(), data.Cloud) if err != nil { return err } + var whoami *atlasexec.WhoAmI + switch whoami, err = c.WhoAmI(ctx); { + case errors.Is(err, atlasexec.ErrRequireLogin): + log.Info("the resource is not connected to Atlas Cloud") + if data.Config != nil { + err = errors.New("login is required to use custom atlas.hcl config") + res.SetNotReady("WhoAmI", err.Error()) + r.recordErrEvent(res, err) + return err + } + case err != nil: + res.SetNotReady("WhoAmI", err.Error()) + r.recordErrEvent(res, err) + return err + default: + log.Info("the resource is connected to Atlas Cloud", "org", whoami.Org) + } log.Info("reconciling migration", "env", data.EnvName) // Check if there are any pending migration files status, err := c.MigrateStatus(ctx, &atlasexec.MigrateStatusParams{Env: data.EnvName, Vars: data.Vars}) @@ -456,10 +473,24 @@ func (r *AtlasMigrationReconciler) extractData(ctx context.Context, res *dbv1alp if !hasConfig && data.URL == nil { return nil, transient(errors.New("no target database defined")) } + if s := s.Cloud.TokenFrom.SecretKeyRef; s != nil { + token, err := getSecretValue(ctx, r, res.Namespace, s) + if err != nil { + return nil, err + } + data.Cloud = &Cloud{Token: token} + } + if s.Cloud.Project != "" || s.Cloud.URL != "" { + if data.Cloud == nil { + data.Cloud = &Cloud{} + } + data.Cloud.Repo = s.Cloud.Project + data.Cloud.URL = s.Cloud.URL + } switch d := s.Dir; { case d.Remote.Name != "": c := s.Cloud - if c.TokenFrom.SecretKeyRef == nil { + if c.TokenFrom.SecretKeyRef == nil && !hasConfig { return nil, errors.New("cannot use remote directory without Atlas Cloud token") } if f := s.ProtectedFlows; f != nil { @@ -470,15 +501,6 @@ func (r *AtlasMigrationReconciler) extractData(ctx context.Context, res *dbv1alp data.MigrateDown = d.Allow } } - token, err := getSecretValue(ctx, r, res.Namespace, c.TokenFrom.SecretKeyRef) - if err != nil { - return nil, err - } - data.Cloud = &Cloud{ - Token: token, - Repo: c.Project, - URL: c.URL, - } data.RemoteDir = &d.Remote case d.Local != nil || d.ConfigMapRef != nil: if d.Local != nil && d.ConfigMapRef != nil { diff --git a/internal/controller/atlasmigration_controller_test.go b/internal/controller/atlasmigration_controller_test.go index 2f5514d..a8688e0 100644 --- a/internal/controller/atlasmigration_controller_test.go +++ b/internal/controller/atlasmigration_controller_test.go @@ -18,16 +18,12 @@ import ( "bytes" "context" "encoding/base64" - "encoding/json" "fmt" "io" "io/fs" - "net/http" - "net/http/httptest" "net/url" "os" "path/filepath" - "strings" "testing" "time" @@ -294,6 +290,7 @@ func TestMigration_MigrateDown_Remote_Protected(t *testing.T) { } ) mockExec := &mockAtlasExec{} + mockExec.whoami.res = &atlasexec.WhoAmI{Org: "my-org"} mockExec.status.res = 
&atlasexec.MigrateStatus{ Current: "2", Applied: []*atlasexec.Revision{ @@ -421,6 +418,7 @@ func TestMigration_MigrateDown_Local(t *testing.T) { `, })) mockExec := &mockAtlasExec{} + mockExec.whoami.res = &atlasexec.WhoAmI{Org: "my-org"} mockExec.status.res = &atlasexec.MigrateStatus{ Current: "2", Applied: []*atlasexec.Revision{ @@ -1183,59 +1181,6 @@ env "kubernetes" { `, fileContent.String()) } -func TestMigrationWithDeploymentContext(t *testing.T) { - srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - type ( - RunContext struct { - TriggerType string `json:"triggerType,omitempty"` - TriggerVersion string `json:"triggerVersion,omitempty"` - } - graphQLQuery struct { - Query string `json:"query"` - Variables json.RawMessage `json:"variables"` - MigrateApplyReport struct { - Input struct { - Context *RunContext `json:"context,omitempty"` - } `json:"input"` - } - } - ) - var m graphQLQuery - require.NoError(t, json.NewDecoder(r.Body).Decode(&m)) - switch { - case strings.Contains(m.Query, "query"): - memdir := &migrate.MemDir{} - memdir.WriteFile("30230412003626.sql", []byte(`CREATE TABLE foo (id INT PRIMARY KEY)`)) - writeDir(t, memdir, w) - case strings.Contains(m.Query, "reportMigration"): - err := json.Unmarshal(m.Variables, &m.MigrateApplyReport) - require.NoError(t, err) - require.Equal(t, "my-version", m.MigrateApplyReport.Input.Context.TriggerVersion) - require.Equal(t, "KUBERNETES", m.MigrateApplyReport.Input.Context.TriggerType) - } - })) - defer srv.Close() - tt := migrationCliTest(t) - tt.initDefaultTokenSecret() - am := tt.getAtlasMigration() - am.Spec.Cloud.URL = srv.URL - am.Spec.Dir.Remote.Name = "my-remote-dir" - am.Spec.Cloud.Project = "my-project" - am.Spec.Cloud.TokenFrom = dbv1alpha1.TokenFrom{ - SecretKeyRef: &corev1.SecretKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "my-secret", - }, - Key: "token", - }, - } - tt.k8s.put(am) - ctx := dbv1alpha1.WithVersionContext(context.Background(), "my-version") - result, err := tt.r.Reconcile(ctx, migrationReq()) - require.NoError(tt, err) - require.EqualValues(tt, reconcile.Result{}, result) -} - func migrationObjmeta() metav1.ObjectMeta { return metav1.ObjectMeta{ Name: "atlas-migration", diff --git a/internal/controller/atlasschema_controller.go b/internal/controller/atlasschema_controller.go index 37b9e17..62bc15d 100644 --- a/internal/controller/atlasschema_controller.go +++ b/internal/controller/atlasschema_controller.go @@ -203,6 +203,12 @@ func (r *AtlasSchemaReconciler) Reconcile(ctx context.Context, req ctrl.Request) switch whoami, err = cli.WhoAmI(ctx); { case errors.Is(err, atlasexec.ErrRequireLogin): log.Info("the resource is not connected to Atlas Cloud") + if data.Config != nil { + err = errors.New("login is required to use custom atlas.hcl config") + res.SetNotReady("WhoAmI", err.Error()) + r.recordErrEvent(res, err) + return result(err) + } case err != nil: res.SetNotReady("WhoAmI", err.Error()) r.recordErrEvent(res, err) diff --git a/test/e2e/testscript/custom-config.txtar b/test/e2e/testscript/custom-config.txtar index a302c6e..db63a4e 100644 --- a/test/e2e/testscript/custom-config.txtar +++ b/test/e2e/testscript/custom-config.txtar @@ -7,6 +7,8 @@ kubectl create secret generic schema-db-creds --from-literal=url=${SCHEMA_DB_URL kubectl create configmap schema-db-dev-creds --from-literal=url=${SCHEMA_DB_DEV_URL} kubectl create secret generic migrate-db-creds --from-literal=url=${MIGRATE_DB_URL} kubectl create configmap migrate-db-dev-creds 
--from-literal=url=${MIGRATE_DB_DEV_URL} +# Create the secret to store ATLAS_TOKEN +kubectl create secret generic atlas-token --from-literal=ATLAS_TOKEN=${ATLAS_TOKEN} # Wait for the first pod created kubectl-wait-available deploy/postgres @@ -51,6 +53,12 @@ spec: short_bio varchar(255) not null, primary key (id) ); + cloud: + repo: atlas-operator + tokenFrom: + secretKeyRef: + name: atlas-token + key: ATLAS_TOKEN vars: - key: "db_url" valueFrom: @@ -119,6 +127,11 @@ spec: configMapKeyRef: name: migrate-db-dev-creds key: url + cloud: + tokenFrom: + secretKeyRef: + name: atlas-token + key: ATLAS_TOKEN config: | variable "db_url" { type = string diff --git a/test/e2e/testscript/multi-tenancy.txtar b/test/e2e/testscript/multi-tenancy.txtar index 5d0c226..31aecdb 100644 --- a/test/e2e/testscript/multi-tenancy.txtar +++ b/test/e2e/testscript/multi-tenancy.txtar @@ -3,6 +3,7 @@ env DB_DEV_URL=postgres://root:pass@postgres.${NAMESPACE}:5433/postgres?sslmode= kubectl apply -f database.yaml kubectl create secret generic db-creds --from-literal=url=${DB_URL} kubectl create configmap db-dev-creds --from-literal=url=${DB_DEV_URL} +kubectl create secret generic atlas-token --from-literal=ATLAS_TOKEN=${ATLAS_TOKEN} # Wait for the first pod created kubectl-wait-available deploy/postgres @@ -36,6 +37,12 @@ spec: short_bio varchar(255) not null, primary key (id) ); + cloud: + repo: atlas-operator + tokenFrom: + secretKeyRef: + name: atlas-token + key: ATLAS_TOKEN vars: - key: "db_url" valueFrom: From cefd6f83850da3ff172133e3249cdd2ea4c30f58 Mon Sep 17 00:00:00 2001 From: Dat Dao Date: Thu, 2 Jan 2025 11:37:05 +0700 Subject: [PATCH 7/7] resolve comments --- README.md | 2 +- api/v1alpha1/project_config.go | 2 +- charts/atlas-operator/values.yaml | 3 +- .../controller/atlasmigration_controller.go | 33 +++++++++++--- internal/controller/atlasschema_controller.go | 40 +++++++++++++---- .../controller/atlasschema_controller_test.go | 45 +++++++++++++++++++ internal/controller/common_test.go | 22 +++++++++ test/e2e/testscript/multi-tenancy.txtar | 2 +- 8 files changed, 130 insertions(+), 19 deletions(-) diff --git a/README.md b/README.md index ddaf335..cd782a2 100644 --- a/README.md +++ b/README.md @@ -106,7 +106,7 @@ To use variables in the `config` field: } ``` -> Note: Allowing custom configuration can elevate privileges of the operator. Use this feature with caution. +> Note: Allowing custom configuration enables executing arbitrary commands using the `external` data source as well as arbitrary SQL using the `sql` data source. Use this feature with caution. - `extraEnvs`: Used to set environment variables for the operator diff --git a/api/v1alpha1/project_config.go b/api/v1alpha1/project_config.go index 508abd7..89d7718 100644 --- a/api/v1alpha1/project_config.go +++ b/api/v1alpha1/project_config.go @@ -78,7 +78,7 @@ func (s ProjectConfigSpec) GetConfig(ctx context.Context, r client.Reader, ns st // GetVars returns the input variables for the project configuration. // The variables are resolved from the secret or configmap reference. 
func (s ProjectConfigSpec) GetVars(ctx context.Context, r client.Reader, ns string) (atlasexec.Vars2, error) { - vars := make(map[string]any) + vars := atlasexec.Vars2{} for _, variable := range s.Vars { var ( value string diff --git a/charts/atlas-operator/values.yaml b/charts/atlas-operator/values.yaml index 67e5c8d..63ba6ca 100644 --- a/charts/atlas-operator/values.yaml +++ b/charts/atlas-operator/values.yaml @@ -49,7 +49,8 @@ affinity: {} prewarmDevDB: true # Enable this to allow custom project configuration -# Warning: This action will elevate the privileges of the operator +# Warning: This setting enables users to use the `external` and `sql` data sources +# which can be used to execute arbitrary commands and queries. Use with caution. allowCustomConfig: false # -- Additional environment variables to set diff --git a/internal/controller/atlasmigration_controller.go b/internal/controller/atlasmigration_controller.go index 4123969..3f9b5ac 100644 --- a/internal/controller/atlasmigration_controller.go +++ b/internal/controller/atlasmigration_controller.go @@ -150,8 +150,7 @@ func (r *AtlasMigrationReconciler) Reconcile(ctx context.Context, req ctrl.Reque // TODO(giautm): Create DevDB and run linter for new migration // files before applying it to the target database. - - if data.URL != nil && data.DevURL == "" { + if !data.hasDevURL() && data.URL != nil { // The user has not specified an URL for dev-db, // spin up a dev-db and get the connection string. data.DevURL, err = r.devDB.devURL(ctx, res, *data.URL) @@ -457,12 +456,14 @@ func (r *AtlasMigrationReconciler) extractData(ctx context.Context, res *dbv1alp if err != nil { return nil, transient(err) } - if !r.allowCustomConfig && data.Config != nil { - return nil, errors.New("install the operator with \"--set allowCustomConfig=true\" to use custom atlas.hcl config") - } hasConfig := data.Config != nil - if hasConfig && data.EnvName == "" { - return nil, errors.New("env name must be set when using custom atlas.hcl config") + if hasConfig { + if !r.allowCustomConfig { + return nil, errors.New("install the operator with \"--set allowCustomConfig=true\" to use custom atlas.hcl config") + } + if s.EnvName == "" { + return nil, errors.New("env name must be set when using custom atlas.hcl config") + } } if env := s.EnvName; env != "" { data.EnvName = env @@ -664,6 +665,24 @@ func (c *migrationData) hasRemoteDir() bool { return c.RemoteDir != nil && c.RemoteDir.Name != "" } +// hasDevURL returns true if the given migration data has a dev URL +func (d *migrationData) hasDevURL() bool { + if d.DevURL != "" { + return true + } + if d.Config == nil { + return false + } + env := searchBlock(d.Config.Body(), hclwrite.NewBlock("env", []string{d.EnvName})) + if env != nil { + dev := env.Body().GetAttribute("dev") + if dev != nil { + return true + } + } + return false +} + // asBlocks returns the HCL blocks for the given migration data func (d *migrationData) asBlocks() []*hclwrite.Block { var blocks []*hclwrite.Block diff --git a/internal/controller/atlasschema_controller.go b/internal/controller/atlasschema_controller.go index 62bc15d..faa61f8 100644 --- a/internal/controller/atlasschema_controller.go +++ b/internal/controller/atlasschema_controller.go @@ -137,7 +137,7 @@ func (r *AtlasSchemaReconciler) Reconcile(ctx context.Context, req ctrl.Request) return result(err) } if data.hasTargets() { - res.SetNotReady("MultipleTargets", "Multiple targets are not supported") + res.SetNotReady("ReadSchema", "Multiple targets are not supported") return 
ctrl.Result{}, nil } hash, err := data.hash() @@ -157,7 +157,7 @@ func (r *AtlasSchemaReconciler) Reconcile(ctx context.Context, req ctrl.Request) // Starting area to handle the heavy jobs. // Below this line is the main logic of the controller. // ==================================================== - if data.URL != nil && data.DevURL == "" { + if !data.hasDevURL() && data.URL != nil { // The user has not specified an URL for dev-db, // spin up a dev-db and get the connection string. data.DevURL, err = r.devDB.devURL(ctx, res, *data.URL) @@ -553,12 +553,14 @@ func (r *AtlasSchemaReconciler) extractData(ctx context.Context, res *dbv1alpha1 if err != nil { return nil, transient(err) } - if !r.allowCustomConfig && data.Config != nil { - return nil, errors.New("install the operator with \"--set allowCustomConfig=true\" to use custom atlas.hcl config") - } hasConfig := data.Config != nil - if hasConfig && data.EnvName == "" { - return nil, errors.New("env name must be set when using custom atlas.hcl config") + if hasConfig { + if !r.allowCustomConfig { + return nil, errors.New("install the operator with \"--set allowCustomConfig=true\" to use custom atlas.hcl config") + } + if s.EnvName == "" { + return nil, errors.New("env name must be set when using custom atlas.hcl config") + } } if s := s.EnvName; s != "" { data.EnvName = s @@ -624,7 +626,11 @@ func (d *managedData) shouldLint() (bool, error) { } lint := searchBlock(env.Body(), hclwrite.NewBlock("lint", nil)) if lint == nil { - return false, nil + // search global lint block + lint = searchBlock(d.Config.Body(), hclwrite.NewBlock("lint", nil)) + if lint == nil { + return false, nil + } } if v := searchBlock(lint.Body(), hclwrite.NewBlock("destructive", nil)); v != nil { destructiveErr := v.Body().GetAttribute("error") @@ -644,6 +650,24 @@ func (d *managedData) shouldLint() (bool, error) { return p.Lint.Destructive.Error, nil } +// hasDevURL returns true if the environment has a dev URL. +func (d *managedData) hasDevURL() bool { + if d.DevURL != "" { + return true + } + if d.Config == nil { + return false + } + env := searchBlock(d.Config.Body(), hclwrite.NewBlock("env", []string{d.EnvName})) + if env != nil { + dev := env.Body().GetAttribute("dev") + if dev != nil { + return true + } + } + return false +} + // hasTargets returns true if the environment has multiple targets/ multi-tenancy. 
func (d *managedData) hasTargets() bool { if d.Config == nil { diff --git a/internal/controller/atlasschema_controller_test.go b/internal/controller/atlasschema_controller_test.go index 047b7ff..bea073e 100644 --- a/internal/controller/atlasschema_controller_test.go +++ b/internal/controller/atlasschema_controller_test.go @@ -223,6 +223,26 @@ env "kubernetes" { require.Contains(t, err.Error(), "cannot determine the value of the destructive.error attribute") } +func TestExtractData_GlobalLintBlock(t *testing.T) { + sc := conditionReconciling() + sc.Spec.DevURL = "mysql://dev" + sc.Spec.EnvName = "kubernetes" + sc.Spec.Config = ` +lint { + destructive { + error = true + } +} +env "kubernetes" {} + ` + tt := newTest(t) + data, err := tt.r.extractData(context.Background(), sc) + require.NoError(t, err) + lint, err := data.shouldLint() + require.NoError(t, err) + require.True(t, lint) +} + func TestExtractData_MultiTargets(t *testing.T) { sc := conditionReconciling() sc.Spec.DevURL = "mysql://dev" @@ -559,6 +579,31 @@ env "kubernetes" { require.EqualValues(t, expected, buf.String()) } +func TestCustomAtlasHCL_UnnamedBlock(t *testing.T) { + var buf bytes.Buffer + data := &managedData{ + EnvName: defaultEnvName, + URL: must(url.Parse("mysql://root:password@localhost:3306/test")), + Desired: must(url.Parse("file://schema.sql")), + Schemas: []string{"foo", "bar"}, + Config: mustParseHCL(` +env { + name = atlas.env + dev = "mysql://root:password@localhost:3306/dev" +}`), + } + err := data.render(&buf) + require.NoError(t, err) + expected := ` +env { + name = atlas.env + dev = "mysql://root:password@localhost:3306/dev" + schemas = ["foo", "bar"] + url = "mysql://root:password@localhost:3306/test" +}` + require.EqualValues(t, expected, buf.String()) +} + func conditionReconciling() *dbv1alpha1.AtlasSchema { return &dbv1alpha1.AtlasSchema{ ObjectMeta: objmeta(), diff --git a/internal/controller/common_test.go b/internal/controller/common_test.go index 918e85f..3f54ffe 100644 --- a/internal/controller/common_test.go +++ b/internal/controller/common_test.go @@ -98,6 +98,28 @@ env "example" { expected: ` env "example" { key = "value2" +}`, + }, + { + name: "merge unnamed blocks", + args: args{ + dst: ` +env { + name = atlas.env + key = "value" +}`, + src: ` +env { + name = atlas.env + key2 = "value2" +} +`, + }, + expected: ` +env { + name = atlas.env + key = "value" + key2 = "value2" }`, }, } diff --git a/test/e2e/testscript/multi-tenancy.txtar b/test/e2e/testscript/multi-tenancy.txtar index 31aecdb..ec9d5e8 100644 --- a/test/e2e/testscript/multi-tenancy.txtar +++ b/test/e2e/testscript/multi-tenancy.txtar @@ -19,7 +19,7 @@ atlas schema inspect -u ${DB_URL} cmp stdout schema.hcl kubectl apply -f multi-tenancy.yaml -kubectl wait --for=jsonpath='{.status.conditions[*].reason}'=MultipleTargets --timeout=500s AtlasSchemas/multi-tenancy +kubectl wait --for=jsonpath='{.status.conditions[*].reason}'=ReadSchema --timeout=500s AtlasSchemas/multi-tenancy -- multi-tenancy.yaml -- apiVersion: db.atlasgo.io/v1alpha1
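Closing note on the "merge unnamed blocks" case above: the merge has to match blocks by type and labels even when the label list is empty, so two `env { name = atlas.env }` blocks fold into one. A simplified standalone approximation of that behavior (illustrative only; the operator's actual `mergeBlocks` helper, exercised by common_test.go above, may differ in details such as attribute ordering and nested-block handling):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclwrite"
)

func mustParse(src string) *hclwrite.File {
	f, diags := hclwrite.ParseConfig([]byte(src), "", hcl.InitialPos)
	if diags.HasErrors() {
		panic(diags)
	}
	return f
}

func main() {
	dst := mustParse(`env {
  name = atlas.env
  key  = "value"
}
`)
	src := mustParse(`env {
  name = atlas.env
  key2 = "value2"
}
`)
	// Naive merge: for each block in src, find a block in dst with the same
	// type and labels (both unlabeled env blocks here) and copy over any
	// attribute dst does not define yet. Attribute order follows Go's map
	// iteration and is not stable; the real helper is more careful.
	for _, sb := range src.Body().Blocks() {
		for _, db := range dst.Body().Blocks() {
			if sb.Type() != db.Type() || fmt.Sprint(sb.Labels()) != fmt.Sprint(db.Labels()) {
				continue
			}
			for name, attr := range sb.Body().Attributes() {
				if db.Body().GetAttribute(name) == nil {
					db.Body().SetAttributeRaw(name, attr.Expr().BuildTokens(nil))
				}
			}
		}
	}
	fmt.Println(string(dst.Bytes())) // one env block carrying key and key2
}
```

Run as-is, this prints a single env block with `name`, `key`, and `key2`, which is the shape the expected output of the common_test.go case asserts.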