diff --git a/README.md b/README.md index 5c792cc..ddaf335 100644 --- a/README.md +++ b/README.md @@ -63,6 +63,51 @@ To configure the operator, you can set the following values in the `values.yaml` - `prewarmDevDB`: The Operator always keeps devdb resources around to speed up the migration process. Set this to `false` to disable this feature. +- `allowCustomConfig`: Enable this to allow custom `atlas.hcl` configuration. To use this feature, you can set the `config` field in the `AtlasSchema` or `AtlasMigration` resource. + +```yaml + spec: + envName: myenv + config: | + env myenv {} + # config from secretKeyRef + # configFrom: + # secretKeyRef: + # key: config + # name: my-secret +``` + +To use variables in the `config` field: + +```yaml + spec: + envName: myenv + variables: + - name: db_url + value: "mysql://root" + # variables from secretKeyRef + # - name: db_url + # valueFrom: + # secretKeyRef: + # key: db_url + # name: my-secret + # variables from configMapKeyRef + # - name: db_url + # valueFrom: + # configMapKeyRef: + # key: db_url + # name: my-configmap + config: | + variable "db_url" { + type = string + } + env myenv { + url = var.db_url + } +``` + +> Note: Allowing custom configuration can elevate privileges of the operator. Use this feature with caution. + - `extraEnvs`: Used to set environment variables for the operator ```yaml diff --git a/api/v1alpha1/atlasmigration_types.go b/api/v1alpha1/atlasmigration_types.go index feb44e3..4fd7623 100644 --- a/api/v1alpha1/atlasmigration_types.go +++ b/api/v1alpha1/atlasmigration_types.go @@ -61,7 +61,8 @@ type ( } // AtlasMigrationSpec defines the desired state of AtlasMigration AtlasMigrationSpec struct { - TargetSpec `json:",inline"` + TargetSpec `json:",inline"` + ProjectConfigSpec `json:",inline"` // EnvName sets the environment name used for reporting runs to Atlas Cloud. EnvName string `json:"envName,omitempty"` // Cloud defines the Atlas Cloud configuration. diff --git a/api/v1alpha1/atlasschema_types.go b/api/v1alpha1/atlasschema_types.go index 2ef3ab0..cf379ef 100644 --- a/api/v1alpha1/atlasschema_types.go +++ b/api/v1alpha1/atlasschema_types.go @@ -71,7 +71,8 @@ type ( } // AtlasSchemaSpec defines the desired state of AtlasSchema AtlasSchemaSpec struct { - TargetSpec `json:",inline"` + TargetSpec `json:",inline"` + ProjectConfigSpec `json:",inline"` // Desired Schema of the target. Schema Schema `json:"schema,omitempty"` // Cloud defines the Atlas Cloud configuration. diff --git a/api/v1alpha1/project_config.go b/api/v1alpha1/project_config.go new file mode 100644 index 0000000..508abd7 --- /dev/null +++ b/api/v1alpha1/project_config.go @@ -0,0 +1,112 @@ +// Copyright 2023 The Atlas Operator Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package v1alpha1
+
+import (
+	"context"
+	"fmt"
+
+	"ariga.io/atlas-go-sdk/atlasexec"
+	"github.com/hashicorp/hcl/v2"
+	"github.com/hashicorp/hcl/v2/hclwrite"
+	corev1 "k8s.io/api/core/v1"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+type (
+	// ProjectConfigSpec defines the project configuration.
+	ProjectConfigSpec struct {
+		// Config defines the project configuration.
+		// Should be a valid HCL string.
+		Config string `json:"config,omitempty"`
+		// ConfigFrom defines the reference to the secret key that contains the project configuration.
+		ConfigFrom Secret `json:"configFrom,omitempty"`
+		// EnvName defines the environment name that is defined in the project configuration.
+		// If not defined, the default environment "k8s" will be used.
+		EnvName string `json:"envName,omitempty"`
+		// Vars defines the input variables for the project configuration.
+		Vars []Variable `json:"vars,omitempty"`
+	}
+	// Variable defines an input variable for the project configuration.
+	// Its value is set inline or resolved from a secret/configmap reference.
+	Variable struct {
+		Key       string    `json:"key,omitempty"`
+		Value     string    `json:"value,omitempty"`
+		ValueFrom ValueFrom `json:"valueFrom,omitempty"`
+	}
+	// ValueFrom defines the reference to the secret or configmap key that contains the value.
+	ValueFrom struct {
+		// SecretKeyRef defines the secret key reference to use for the value.
+		SecretKeyRef *corev1.SecretKeySelector `json:"secretKeyRef,omitempty"`
+		// ConfigMapKeyRef defines the configmap key reference to use for the value.
+		ConfigMapKeyRef *corev1.ConfigMapKeySelector `json:"configMapKeyRef,omitempty"`
+	}
+)
+
+// GetConfig returns the project configuration.
+// It is read from the Config field, or resolved from the ConfigFrom
+// secret reference when set.
+func (s ProjectConfigSpec) GetConfig(ctx context.Context, r client.Reader, ns string) (*hclwrite.File, error) {
+	rawConfig := s.Config
+	if s.ConfigFrom.SecretKeyRef != nil {
+		cfgFromSecret, err := getSecretValue(ctx, r, ns, s.ConfigFrom.SecretKeyRef)
+		if err != nil {
+			return nil, err
+		}
+		rawConfig = cfgFromSecret
+	}
+	if rawConfig == "" {
+		return nil, nil
+	}
+	config, diags := hclwrite.ParseConfig([]byte(rawConfig), "", hcl.InitialPos)
+	if diags.HasErrors() {
+		return nil, fmt.Errorf("failed to parse project configuration: %v", diags)
+	}
+	return config, nil
+}
+
+// GetVars returns the input variables for the project configuration.
+// The variables are resolved from the secret or configmap reference.
+func (s ProjectConfigSpec) GetVars(ctx context.Context, r client.Reader, ns string) (atlasexec.Vars2, error) {
+	vars := make(map[string]any)
+	for _, variable := range s.Vars {
+		var (
+			value string
+			err   error
+		)
+		value = variable.Value
+		if variable.ValueFrom.SecretKeyRef != nil {
+			if value, err = getSecretValue(ctx, r, ns, variable.ValueFrom.SecretKeyRef); err != nil {
+				return nil, err
+			}
+		}
+		if variable.ValueFrom.ConfigMapKeyRef != nil {
+			if value, err = getConfigMapValue(ctx, r, ns, variable.ValueFrom.ConfigMapKeyRef); err != nil {
+				return nil, err
+			}
+		}
+		// Resolve variables with the same key by grouping them into a slice.
+		// This is necessary when generating the Atlas command for a list(string) input type.
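+		// For example, a variable provided twice under the same key with the
+		// values "foo" and "bar" is passed to Atlas as ["foo", "bar"].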
+		switch existing := vars[variable.Key].(type) {
+		case nil:
+			vars[variable.Key] = value
+		case string:
+			vars[variable.Key] = []string{existing, value}
+		case []string:
+			vars[variable.Key] = append(existing, value)
+		default:
+			return nil, fmt.Errorf("invalid variable type for %q", variable.Key)
+		}
+	}
+	return vars, nil
+}
diff --git a/api/v1alpha1/target.go b/api/v1alpha1/target.go
index 7d08677..f5cfee7 100644
--- a/api/v1alpha1/target.go
+++ b/api/v1alpha1/target.go
@@ -66,7 +66,7 @@ type (
 // DatabaseURL returns the database url.
 func (s TargetSpec) DatabaseURL(ctx context.Context, r client.Reader, ns string) (*url.URL, error) {
 	if s.URLFrom.SecretKeyRef != nil {
-		val, err := getSecrectValue(ctx, r, ns, s.URLFrom.SecretKeyRef)
+		val, err := getSecretValue(ctx, r, ns, s.URLFrom.SecretKeyRef)
 		if err != nil {
 			return nil, err
 		}
@@ -76,21 +76,21 @@ func (s TargetSpec) DatabaseURL(ctx context.Context, r client.Reader, ns string)
 		return url.Parse(s.URL)
 	}
 	if s.Credentials.UserFrom.SecretKeyRef != nil {
-		val, err := getSecrectValue(ctx, r, ns, s.Credentials.UserFrom.SecretKeyRef)
+		val, err := getSecretValue(ctx, r, ns, s.Credentials.UserFrom.SecretKeyRef)
 		if err != nil {
 			return nil, err
 		}
 		s.Credentials.User = val
 	}
 	if s.Credentials.PasswordFrom.SecretKeyRef != nil {
-		val, err := getSecrectValue(ctx, r, ns, s.Credentials.PasswordFrom.SecretKeyRef)
+		val, err := getSecretValue(ctx, r, ns, s.Credentials.PasswordFrom.SecretKeyRef)
 		if err != nil {
 			return nil, err
 		}
 		s.Credentials.Password = val
 	}
 	if s.Credentials.HostFrom.SecretKeyRef != nil {
-		val, err := getSecrectValue(ctx, r, ns, s.Credentials.HostFrom.SecretKeyRef)
+		val, err := getSecretValue(ctx, r, ns, s.Credentials.HostFrom.SecretKeyRef)
 		if err != nil {
 			return nil, err
 		}
@@ -99,7 +99,7 @@ func (s TargetSpec) DatabaseURL(ctx context.Context, r client.Reader, ns string)
 	if s.Credentials.Host != "" {
 		return s.Credentials.URL(), nil
 	}
-	return nil, fmt.Errorf("no target database defined")
+	return nil, nil
 }
 
 // URL returns the URL for the database.
@@ -133,7 +133,7 @@ func (c *Credentials) URL() *url.URL {
 	return u
 }
 
-func getSecrectValue(
+func getSecretValue(
 	ctx context.Context,
 	r client.Reader,
 	ns string,
@@ -147,6 +147,20 @@ func getSecrectValue(
 	return string(val.Data[ref.Key]), nil
 }
 
+func getConfigMapValue(
+	ctx context.Context,
+	r client.Reader,
+	ns string,
+	ref *corev1.ConfigMapKeySelector,
+) (string, error) {
+	val := &corev1.ConfigMap{}
+	err := r.Get(ctx, types.NamespacedName{Name: ref.Name, Namespace: ns}, val)
+	if err != nil {
+		return "", err
+	}
+	return string(val.Data[ref.Key]), nil
+}
+
 // Driver defines the database driver.
type Driver string diff --git a/api/v1alpha1/types_test.go b/api/v1alpha1/types_test.go index 71aa4e7..80f094b 100644 --- a/api/v1alpha1/types_test.go +++ b/api/v1alpha1/types_test.go @@ -53,9 +53,6 @@ func TestTargetSpec_DatabaseURL(t *testing.T) { require.Equal(t, a, u.String()) } ) - // error - _, err := target.DatabaseURL(ctx, nil, "default") - require.ErrorContains(t, err, "no target database defined") // Should return the URL from the credentials target.Credentials = v1alpha1.Credentials{ diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index a7d2354..5dc360c 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -87,6 +87,7 @@ func (in *AtlasMigrationList) DeepCopyObject() runtime.Object { func (in *AtlasMigrationSpec) DeepCopyInto(out *AtlasMigrationSpec) { *out = *in in.TargetSpec.DeepCopyInto(&out.TargetSpec) + in.ProjectConfigSpec.DeepCopyInto(&out.ProjectConfigSpec) in.Cloud.DeepCopyInto(&out.Cloud) in.Dir.DeepCopyInto(&out.Dir) in.DevURLFrom.DeepCopyInto(&out.DevURLFrom) @@ -192,6 +193,7 @@ func (in *AtlasSchemaList) DeepCopyObject() runtime.Object { func (in *AtlasSchemaSpec) DeepCopyInto(out *AtlasSchemaSpec) { *out = *in in.TargetSpec.DeepCopyInto(&out.TargetSpec) + in.ProjectConfigSpec.DeepCopyInto(&out.ProjectConfigSpec) in.Schema.DeepCopyInto(&out.Schema) in.Cloud.DeepCopyInto(&out.Cloud) in.DevURLFrom.DeepCopyInto(&out.DevURLFrom) @@ -444,6 +446,29 @@ func (in *Policy) DeepCopy() *Policy { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectConfigSpec) DeepCopyInto(out *ProjectConfigSpec) { + *out = *in + in.ConfigFrom.DeepCopyInto(&out.ConfigFrom) + if in.Vars != nil { + in, out := &in.Vars, &out.Vars + *out = make([]Variable, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectConfigSpec. +func (in *ProjectConfigSpec) DeepCopy() *ProjectConfigSpec { + if in == nil { + return nil + } + out := new(ProjectConfigSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ProtectFlows) DeepCopyInto(out *ProtectFlows) { *out = *in @@ -570,3 +595,44 @@ func (in *TokenFrom) DeepCopy() *TokenFrom { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValueFrom) DeepCopyInto(out *ValueFrom) { + *out = *in + if in.SecretKeyRef != nil { + in, out := &in.SecretKeyRef, &out.SecretKeyRef + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.ConfigMapKeyRef != nil { + in, out := &in.ConfigMapKeyRef, &out.ConfigMapKeyRef + *out = new(corev1.ConfigMapKeySelector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValueFrom. +func (in *ValueFrom) DeepCopy() *ValueFrom { + if in == nil { + return nil + } + out := new(ValueFrom) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Variable) DeepCopyInto(out *Variable) { + *out = *in + in.ValueFrom.DeepCopyInto(&out.ValueFrom) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Variable. +func (in *Variable) DeepCopy() *Variable { + if in == nil { + return nil + } + out := new(Variable) + in.DeepCopyInto(out) + return out +} diff --git a/charts/atlas-operator/templates/crds/crd.yaml b/charts/atlas-operator/templates/crds/crd.yaml index 62ca8cb..9b442cb 100644 --- a/charts/atlas-operator/templates/crds/crd.yaml +++ b/charts/atlas-operator/templates/crds/crd.yaml @@ -87,6 +87,41 @@ spec: url: type: string type: object + config: + description: |- + Config defines the project configuration. + Should be a valid YAML string. + type: string + configFrom: + description: ConfigFrom defines the reference to the secret key that + contains the project configuration. + properties: + secretKeyRef: + description: SecretKeyRef defines the secret key reference to + use for the user. + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object credentials: description: |- Credentials defines the credentials to use when connecting to the database. @@ -264,8 +299,9 @@ spec: type: object type: object envName: - description: EnvName sets the environment name used for reporting - runs to Atlas Cloud. + description: |- + EnvName defines the environment name that defined in the project configuration. + If not defined, the default environment "k8s" will be used. type: string execOrder: default: linear @@ -329,6 +365,72 @@ spec: type: object x-kubernetes-map-type: atomic type: object + vars: + description: Vars defines the input variables for the project configuration. + items: + description: Variables defines the reference of secret/configmap + to the input variables for the project configuration. + properties: + key: + type: string + value: + type: string + valueFrom: + description: ValueFrom defines the reference to the secret key + that contains the value. + properties: + configMapKeyRef: + description: ConfigMapKeyRef defines the configmap key reference + to use for the value. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: SecretKeyRef defines the secret key reference + to use for the value. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. 
+ type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + type: object + type: array required: - dir type: object @@ -507,6 +609,41 @@ spec: x-kubernetes-map-type: atomic type: object type: object + config: + description: |- + Config defines the project configuration. + Should be a valid YAML string. + type: string + configFrom: + description: ConfigFrom defines the reference to the secret key that + contains the project configuration. + properties: + secretKeyRef: + description: SecretKeyRef defines the secret key reference to + use for the user. + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object credentials: description: |- Credentials defines the credentials to use when connecting to the database. @@ -651,6 +788,11 @@ spec: type: object x-kubernetes-map-type: atomic type: object + envName: + description: |- + EnvName defines the environment name that defined in the project configuration. + If not defined, the default environment "k8s" will be used. + type: string exclude: description: Exclude a list of glob patterns used to filter existing resources being taken into account. @@ -809,6 +951,72 @@ spec: type: object x-kubernetes-map-type: atomic type: object + vars: + description: Vars defines the input variables for the project configuration. + items: + description: Variables defines the reference of secret/configmap + to the input variables for the project configuration. + properties: + key: + type: string + value: + type: string + valueFrom: + description: ValueFrom defines the reference to the secret key + that contains the value. + properties: + configMapKeyRef: + description: ConfigMapKeyRef defines the configmap key reference + to use for the value. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: SecretKeyRef defines the secret key reference + to use for the value. + properties: + key: + description: The key of the secret to select from. 
Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + type: object + type: array type: object status: description: AtlasSchemaStatus defines the observed state of AtlasSchema diff --git a/charts/atlas-operator/templates/deployment.yaml b/charts/atlas-operator/templates/deployment.yaml index b964666..c3b7c4b 100644 --- a/charts/atlas-operator/templates/deployment.yaml +++ b/charts/atlas-operator/templates/deployment.yaml @@ -55,6 +55,8 @@ spec: env: - name: PREWARM_DEVDB value: "{{ .Values.prewarmDevDB }}" + - name: ALLOW_CUSTOM_CONFIG + value: "{{ .Values.allowCustomConfig }}" {{- with .Values.extraEnvs }} {{- toYaml . | nindent 8 }} {{- end }} diff --git a/charts/atlas-operator/values.yaml b/charts/atlas-operator/values.yaml index 44a5e19..67e5c8d 100644 --- a/charts/atlas-operator/values.yaml +++ b/charts/atlas-operator/values.yaml @@ -48,6 +48,10 @@ affinity: {} # Set this to true to keep the devdb pods around. prewarmDevDB: true +# Enable this to allow custom project configuration +# Warning: This action will elevate the privileges of the operator +allowCustomConfig: false + # -- Additional environment variables to set extraEnvs: [] # extraEnvs: diff --git a/cmd/main.go b/cmd/main.go index 1de3d0e..837892f 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -58,6 +58,8 @@ const ( vercheckURL = "https://vercheck.ariga.io" // prewarmDevDB when disabled it deletes the devDB pods after the schema is created prewarmDevDB = "PREWARM_DEVDB" + // allowCustomConfig when enabled it allows the use of custom config + allowsCustomConfig = "ALLOW_CUSTOM_CONFIG" ) func init() { @@ -141,13 +143,24 @@ func main() { os.Exit(1) } prewarmDevDB := getPrewarmDevDBEnv() - if err = controller.NewAtlasSchemaReconciler(mgr, controller.NewAtlasExec, prewarmDevDB). - SetupWithManager(mgr); err != nil { + allowCustomConfig := getAllowCustomConfigEnv() + // Setup controller for AtlasSchema + schemaController := controller.NewAtlasSchemaReconciler(mgr, prewarmDevDB) + schemaController.SetAtlasClient(controller.NewAtlasExec) + if allowCustomConfig { + schemaController.AllowCustomConfig() + } + if err := schemaController.SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "AtlasSchema") os.Exit(1) } - if err = controller.NewAtlasMigrationReconciler(mgr, controller.NewAtlasExec, prewarmDevDB). - SetupWithManager(mgr); err != nil { + // Setup controller for AtlasMigration + migrationController := controller.NewAtlasMigrationReconciler(mgr, prewarmDevDB) + migrationController.SetAtlasClient(controller.NewAtlasExec) + if allowCustomConfig { + migrationController.AllowCustomConfig() + } + if err = migrationController.SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "AtlasMigration") os.Exit(1) } @@ -219,3 +232,18 @@ func getPrewarmDevDBEnv() bool { } return prewarmDevDB } + +// getAllowCustomConfigEnv returns the value of the env var ALLOW_CUSTOM_CONFIG. +// if the env var is not set, it returns false. 
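+// If the value is not a valid boolean, the error is logged and the operator exits.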
+func getAllowCustomConfigEnv() bool { + env := os.Getenv(allowsCustomConfig) + if env == "" { + return false + } + allowsCustomConfig, err := strconv.ParseBool(env) + if err != nil { + setupLog.Error(err, "invalid value for env var ALLOW_CUSTOM_CONFIG, expected true or false") + os.Exit(1) + } + return allowsCustomConfig +} diff --git a/config/crd/bases/db.atlasgo.io_atlasmigrations.yaml b/config/crd/bases/db.atlasgo.io_atlasmigrations.yaml index bffe266..d083760 100644 --- a/config/crd/bases/db.atlasgo.io_atlasmigrations.yaml +++ b/config/crd/bases/db.atlasgo.io_atlasmigrations.yaml @@ -101,6 +101,41 @@ spec: url: type: string type: object + config: + description: |- + Config defines the project configuration. + Should be a valid YAML string. + type: string + configFrom: + description: ConfigFrom defines the reference to the secret key that + contains the project configuration. + properties: + secretKeyRef: + description: SecretKeyRef defines the secret key reference to + use for the user. + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object credentials: description: |- Credentials defines the credentials to use when connecting to the database. @@ -278,8 +313,9 @@ spec: type: object type: object envName: - description: EnvName sets the environment name used for reporting - runs to Atlas Cloud. + description: |- + EnvName defines the environment name that defined in the project configuration. + If not defined, the default environment "k8s" will be used. type: string execOrder: default: linear @@ -343,6 +379,72 @@ spec: type: object x-kubernetes-map-type: atomic type: object + vars: + description: Vars defines the input variables for the project configuration. + items: + description: Variables defines the reference of secret/configmap + to the input variables for the project configuration. + properties: + key: + type: string + value: + type: string + valueFrom: + description: ValueFrom defines the reference to the secret key + that contains the value. + properties: + configMapKeyRef: + description: ConfigMapKeyRef defines the configmap key reference + to use for the value. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: SecretKeyRef defines the secret key reference + to use for the value. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. 
+ type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + type: object + type: array required: - dir type: object diff --git a/config/crd/bases/db.atlasgo.io_atlasschemas.yaml b/config/crd/bases/db.atlasgo.io_atlasschemas.yaml index 218c7ab..50cca18 100644 --- a/config/crd/bases/db.atlasgo.io_atlasschemas.yaml +++ b/config/crd/bases/db.atlasgo.io_atlasschemas.yaml @@ -96,6 +96,41 @@ spec: x-kubernetes-map-type: atomic type: object type: object + config: + description: |- + Config defines the project configuration. + Should be a valid YAML string. + type: string + configFrom: + description: ConfigFrom defines the reference to the secret key that + contains the project configuration. + properties: + secretKeyRef: + description: SecretKeyRef defines the secret key reference to + use for the user. + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object credentials: description: |- Credentials defines the credentials to use when connecting to the database. @@ -240,6 +275,11 @@ spec: type: object x-kubernetes-map-type: atomic type: object + envName: + description: |- + EnvName defines the environment name that defined in the project configuration. + If not defined, the default environment "k8s" will be used. + type: string exclude: description: Exclude a list of glob patterns used to filter existing resources being taken into account. @@ -398,6 +438,72 @@ spec: type: object x-kubernetes-map-type: atomic type: object + vars: + description: Vars defines the input variables for the project configuration. + items: + description: Variables defines the reference of secret/configmap + to the input variables for the project configuration. + properties: + key: + type: string + value: + type: string + valueFrom: + description: ValueFrom defines the reference to the secret key + that contains the value. + properties: + configMapKeyRef: + description: ConfigMapKeyRef defines the configmap key reference + to use for the value. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: SecretKeyRef defines the secret key reference + to use for the value. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + type: object + type: array type: object status: description: AtlasSchemaStatus defines the observed state of AtlasSchema diff --git a/config/customconfig/kustomization.yaml b/config/customconfig/kustomization.yaml new file mode 100644 index 0000000..787dc7a --- /dev/null +++ b/config/customconfig/kustomization.yaml @@ -0,0 +1,30 @@ +# Copyright 2023 The Atlas Operator Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +namespace: atlas-operator-system +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ../sqlserver +patches: +- target: + kind: Deployment + namespace: system + name: controller-manager + patch: |- + - op: add + path: "/spec/template/spec/containers/0/env/-" + value: + name: ALLOW_CUSTOM_CONFIG + value: "true" diff --git a/internal/controller/atlasmigration_controller.go b/internal/controller/atlasmigration_controller.go index 5cac65d..c6734cc 100644 --- a/internal/controller/atlasmigration_controller.go +++ b/internal/controller/atlasmigration_controller.go @@ -61,6 +61,8 @@ type ( secretWatcher *watch.ResourceWatcher recorder record.EventRecorder devDB *devDBReconciler + // AllowCustomConfig allows the controller to use custom atlas.hcl config. 
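+	// It is disabled by default and enabled via the ALLOW_CUSTOM_CONFIG environment variable.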
+ allowCustomConfig bool } // migrationData is the data used to render the HCL template // that will be used for Atlas CLI @@ -77,15 +79,16 @@ type ( MigrateDown bool ObservedHash string RemoteDir *dbv1alpha1.Remote + Config *hclwrite.File + Vars atlasexec.Vars2 } ) -func NewAtlasMigrationReconciler(mgr Manager, atlas AtlasExecFn, prewarmDevDB bool) *AtlasMigrationReconciler { +func NewAtlasMigrationReconciler(mgr Manager, prewarmDevDB bool) *AtlasMigrationReconciler { r := mgr.GetEventRecorderFor("atlasmigration-controller") return &AtlasMigrationReconciler{ Client: mgr.GetClient(), scheme: mgr.GetScheme(), - atlasClient: atlas, configMapWatcher: watch.New(), secretWatcher: watch.New(), recorder: r, @@ -148,7 +151,7 @@ func (r *AtlasMigrationReconciler) Reconcile(ctx context.Context, req ctrl.Reque // TODO(giautm): Create DevDB and run linter for new migration // files before applying it to the target database. - if data.DevURL == "" { + if data.URL != nil && data.DevURL == "" { // The user has not specified an URL for dev-db, // spin up a dev-db and get the connection string. data.DevURL, err = r.devDB.devURL(ctx, res, *data.URL) @@ -209,6 +212,16 @@ func (r *AtlasMigrationReconciler) SetupWithManager(mgr ctrl.Manager) error { Complete(r) } +// SetAtlasClient sets the Atlas client for the reconciler. +func (r *AtlasMigrationReconciler) SetAtlasClient(fn AtlasExecFn) { + r.atlasClient = fn +} + +// AllowCustomConfig allows the controller to use custom atlas.hcl config. +func (r *AtlasMigrationReconciler) AllowCustomConfig() { + r.allowCustomConfig = true +} + func (r *AtlasMigrationReconciler) watchRefs(res *dbv1alpha1.AtlasMigration) { if c := res.Spec.Dir.ConfigMapRef; c != nil { r.configMapWatcher.Watch( @@ -264,7 +277,7 @@ func (r *AtlasMigrationReconciler) reconcile(ctx context.Context, data *migratio } log.Info("reconciling migration", "env", data.EnvName) // Check if there are any pending migration files - status, err := c.MigrateStatus(ctx, &atlasexec.MigrateStatusParams{Env: data.EnvName}) + status, err := c.MigrateStatus(ctx, &atlasexec.MigrateStatusParams{Env: data.EnvName, Vars: data.Vars}) if err != nil { res.SetNotReady("Migrating", err.Error()) if isChecksumErr(err) { @@ -291,6 +304,7 @@ func (r *AtlasMigrationReconciler) reconcile(ctx context.Context, data *migratio TriggerType: atlasexec.TriggerTypeKubernetes, TriggerVersion: dbv1alpha1.VersionFromContext(ctx), }, + Vars: data.Vars, } // Atlas needs all versions to be present in the directory // to downgrade to a specific version. 
@@ -364,6 +378,7 @@ func (r *AtlasMigrationReconciler) reconcile(ctx context.Context, data *migratio TriggerType: atlasexec.TriggerTypeKubernetes, TriggerVersion: dbv1alpha1.VersionFromContext(ctx), }, + Vars: data.Vars, }) if err != nil { res.SetNotReady("Migrating", err.Error()) @@ -421,12 +436,26 @@ func (r *AtlasMigrationReconciler) extractData(ctx context.Context, res *dbv1alp MigrateDown: false, } ) + data.Config, err = s.GetConfig(ctx, r, res.Namespace) + if err != nil { + return nil, transient(err) + } + if !r.allowCustomConfig && data.Config != nil { + return nil, errors.New("install the operator with \"--set allowCustomConfig=true\" to use custom atlas.hcl config") + } + hasConfig := data.Config != nil + if hasConfig && data.EnvName == "" { + return nil, errors.New("env name must be set when using custom atlas.hcl config") + } if env := s.EnvName; env != "" { data.EnvName = env } if data.URL, err = s.DatabaseURL(ctx, r, res.Namespace); err != nil { return nil, transient(err) } + if !hasConfig && data.URL == nil { + return nil, transient(errors.New("no target database defined")) + } switch d := s.Dir; { case d.Remote.Name != "": c := s.Cloud @@ -495,6 +524,10 @@ func (r *AtlasMigrationReconciler) extractData(ctx context.Context, res *dbv1alp if err != nil { return nil, err } + data.Vars, err = s.GetVars(ctx, r, res.Namespace) + if err != nil { + return nil, transient(err) + } return data, nil } @@ -516,7 +549,12 @@ func (r *AtlasMigrationReconciler) recordErrEvent(res *dbv1alpha1.AtlasMigration // Calculate the hash of the given data func hashMigrationData(d *migrationData) (string, error) { h := sha256.New() - h.Write([]byte(d.URL.String())) + if d.URL != nil { + h.Write([]byte(d.URL.String())) + } + if d.Config != nil { + h.Write([]byte(d.Config.Bytes())) + } if c := d.Cloud; c != nil { h.Write([]byte(c.Token)) h.Write([]byte(c.URL)) @@ -552,26 +590,44 @@ func (d *migrationData) DirURL() string { // The template is used by the Atlas CLI to apply the migrations directory. // It also validates the data before rendering the template. 
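+// When a custom config is set, the generated blocks are merged into it and
+// the env block named by EnvName must exist in the merged file.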
func (d *migrationData) render(w io.Writer) error {
-	if d.URL == nil {
+	f := hclwrite.NewFile()
+	for _, b := range d.asBlocks() {
+		f.Body().AppendBlock(b)
+	}
+	// Merge the config block if it is set.
+	if d.Config != nil {
+		mergeBlocks(d.Config.Body(), f.Body())
+		f = d.Config
+	}
+	env := searchBlock(f.Body(), hclwrite.NewBlock("env", []string{d.EnvName}))
+	if env == nil {
+		return fmt.Errorf("env block %q is not found", d.EnvName)
+	}
+	b := env.Body()
+	if b.GetAttribute("url") == nil {
 		return errors.New("database URL is empty")
 	}
+	migrationBlock := searchBlock(b, hclwrite.NewBlock("migration", nil))
+	var dirAttr *hclwrite.Attribute
+	if migrationBlock != nil {
+		dirAttr = migrationBlock.Body().GetAttribute("dir")
+	}
 	switch {
 	case d.hasRemoteDir():
-		if d.Dir != nil {
-			return errors.New("cannot use both remote and local directory")
+		if dirAttr != nil {
+			dirURL := dirAttr.Expr().BuildTokens(nil).Bytes()
+			if !strings.Contains(string(dirURL), d.DirURL()) {
+				return errors.New("cannot use both remote and local directory")
+			}
 		}
-		if d.Cloud.Token == "" {
-			return errors.New("Atlas Cloud token is empty")
+		atlasBlock := searchBlock(f.Body(), hclwrite.NewBlock("atlas", nil))
+		if atlasBlock == nil {
+			return errors.New("Atlas Cloud token is empty")
 		}
-	case d.Dir != nil:
+		cloud := searchBlock(atlasBlock.Body(), hclwrite.NewBlock("cloud", nil))
+		if cloud == nil || cloud.Body().GetAttribute("token") == nil {
+			return errors.New("Atlas Cloud token is empty")
+		}
+	case dirAttr != nil:
 	default:
 		return errors.New("migration directory is empty")
 	}
-	f := hclwrite.NewFile()
-	fBody := f.Body()
-	for _, b := range d.asBlocks() {
-		fBody.AppendBlock(b)
-	}
 	if _, err := f.WriteTo(w); err != nil {
 		return err
 	}
diff --git a/internal/controller/atlasmigration_controller_test.go b/internal/controller/atlasmigration_controller_test.go
index bef7dce..2f5514d 100644
--- a/internal/controller/atlasmigration_controller_test.go
+++ b/internal/controller/atlasmigration_controller_test.go
@@ -33,6 +33,8 @@ import (
 
 	"ariga.io/atlas-go-sdk/atlasexec"
 	"ariga.io/atlas/sql/migrate"
+	"github.com/hashicorp/hcl/v2"
+	"github.com/hashicorp/hcl/v2/hclwrite"
 	"github.com/stretchr/testify/require"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -988,6 +990,23 @@ func TestWatcher_enabled(t *testing.T) {
 	}, watched)
 }
 
+func TestCustomConfig_disabled(t *testing.T) {
+	tt := migrationCliTest(t)
+	tt.r.allowCustomConfig = false
+	_, err := tt.r.extractData(context.Background(), &dbv1alpha1.AtlasMigration{
+		ObjectMeta: migrationObjmeta(),
+		Spec: dbv1alpha1.AtlasMigrationSpec{
+			TargetSpec: dbv1alpha1.TargetSpec{URL: tt.dburl},
+			EnvName:    "test",
+			ProjectConfigSpec: dbv1alpha1.ProjectConfigSpec{
+				Config: `env "test" {}`,
+			},
+		},
+	})
+	require.Error(t, err)
+	require.Contains(t, err.Error(), "install the operator with \"--set allowCustomConfig=true\" to use custom atlas.hcl config")
+}
+
 func TestDefaultTemplate(t *testing.T) {
 	migrate := &migrationData{
 		EnvName: defaultEnvName,
@@ -1030,6 +1049,32 @@ func TestBaselineTemplate(t *testing.T) {
 `, fileContent.String())
 }
 
+func TestCustomAtlasHCL(t *testing.T) {
+	migrate := &migrationData{
+		EnvName: defaultEnvName,
+		Config: mustParseHCL(`env "kubernetes" {
+  url = "sqlite://file2/?mode=memory"
+  dev = "sqlite://dev/?mode=memory"
+  migration {
+    dir      = "file://migrations"
+    baseline = "20230412003626"
+  }
+}
+`),
+	}
+	var fileContent bytes.Buffer
+	require.NoError(t, migrate.render(&fileContent))
+	require.EqualValues(t, `env "kubernetes" {
+  url = "sqlite://file2/?mode=memory"
+  dev = "sqlite://dev/?mode=memory"
+  migration {
+    dir      = "file://migrations"
+    baseline = "20230412003626"
+  }
+}
+`, fileContent.String())
+}
+
 func 
TestCloudTemplate(t *testing.T) { migrate := &migrationData{ EnvName: defaultEnvName, @@ -1064,6 +1109,80 @@ env "kubernetes" { `, fileContent.String()) } +func TestCustomAtlasHCL_CloudTemplate(t *testing.T) { + migrate := &migrationData{ + EnvName: defaultEnvName, + RemoteDir: &dbv1alpha1.Remote{ + Name: "my-remote-dir", + Tag: "my-remote-tag", + }, + Config: mustParseHCL(`atlas { + cloud { + token = "my-token" + url = "https://atlasgo.io/" + project = "my-project" + } +} +env "kubernetes" { + url = "sqlite://file2/?mode=memory" + dev = "sqlite://dev/?mode=memory" + migration { + dir = "atlas://my-remote-dir?tag=my-remote-tag" + } +}`), + } + var fileContent bytes.Buffer + require.NoError(t, migrate.render(&fileContent)) + require.EqualValues(t, `atlas { + cloud { + token = "my-token" + url = "https://atlasgo.io/" + project = "my-project" + } +} +env "kubernetes" { + url = "sqlite://file2/?mode=memory" + dev = "sqlite://dev/?mode=memory" + migration { + dir = "atlas://my-remote-dir?tag=my-remote-tag" + } +}`, fileContent.String()) +} + +func TestCustomAtlasHCL_BaselineTemplate(t *testing.T) { + migrate := &migrationData{ + EnvName: defaultEnvName, + URL: must(url.Parse("sqlite://file2/?mode=memory")), + DevURL: "sqlite://dev/?mode=memory", + Cloud: &Cloud{ + URL: "https://atlasgo.io/", + Repo: "my-project", + Token: "my-token", + }, + RemoteDir: &dbv1alpha1.Remote{ + Name: "my-remote-dir", + Tag: "my-remote-tag", + }, + } + var fileContent bytes.Buffer + require.NoError(t, migrate.render(&fileContent)) + require.EqualValues(t, `atlas { + cloud { + token = "my-token" + url = "https://atlasgo.io/" + project = "my-project" + } +} +env "kubernetes" { + url = "sqlite://file2/?mode=memory" + dev = "sqlite://dev/?mode=memory" + migration { + dir = "atlas://my-remote-dir?tag=my-remote-tag" + } +} +`, fileContent.String()) +} + func TestMigrationWithDeploymentContext(t *testing.T) { srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { type ( @@ -1174,6 +1293,7 @@ func newMigrationTest(t *testing.T) *migrationTest { scheme: scheme, recorder: r, }, + allowCustomConfig: true, }, } } @@ -1291,3 +1411,11 @@ func writeDir(t *testing.T, dir migrate.Dir, w io.Writer) { _, err = fmt.Fprintf(w, `{"data":{"dirState":{"content":%q}}}`, base64.StdEncoding.EncodeToString(arc)) require.NoError(t, err) } + +func mustParseHCL(content string) *hclwrite.File { + f, err := hclwrite.ParseConfig([]byte(content), "", hcl.InitialPos) + if err != nil { + panic(err) + } + return f +} diff --git a/internal/controller/atlasschema_controller.go b/internal/controller/atlasschema_controller.go index 1be910b..37b9e17 100644 --- a/internal/controller/atlasschema_controller.go +++ b/internal/controller/atlasschema_controller.go @@ -25,6 +25,7 @@ import ( "net/url" "path" "path/filepath" + "strconv" "strings" "time" @@ -64,6 +65,8 @@ type ( secretWatcher *watch.ResourceWatcher recorder record.EventRecorder devDB *devDBReconciler + // AllowCustomConfig allows the controller to use custom atlas.hcl config. + allowCustomConfig bool } // managedData contains information about the managed database and its desired state. 
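+	// Config and Vars carry the optional custom atlas.hcl project
+	// configuration and its input variables.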
managedData struct { @@ -76,24 +79,25 @@ type ( TxMode dbv1alpha1.TransactionMode Desired *url.URL Cloud *Cloud - - schema []byte + Config *hclwrite.File + Vars atlasexec.Vars2 + schema []byte } ) const sqlLimitSize = 1024 -func NewAtlasSchemaReconciler(mgr Manager, atlas AtlasExecFn, prewarmDevDB bool) *AtlasSchemaReconciler { - r := mgr.GetEventRecorderFor("atlasschema-controller") - return &AtlasSchemaReconciler{ +func NewAtlasSchemaReconciler(mgr Manager, prewarmDevDB bool) *AtlasSchemaReconciler { + rec := mgr.GetEventRecorderFor("atlasschema-controller") + r := &AtlasSchemaReconciler{ Client: mgr.GetClient(), scheme: mgr.GetScheme(), - atlasClient: atlas, configMapWatcher: watch.New(), secretWatcher: watch.New(), - recorder: r, - devDB: newDevDB(mgr, r, prewarmDevDB), + recorder: rec, + devDB: newDevDB(mgr, rec, prewarmDevDB), } + return r } // Reconcile is part of the main kubernetes reconciliation loop which aims to @@ -132,6 +136,10 @@ func (r *AtlasSchemaReconciler) Reconcile(ctx context.Context, req ctrl.Request) r.recordErrEvent(res, err) return result(err) } + if data.hasTargets() { + res.SetNotReady("MultipleTargets", "Multiple targets are not supported") + return ctrl.Result{}, nil + } hash, err := data.hash() if err != nil { res.SetNotReady("CalculatingHash", err.Error()) @@ -149,7 +157,7 @@ func (r *AtlasSchemaReconciler) Reconcile(ctx context.Context, req ctrl.Request) // Starting area to handle the heavy jobs. // Below this line is the main logic of the controller. // ==================================================== - if data.DevURL == "" { + if data.URL != nil && data.DevURL == "" { // The user has not specified an URL for dev-db, // spin up a dev-db and get the connection string. data.DevURL, err = r.devDB.devURL(ctx, res, *data.URL) @@ -203,6 +211,12 @@ func (r *AtlasSchemaReconciler) Reconcile(ctx context.Context, req ctrl.Request) log.Info("the resource is connected to Atlas Cloud", "org", whoami.Org) } var reports []*atlasexec.SchemaApply + shouldLint, err := data.shouldLint() + if err != nil { + res.SetNotReady("LintPolicyError", err.Error()) + r.recordErrEvent(res, err) + return result(err) + } switch desiredURL := data.Desired.String(); { // The resource is connected to Atlas Cloud. 
case whoami != nil: @@ -217,6 +231,7 @@ func (r *AtlasSchemaReconciler) Reconcile(ctx context.Context, req ctrl.Request) Env: data.EnvName, To: desiredURL, TxMode: string(data.TxMode), + Vars: data.Vars, } repo := data.repoURL() if repo == nil { @@ -237,6 +252,7 @@ func (r *AtlasSchemaReconciler) Reconcile(ctx context.Context, req ctrl.Request) Env: data.EnvName, URL: desiredURL, Format: `{{ .Hash | base64url }}`, + Vars: data.Vars, }) if err != nil { reason, msg := "SchemaPush", err.Error() @@ -253,6 +269,7 @@ func (r *AtlasSchemaReconciler) Reconcile(ctx context.Context, req ctrl.Request) Name: path.Join(repo.Host, repo.Path), Tag: fmt.Sprintf("operator-plan-%.8s", strings.ToLower(tag)), URL: []string{desiredURL}, + Vars: data.Vars, }) if err != nil { reason, msg := "SchemaPush", err.Error() @@ -274,6 +291,7 @@ func (r *AtlasSchemaReconciler) Reconcile(ctx context.Context, req ctrl.Request) From: []string{"env://url"}, To: []string{desiredURL}, Pending: true, + Vars: data.Vars, }) switch { case err != nil && strings.Contains(err.Error(), "no changes to be made"): @@ -308,6 +326,7 @@ func (r *AtlasSchemaReconciler) Reconcile(ctx context.Context, req ctrl.Request) Repo: repo.String(), From: []string{"env://url"}, To: []string{desiredURL}, + Vars: data.Vars, }); { case err != nil: reason, msg := "ListingPlans", err.Error() @@ -367,7 +386,7 @@ func (r *AtlasSchemaReconciler) Reconcile(ctx context.Context, req ctrl.Request) }); err != nil { return result(err) } - err = r.lint(ctx, wd, data, nil) + err = r.lint(ctx, wd, data, data.Vars) switch d := (*destructiveErr)(nil); { case errors.As(err, &d): reason, msg := d.FirstRun() @@ -396,9 +415,10 @@ func (r *AtlasSchemaReconciler) Reconcile(ctx context.Context, req ctrl.Request) To: desiredURL, TxMode: string(data.TxMode), AutoApprove: true, + Vars: data.Vars, }) // Run the linting policy. - case data.shouldLint(): + case shouldLint: if err = r.lint(ctx, wd, data, nil); err != nil { reason, msg := "LintPolicyError", err.Error() res.SetNotReady(reason, msg) @@ -414,6 +434,7 @@ func (r *AtlasSchemaReconciler) Reconcile(ctx context.Context, req ctrl.Request) To: desiredURL, TxMode: string(data.TxMode), AutoApprove: true, + Vars: data.Vars, }) // No linting policy is set. default: @@ -422,6 +443,7 @@ func (r *AtlasSchemaReconciler) Reconcile(ctx context.Context, req ctrl.Request) To: desiredURL, TxMode: string(data.TxMode), AutoApprove: true, + Vars: data.Vars, }) } if err != nil { @@ -497,6 +519,16 @@ func (r *AtlasSchemaReconciler) watchRefs(res *dbv1alpha1.AtlasSchema) { } } +// SetAtlasClient sets the Atlas client function. +func (r *AtlasSchemaReconciler) SetAtlasClient(fn AtlasExecFn) { + r.atlasClient = fn +} + +// AllowCustomConfig allows the controller to use custom atlas.hcl config. +func (r *AtlasSchemaReconciler) AllowCustomConfig() { + r.allowCustomConfig = true +} + // extractData extracts the info about the managed database and its desired state. 
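+// It returns a transient error when a referenced secret or configmap cannot be read yet.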
func (r *AtlasSchemaReconciler) extractData(ctx context.Context, res *dbv1alpha1.AtlasSchema) (_ *managedData, err error) {
	var (
@@ -511,6 +543,20 @@ func (r *AtlasSchemaReconciler) extractData(ctx context.Context, res *dbv1alpha1
 			TxMode: s.TxMode,
 		}
 	)
+	data.Config, err = s.GetConfig(ctx, r, res.Namespace)
+	if err != nil {
+		return nil, transient(err)
+	}
+	if !r.allowCustomConfig && data.Config != nil {
+		return nil, errors.New("install the operator with \"--set allowCustomConfig=true\" to use custom atlas.hcl config")
+	}
+	hasConfig := data.Config != nil
+	if hasConfig && data.EnvName == "" {
+		return nil, errors.New("env name must be set when using custom atlas.hcl config")
+	}
+	if env := s.EnvName; env != "" {
+		data.EnvName = env
+	}
 	if s := c.TokenFrom.SecretKeyRef; s != nil {
 		token, err := getSecretValue(ctx, r, res.Namespace, s)
 		if err != nil {
@@ -529,6 +575,9 @@
 	if err != nil {
 		return nil, transient(err)
 	}
+	if !hasConfig && data.URL == nil {
+		return nil, transient(errors.New("no target database defined"))
+	}
 	data.Desired, data.schema, err = s.Schema.DesiredState(ctx, r, res.Namespace)
 	if err != nil {
 		return nil, transient(err)
@@ -541,6 +590,10 @@
 			return nil, err
 		}
 	}
+	data.Vars, err = s.GetVars(ctx, r, res.Namespace)
+	if err != nil {
+		return nil, transient(err)
+	}
 	return data, nil
 }
 
@@ -553,12 +606,48 @@ func (r *AtlasSchemaReconciler) recordErrEvent(res *dbv1alpha1.AtlasSchema, err
 }
 
-// ShouldLint returns true if the linting policy is set to error.
-func (d *managedData) shouldLint() bool {
+// shouldLint reports whether the linting policy is set to error.
+func (d *managedData) shouldLint() (bool, error) {
 	p := d.Policy
 	if p == nil || p.Lint == nil || p.Lint.Destructive == nil {
+		// Check if the destructive policy is set to error in the custom config.
+		// This check won't work if the attribute value has expressions in it.
+		if d.Config != nil {
+			env := searchBlock(d.Config.Body(), hclwrite.NewBlock("env", []string{d.EnvName}))
+			if env == nil {
+				return false, nil
+			}
+			lint := searchBlock(env.Body(), hclwrite.NewBlock("lint", nil))
+			if lint == nil {
+				return false, nil
+			}
+			if v := searchBlock(lint.Body(), hclwrite.NewBlock("destructive", nil)); v != nil {
+				destructiveErr := v.Body().GetAttribute("error")
+				if destructiveErr != nil {
+					attrValue := strings.TrimSpace(string(destructiveErr.Expr().BuildTokens(nil).Bytes()))
+					attrValue = strings.Trim(attrValue, "\"")
+					b, err := strconv.ParseBool(attrValue)
+					if err != nil {
+						return false, errors.New("cannot determine the value of the destructive.error attribute")
+					}
+					return b, nil
+				}
+			}
+		}
+		return false, nil
+	}
+	return p.Lint.Destructive.Error, nil
+}
+
+// hasTargets reports whether the environment has multiple targets (multi-tenancy).
+func (d *managedData) hasTargets() bool {
+	if d.Config == nil {
 		return false
 	}
-	return p.Lint.Destructive.Error
+	env := searchBlock(d.Config.Body(), hclwrite.NewBlock("env", []string{d.EnvName}))
+	if env == nil {
+		return false
+	}
+	return env.Body().GetAttribute("for_each") != nil
 }
 
 // hash returns the sha256 hash of the desired.
@@ -595,22 +684,31 @@ func (d *managedData) repoURL() *url.URL {
 // The template is used by the Atlas CLI to apply the schema.
 // It also validates the data before rendering the template.
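+// Any custom config is merged with the generated blocks before the env block is validated.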
func (d *managedData) render(w io.Writer) error { - if d.EnvName == "" { - return errors.New("env name is not set") + f := hclwrite.NewFile() + for _, b := range d.asBlocks() { + f.Body().AppendBlock(b) } - if d.URL == nil { - return errors.New("database url is not set") + // Merge config into the atlas.hcl file. + if d.Config != nil { + mergeBlocks(d.Config.Body(), f.Body()) + f = d.Config } - if d.DevURL == "" { - return errors.New("dev url is not set") + if d.EnvName == "" { + return errors.New("env name is not set") } if d.Desired == nil { return errors.New("the desired state is not set") } - f := hclwrite.NewFile() - fBody := f.Body() - for _, b := range d.asBlocks() { - fBody.AppendBlock(b) + env := searchBlock(f.Body(), hclwrite.NewBlock("env", []string{d.EnvName})) + if env == nil { + return fmt.Errorf("env block %q is not found", d.EnvName) + } + b := env.Body() + if b.GetAttribute("url") == nil { + return errors.New("database url is not set") + } + if b.GetAttribute("dev") == nil { + return errors.New("dev url is not set") } if _, err := f.WriteTo(w); err != nil { return err diff --git a/internal/controller/atlasschema_controller_test.go b/internal/controller/atlasschema_controller_test.go index 0532b0a..047b7ff 100644 --- a/internal/controller/atlasschema_controller_test.go +++ b/internal/controller/atlasschema_controller_test.go @@ -202,6 +202,57 @@ func TestExtractData_CustomDevURL_Secret(t *testing.T) { require.EqualValues(t, "mysql://dev", data.DevURL) } +func TestExtractData_LintExpression(t *testing.T) { + sc := conditionReconciling() + sc.Spec.DevURL = "mysql://dev" + sc.Spec.EnvName = "kubernetes" + sc.Spec.Config = ` +env "kubernetes" { + lint { + destructive { + error = 1 == 1 + } + } +} + ` + tt := newTest(t) + data, err := tt.r.extractData(context.Background(), sc) + require.NoError(t, err) + _, err = data.shouldLint() + require.Error(t, err) + require.Contains(t, err.Error(), "cannot determine the value of the destructive.error attribute") +} + +func TestExtractData_MultiTargets(t *testing.T) { + sc := conditionReconciling() + sc.Spec.DevURL = "mysql://dev" + sc.Spec.EnvName = "kubernetes" + sc.Spec.Config = ` +env "kubernetes" { + for_each = ["foo", "bar"] +} + ` + tt := newTest(t) + data, err := tt.r.extractData(context.Background(), sc) + require.NoError(t, err) + hasTargets := data.hasTargets() + require.True(t, hasTargets) +} + +func TestExtractData_DisabledCustomConfig(t *testing.T) { + sc := conditionReconciling() + sc.Spec.DevURL = "mysql://dev" + sc.Spec.EnvName = "kubernetes" + sc.Spec.Config = ` +env "kubernetes" {} + ` + tt := newTest(t) + tt.r.allowCustomConfig = false + _, err := tt.r.extractData(context.Background(), sc) + require.Error(t, err) + require.Contains(t, err.Error(), "install the operator with \"--set allowCustomConfig=true\" to use custom atlas.hcl config") +} + func TestReconcile_Credentials_BadPassSecret(t *testing.T) { tt := newTest(t) sc := conditionReconciling() @@ -453,6 +504,61 @@ func TestConfigTemplate(t *testing.T) { require.EqualValues(t, expected, buf.String()) } +func TestCustomAtlasHCL_PolicyTemplate(t *testing.T) { + var buf bytes.Buffer + data := &managedData{ + EnvName: defaultEnvName, + URL: must(url.Parse("mysql://root:password@localhost:3306/test")), + DevURL: "mysql://root:password@localhost:3306/dev", + Schemas: []string{"foo", "bar"}, + Desired: must(url.Parse("file://schema.sql")), + Config: mustParseHCL(` +env "kubernetes" { + diff { + concurrent_index { + create = true + drop = true + } + skip { + drop_schema = 
true
+      drop_table  = true
+    }
+  }
+  lint {
+    destructive {
+      error = true
+    }
+  }
+}
+	`),
+	}
+	err := data.render(&buf)
+	require.NoError(t, err)
+	expected := `
+env "kubernetes" {
+  diff {
+    concurrent_index {
+      create = true
+      drop   = true
+    }
+    skip {
+      drop_schema = true
+      drop_table  = true
+    }
+  }
+  lint {
+    destructive {
+      error = true
+    }
+  }
+  dev     = "mysql://root:password@localhost:3306/dev"
+  schemas = ["foo", "bar"]
+  url     = "mysql://root:password@localhost:3306/test"
+}
+	`
+	require.EqualValues(t, expected, buf.String())
+}
+
 func conditionReconciling() *dbv1alpha1.AtlasSchema {
 	return &dbv1alpha1.AtlasSchema{
 		ObjectMeta: objmeta(),
@@ -543,6 +649,7 @@ func newTest(t *testing.T) *test {
 			scheme:   scheme,
 			recorder: r,
 		},
+		allowCustomConfig: true,
 	},
 	}
 }
diff --git a/internal/controller/common.go b/internal/controller/common.go
index bf208f8..4c1ba89 100644
--- a/internal/controller/common.go
+++ b/internal/controller/common.go
@@ -15,15 +15,21 @@
 package controller
 
 import (
+	"cmp"
 	"context"
 	"errors"
 	"fmt"
+	"iter"
+	"maps"
 	"regexp"
+	"slices"
 	"strings"
 	"time"
 
 	"ariga.io/atlas-go-sdk/atlasexec"
 	"ariga.io/atlas/sql/migrate"
+	"github.com/hashicorp/hcl/v2/hclsyntax"
+	"github.com/hashicorp/hcl/v2/hclwrite"
 	"github.com/zclconf/go-cty/cty"
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -194,3 +200,78 @@ func listStringVal(s []string) cty.Value {
 	}
 	return cty.ListVal(v)
 }
+
+// mergeBlocks merges the blocks from src into dst.
+func mergeBlocks(dst *hclwrite.Body, src *hclwrite.Body) {
+	for _, srcBlk := range src.Blocks() {
+		dstBlk := searchBlock(dst, srcBlk)
+		// If there is no block with the same type and name, append it.
+		if dstBlk == nil {
+			appendBlock(dst, srcBlk)
+			continue
+		}
+		mergeBlock(dstBlk, srcBlk)
+	}
+}
+
+// searchBlock searches for a block with the given type and name in the parent block.
+func searchBlock(parent *hclwrite.Body, target *hclwrite.Block) *hclwrite.Block {
+	blocks := parent.Blocks()
+	typBlocks := make([]*hclwrite.Block, 0, len(blocks))
+	for _, b := range blocks {
+		if b.Type() == target.Type() {
+			typBlocks = append(typBlocks, b)
+		}
+	}
+	if len(typBlocks) == 0 {
+		// No blocks of this type, return nil.
+		return nil
+	}
+	// Check if there is a block with the given name.
+	idx := slices.IndexFunc(typBlocks, func(b *hclwrite.Block) bool {
+		return slices.Compare(b.Labels(), target.Labels()) == 0
+	})
+	if idx == -1 {
+		// No block matched, check if there is an unnamed block.
+		idx = slices.IndexFunc(typBlocks, func(b *hclwrite.Block) bool {
+			return len(b.Labels()) == 0
+		})
+		if idx == -1 {
+			return nil
+		}
+	}
+	return typBlocks[idx]
+}
+
+// mergeBlock merges the source block into the destination block.
+func mergeBlock(dst, src *hclwrite.Block) {
+	for name, attr := range mapsSorted(src.Body().Attributes()) {
+		dst.Body().SetAttributeRaw(name, attr.Expr().BuildTokens(nil))
+	}
+	// Traverse to the nested blocks.
+	mergeBlocks(dst.Body(), src.Body())
+}
+
+// appendBlock appends a block to the body and ensures there is a newline before the block.
+// It returns the appended block.
+//
+// There is a bug in hclwrite that causes the block to be appended without a newline:
+// https://github.com/hashicorp/hcl/issues/687
+func appendBlock(body *hclwrite.Body, blk *hclwrite.Block) *hclwrite.Block {
+	t := body.BuildTokens(nil)
+	if len(t) == 0 || t[len(t)-1].Type != hclsyntax.TokenNewline {
+		body.AppendNewline()
+	}
+	return body.AppendBlock(blk)
+}
+
+// mapsSorted returns a sequence of key-value pairs sorted by key.
+func mapsSorted[K cmp.Ordered, V any](m map[K]V) iter.Seq2[K, V] {
+	return func(yield func(K, V) bool) {
+		for _, k := range slices.Sorted(maps.Keys(m)) {
+			if !yield(k, m[k]) {
+				return
+			}
+		}
+	}
+}
diff --git a/internal/controller/common_test.go b/internal/controller/common_test.go
new file mode 100644
index 0000000..918e85f
--- /dev/null
+++ b/internal/controller/common_test.go
@@ -0,0 +1,114 @@
+// Copyright 2023 The Atlas Operator Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package controller
+
+import (
+	"testing"
+
+	"github.com/hashicorp/hcl/v2"
+	"github.com/hashicorp/hcl/v2/hclwrite"
+)
+
+func Test_mergeBlocks(t *testing.T) {
+	type args struct {
+		dst string
+		src string
+	}
+	tests := []struct {
+		name     string
+		args     args
+		expected string
+	}{
+		{
+			name: "empty",
+			args: args{
+				dst: "",
+				src: "",
+			},
+			expected: "",
+		},
+		{
+			name: "dst empty",
+			args: args{
+				dst: "",
+				src: `env "example" {}`,
+			},
+			expected: `
+env "example" {}`,
+		},
+		{
+			name: "same block",
+			args: args{
+				dst: `env "example" {}`,
+				src: `env "example" {}`,
+			},
+			expected: `env "example" {}`,
+		},
+		{
+			name: "different block",
+			args: args{
+				dst: `env "example" {}`,
+				src: `env "example2" {}`,
+			},
+			expected: `env "example" {}
+env "example2" {}`,
+		},
+		{
+			name: "same block with different attributes",
+			args: args{
+				dst: `
+env "example" {
+  key = "value"
+}`,
+				src: `
+env "example" {
+  key2 = "value2"
+}`,
+			},
+			expected: `
+env "example" {
+  key = "value"
+  key2 = "value2"
+}`,
+		},
+		{
+			name: "same block with same attributes",
+			args: args{
+				dst: `
+env "example" {
+  key = "value"
+}`,
+				src: `
+env "example" {
+  key = "value2"
+}`,
+			},
+			expected: `
+env "example" {
+  key = "value2"
+}`,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			dst, _ := hclwrite.ParseConfig([]byte(tt.args.dst), "", hcl.InitialPos)
+			src, _ := hclwrite.ParseConfig([]byte(tt.args.src), "", hcl.InitialPos)
+			mergeBlocks(dst.Body(), src.Body())
+			if got := string(dst.Bytes()); got != tt.expected {
+				t.Errorf("mergeBlocks() = %v, want %v", got, tt.expected)
+			}
+		})
+	}
+}
diff --git a/internal/controller/testhelper.go b/internal/controller/testhelper.go
index 9c7c802..de7cad5 100644
--- a/internal/controller/testhelper.go
+++ b/internal/controller/testhelper.go
@@ -118,7 +118,10 @@ func (m *mockAtlasExec) MigrateStatus(context.Context, *atlasexec.MigrateStatusP
 }
 
 // newRunner returns a runner that can be used to test a reconcile.Reconciler.
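 // The constraint in the hunk below narrows T: instead of any reconcile.Reconciler,
 // T must also expose SetAtlasClient, so a test can construct the reconciler first
 // and inject a mock Atlas client afterwards. A sketch of a call site (the
 // constructor name and check signature are assumed, not shown in this diff):
 //
 //	h, run := newRunner(NewAtlasSchemaReconciler, nil, mock)
 //	run(obj, func(res reconcile.Result, err error) { /* assertions */ })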
-func newRunner[T reconcile.Reconciler](fn func(Manager, AtlasExecFn, bool) T, modify func(*fake.ClientBuilder), mock *mockAtlasExec) (*helper, runner) {
+func newRunner[T interface {
+	Reconcile(context.Context, reconcile.Request) (reconcile.Result, error)
+	SetAtlasClient(AtlasExecFn)
+}](fn func(Manager, bool) T, modify func(*fake.ClientBuilder), mock *mockAtlasExec) (*helper, runner) {
 	scheme := runtime.NewScheme()
 	clientgoscheme.AddToScheme(scheme)
 	dbv1alpha1.AddToScheme(scheme)
@@ -132,12 +135,13 @@ func newRunner[T reconcile.Reconciler](fn func(Manager, AtlasExecFn, bool) T, mo
 		client:   c,
 		recorder: r,
 		scheme:   scheme,
-	}, func(s string, c *Cloud) (AtlasExec, error) {
+	}, true)
+	a.SetAtlasClient(func(s string, c *Cloud) (AtlasExec, error) {
 		if mock == nil {
 			return NewAtlasExec(s, c)
 		}
 		return mock, nil
-	}, true)
+	})
 	h := &helper{client: c, recorder: r}
 	return h, func(obj client.Object, fn check) {
 		fn(a.Reconcile(context.Background(), request(obj)))
diff --git a/skaffold.yaml b/skaffold.yaml
index 697182d..7114f26 100644
--- a/skaffold.yaml
+++ b/skaffold.yaml
@@ -30,6 +30,7 @@ profiles:
       paths:
         - config/default
         - config/sqlserver
+        - config/customconfig
 - name: helm
   deploy:
     helm:
diff --git a/test/e2e/testscript/custom-config.txtar b/test/e2e/testscript/custom-config.txtar
new file mode 100644
index 0000000..a302c6e
--- /dev/null
+++ b/test/e2e/testscript/custom-config.txtar
@@ -0,0 +1,273 @@
+env SCHEMA_DB_URL=postgres://root:pass@postgres.${NAMESPACE}:5432/postgres?sslmode=disable
+env SCHEMA_DB_DEV_URL=postgres://root:pass@postgres.${NAMESPACE}:5433/postgres?sslmode=disable
+env MIGRATE_DB_URL=postgres://root:pass@postgres.${NAMESPACE}:5434/postgres?sslmode=disable
+env MIGRATE_DB_DEV_URL=postgres://root:pass@postgres.${NAMESPACE}:5435/postgres?sslmode=disable
+kubectl apply -f database.yaml
+kubectl create secret generic schema-db-creds --from-literal=url=${SCHEMA_DB_URL}
+kubectl create configmap schema-db-dev-creds --from-literal=url=${SCHEMA_DB_DEV_URL}
+kubectl create secret generic migrate-db-creds --from-literal=url=${MIGRATE_DB_URL}
+kubectl create configmap migrate-db-dev-creds --from-literal=url=${MIGRATE_DB_DEV_URL}
+
+# Wait for the first pod to be created
+kubectl-wait-available deploy/postgres
+# Wait for the DB to be ready before creating the schema
+kubectl-wait-ready -l app=postgres pods
+
+# Create the schema
+kubectl apply -f schema.yaml
+kubectl wait --for=jsonpath='{.status.conditions[*].reason}'=Applied --timeout=500s AtlasSchemas/sample
+
+# Create the migration
+kubectl apply -f migration.yaml
+kubectl wait --for=jsonpath='{.status.conditions[*].reason}'=Applied --timeout=500s AtlasMigrations/sample
+
+# Patch the schema and migration configs from secrets
+kubectl create secret generic schema-config --from-file=config.hcl
+kubectl create secret generic migration-config --from-file=config.hcl
+kubectl patch -f schema.yaml --type merge --patch-file patch-schema-config-from-configmap.yaml
+kubectl patch -f migration.yaml --type merge --patch-file patch-migration-config-from-configmap.yaml
+kubectl wait --for=jsonpath='{.status.conditions[*].reason}'=Applied --timeout=500s AtlasSchemas/sample
+kubectl wait --for=jsonpath='{.status.conditions[*].reason}'=Applied --timeout=500s AtlasMigrations/sample
+
+# Ensure the operator prioritizes the spec url over the config
+kubectl patch -f schema.yaml --type merge --patch-file patch-invalid-url.yaml
+kubectl patch -f migration.yaml --type merge --patch-file patch-invalid-url.yaml
+kubectl wait 
--for=jsonpath='{.status.conditions[*].message}'='"Error: postgres: scanning system variables: pq: Could not detect default username. Please provide one explicitly"' --timeout=500s AtlasSchemas/sample +kubectl wait --for=jsonpath='{.status.conditions[*].message}'='"Error: postgres: scanning system variables: pq: Could not detect default username. Please provide one explicitly"' --timeout=500s AtlasMigrations/sample + +-- schema.yaml -- +apiVersion: db.atlasgo.io/v1alpha1 +kind: AtlasSchema +metadata: + name: sample +spec: + envName: "test" + schema: + sql: | + create table users ( + id int not null, + name varchar(255) not null, + email varchar(255) unique not null, + short_bio varchar(255) not null, + primary key (id) + ); + vars: + - key: "db_url" + valueFrom: + secretKeyRef: + name: schema-db-creds + key: url + - key: "dev_db_url" + valueFrom: + configMapKeyRef: + name: schema-db-dev-creds + key: url + config: | + variable "db_url" { + type = string + } + variable "dev_db_url" { + type = string + } + env "test" { + url = var.db_url + dev = var.dev_db_url + } +-- patch-schema-config-from-configmap.yaml -- +spec: + config: + configFrom: + secretKeyRef: + name: schema-config + key: config.hcl +-- patch-migration-config-from-configmap.yaml -- +spec: + config: + configFrom: + secretKeyRef: + name: schema-config + key: config.hcl +-- patch-invalid-url.yaml -- +spec: + url: "postgres://invalid" +-- config.hcl -- +variable "db_url" { + type = string +} +variable "dev_db_url" { + type = string +} +env "test" { + url = var.db_url + dev = var.dev_db_url +} +-- migration.yaml -- +apiVersion: db.atlasgo.io/v1alpha1 +kind: AtlasMigration +metadata: + name: sample +spec: + envName: "test" + vars: + - key: "db_url" + valueFrom: + secretKeyRef: + name: migrate-db-creds + key: url + - key: "dev_db_url" + valueFrom: + configMapKeyRef: + name: migrate-db-dev-creds + key: url + config: | + variable "db_url" { + type = string + } + variable "dev_db_url" { + type = string + } + env "test" { + url = var.db_url + dev = var.dev_db_url + } + dir: + local: + 20230316085611.sql: | + create sequence users_seq; + create table users ( + id int not null default nextval ('users_seq'), + name varchar(255) not null, + email varchar(255) unique not null, + short_bio varchar(255) not null, + primary key (id) + ); + atlas.sum: | + h1:FwM0ApKo8xhcZFrSlpa6dYjvi0fnDPo/aZSzajtbHLc= + 20230316085611.sql h1:ldFr73m6ZQzNi8q9dVJsOU/ZHmkBo4Sax03AaL0VUUs= +-- database.yaml -- +apiVersion: v1 +kind: Service +metadata: + name: postgres +spec: + selector: + app: postgres + ports: + - name: postgres + port: 5432 + targetPort: postgres + - name: postgres-dev + port: 5433 + targetPort: postgres-dev + - name: pg-migrate + port: 5434 + targetPort: pg-migrate + - name: pg-migrate-dev + port: 5435 + targetPort: pg-migrate-dev + type: ClusterIP +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: postgres +spec: + selector: + matchLabels: + app: postgres + replicas: 1 + template: + metadata: + labels: + app: postgres + spec: + securityContext: + runAsNonRoot: true + runAsUser: 999 + containers: + - name: postgres + image: postgres:15.4 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - all + env: + - name: POSTGRES_PASSWORD + value: pass + - name: POSTGRES_USER + value: root + ports: + - containerPort: 5432 + name: postgres + startupProbe: + exec: + command: [ "pg_isready" ] + failureThreshold: 30 + periodSeconds: 10 + - name: postgres-dev + image: postgres:15.4 + securityContext: + 
allowPrivilegeEscalation: false
+            capabilities:
+              drop:
+                - all
+          env:
+            - name: POSTGRES_PASSWORD
+              value: pass
+            - name: POSTGRES_USER
+              value: root
+            - name: PGPORT
+              value: "5433"
+          ports:
+            - containerPort: 5433
+              name: postgres-dev
+          startupProbe:
+            exec:
+              command: [ "pg_isready" ]
+            failureThreshold: 30
+            periodSeconds: 10
+        - name: pg-migrate
+          image: postgres:15.4
+          securityContext:
+            allowPrivilegeEscalation: false
+            capabilities:
+              drop:
+                - all
+          env:
+            - name: POSTGRES_PASSWORD
+              value: pass
+            - name: POSTGRES_USER
+              value: root
+            - name: PGPORT
+              value: "5434"
+          ports:
+            - containerPort: 5434
+              name: pg-migrate
+          startupProbe:
+            exec:
+              command: [ "pg_isready" ]
+            failureThreshold: 30
+            periodSeconds: 10
+        - name: pg-migrate-dev
+          image: postgres:15.4
+          securityContext:
+            allowPrivilegeEscalation: false
+            capabilities:
+              drop:
+                - all
+          env:
+            - name: POSTGRES_PASSWORD
+              value: pass
+            - name: POSTGRES_USER
+              value: root
+            - name: PGPORT
+              value: "5435"
+          ports:
+            - containerPort: 5435
+              name: pg-migrate-dev
+          startupProbe:
+            exec:
+              command: [ "pg_isready" ]
+            failureThreshold: 30
+            periodSeconds: 10
diff --git a/test/e2e/testscript/multi-tenancy.txtar b/test/e2e/testscript/multi-tenancy.txtar
new file mode 100644
index 0000000..5d0c226
--- /dev/null
+++ b/test/e2e/testscript/multi-tenancy.txtar
@@ -0,0 +1,154 @@
+env DB_URL=postgres://root:pass@postgres.${NAMESPACE}:5432/postgres?sslmode=disable
+env DB_DEV_URL=postgres://root:pass@postgres.${NAMESPACE}:5433/postgres?sslmode=disable
+kubectl apply -f database.yaml
+kubectl create secret generic db-creds --from-literal=url=${DB_URL}
+kubectl create configmap db-dev-creds --from-literal=url=${DB_DEV_URL}
+
+# Wait for the first pod to be created
+kubectl-wait-available deploy/postgres
+# Wait for the DB to be ready before creating the schema
+kubectl-wait-ready -l app=postgres pods
+
+# Create the tenant schemas
+kubectl exec -i deploy/postgres -- psql -U root -d postgres -c 'CREATE SCHEMA tenant_1;'
+kubectl exec -i deploy/postgres -- psql -U root -d postgres -c 'CREATE SCHEMA tenant_2;'
+
+# Inspect the schema to ensure it's correct
+atlas schema inspect -u ${DB_URL}
+cmp stdout schema.hcl
+
+kubectl apply -f multi-tenancy.yaml
+kubectl wait --for=jsonpath='{.status.conditions[*].reason}'=MultipleTargets --timeout=500s AtlasSchemas/multi-tenancy
+
+-- multi-tenancy.yaml --
+apiVersion: db.atlasgo.io/v1alpha1
+kind: AtlasSchema
+metadata:
+  name: multi-tenancy
+spec:
+  envName: "test"
+  schema:
+    sql: |
+      create table users (
+        id int not null,
+        name varchar(255) not null,
+        email varchar(255) unique not null,
+        short_bio varchar(255) not null,
+        primary key (id)
+      );
+  vars:
+    - key: "db_url"
+      valueFrom:
+        secretKeyRef:
+          name: db-creds
+          key: url
+    - key: "dev_db_url"
+      valueFrom:
+        configMapKeyRef:
+          name: db-dev-creds
+          key: url
+  config: |
+    variable "db_url" {
+      type = string
+    }
+    variable "dev_db_url" {
+      type = string
+    }
+    data "sql" "tenants" {
+      url = var.db_url
+      query = <