diff --git a/.chainsaw.yaml b/.chainsaw.yaml
index d048c66..1dd8db4 100644
--- a/.chainsaw.yaml
+++ b/.chainsaw.yaml
@@ -3,10 +3,10 @@ kind: Configuration
 metadata:
   name: custom-config
 spec:
-  # namespace: test
+  #namespace: test
   timeouts:
-    apply: 420s
-    assert: 600s
+    apply: 120s
+    assert: 120s
     cleanup: 240s
     delete: 240s
     error: 200s
diff --git a/api/v1alpha1/datanode.go b/api/v1alpha1/datanode.go
index 96fd005..a4d99f3 100644
--- a/api/v1alpha1/datanode.go
+++ b/api/v1alpha1/datanode.go
@@ -1,6 +1,9 @@
 package v1alpha1
 
-import corev1 "k8s.io/api/core/v1"
+import (
+	commonsv1alpha1 "github.com/zncdatadev/operator-go/pkg/apis/commons/v1alpha1"
+	corev1 "k8s.io/api/core/v1"
+)
 
 type DataNodeRoleGroupSpec struct {
 	// +kubebuilder:validation:Optional
@@ -22,7 +25,7 @@ type DataNodeRoleGroupSpec struct {
 
 type DataNodeConfigSpec struct {
 	// +kubebuilder:validation:Optional
-	Resources *ResourcesSpec `json:"resources,omitempty"`
+	Resources *commonsv1alpha1.ResourcesSpec `json:"resources,omitempty"`
 
 	// +kubebuilder:validation:Optional
 	// +kubebuilder:default="external-unstable"
diff --git a/api/v1alpha1/image.go b/api/v1alpha1/image.go
index 0f5dbde..37945c7 100644
--- a/api/v1alpha1/image.go
+++ b/api/v1alpha1/image.go
@@ -47,5 +47,6 @@ func TransformImage(imageSpec *ImageSpec) *util.Image {
 		ProductVersion: imageSpec.ProductVersion,
 		PullPolicy:     imageSpec.PullPolicy,
 		PullSecretName: imageSpec.PullSecretName,
+		ProductName:    DefaultProductName,
 	}
 }
diff --git a/api/v1alpha1/journalnode.go b/api/v1alpha1/journalnode.go
index 07bf2b3..9422f41 100644
--- a/api/v1alpha1/journalnode.go
+++ b/api/v1alpha1/journalnode.go
@@ -1,6 +1,9 @@
 package v1alpha1
 
-import corev1 "k8s.io/api/core/v1"
+import (
+	commonsv1alpha1 "github.com/zncdatadev/operator-go/pkg/apis/commons/v1alpha1"
+	corev1 "k8s.io/api/core/v1"
+)
 
 type JournalNodeRoleGroupSpec struct {
 	// +kubebuilder:validation:Optional
@@ -22,7 +25,7 @@ type JournalNodeRoleGroupSpec struct {
 
 type JournalNodeConfigSpec struct {
 	// +kubebuilder:validation:Optional
-	Resources *ResourcesSpec `json:"resources,omitempty"`
+	Resources *commonsv1alpha1.ResourcesSpec `json:"resources,omitempty"`
 
 	// +kubebuilder:validation:Optional
 	// +kubebuilder:default="cluster-internal"
diff --git a/api/v1alpha1/namenode.go b/api/v1alpha1/namenode.go
index c8a0cfd..9397bc4 100644
--- a/api/v1alpha1/namenode.go
+++ b/api/v1alpha1/namenode.go
@@ -1,6 +1,9 @@
 package v1alpha1
 
-import corev1 "k8s.io/api/core/v1"
+import (
+	commonsv1alpha1 "github.com/zncdatadev/operator-go/pkg/apis/commons/v1alpha1"
+	corev1 "k8s.io/api/core/v1"
+)
 
 type NameNodeRoleGroupSpec struct {
 	// +kubebuilder:validation:Optional
@@ -22,7 +25,7 @@ type NameNodeRoleGroupSpec struct {
 
 type NameNodeConfigSpec struct {
 	// +kubebuilder:validation:Optional
-	Resources *ResourcesSpec `json:"resources,omitempty"`
+	Resources *commonsv1alpha1.ResourcesSpec `json:"resources,omitempty"`
 
 	// +kubebuilder:validation:Optional
 	// +kubebuilder:default="external-stable"
diff --git a/api/v1alpha1/resources.go b/api/v1alpha1/resources.go
deleted file mode 100644
index ae778fa..0000000
--- a/api/v1alpha1/resources.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package v1alpha1
-
-import (
-	"k8s.io/apimachinery/pkg/api/resource"
-)
-
-const (
-	CpuMin      = "100m"
-	CpuMax      = "500m"
-	MemoryLimit = "1.5Gi"
-)
-
-type ResourcesSpec struct {
-	// +kubebuilder:validation:Optional
-	CPU *CPUResource `json:"cpu,omitempty"`
-
-	// +kubebuilder:validation:Optional
-	Memory *MemoryResource `json:"memory,omitempty"`
-
-	// +kubebuilder:validation:Optional
-	Storage *StorageResource `json:"storage,omitempty"`
-}
-
-type StorageResourceSpec struct {
-	Data *StorageResource `json:"data"`
-}
-
-type CPUResource struct {
-	// +kubebuilder:validation:Optional
-	Max *resource.Quantity `json:"max,omitempty"`
-
-	// +kubebuilder:validation:Optional
-	Min *resource.Quantity `json:"min,omitempty"`
-}
-
-type MemoryResource struct {
-	// +kubebuilder:validation:Optional
-	Limit *resource.Quantity `json:"limit,omitempty"`
-}
-
-type StorageResource struct {
-	// +kubebuilder:validation:Optional
-	Capacity *resource.Quantity `json:"capacity,omitempty"`
-
-	// +kubebuilder:validation:Optional
-	StorageClass string `json:"storageClass,omitempty"`
-}
diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go
index c6a7b94..93e00a2 100644
--- a/api/v1alpha1/zz_generated.deepcopy.go
+++ b/api/v1alpha1/zz_generated.deepcopy.go
@@ -21,6 +21,7 @@ limitations under the License.
 package v1alpha1
 
 import (
+	commonsv1alpha1 "github.com/zncdatadev/operator-go/pkg/apis/commons/v1alpha1"
 	"k8s.io/api/core/v1"
 	runtime "k8s.io/apimachinery/pkg/runtime"
 )
@@ -50,31 +51,6 @@ func (in *AuthenticationSpec) DeepCopy() *AuthenticationSpec {
 	return out
 }
 
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *CPUResource) DeepCopyInto(out *CPUResource) {
-	*out = *in
-	if in.Max != nil {
-		in, out := &in.Max, &out.Max
-		x := (*in).DeepCopy()
-		*out = &x
-	}
-	if in.Min != nil {
-		in, out := &in.Min, &out.Min
-		x := (*in).DeepCopy()
-		*out = &x
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CPUResource.
-func (in *CPUResource) DeepCopy() *CPUResource {
-	if in == nil {
-		return nil
-	}
-	out := new(CPUResource)
-	in.DeepCopyInto(out)
-	return out
-}
-
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *ClusterConfigSpec) DeepCopyInto(out *ClusterConfigSpec) {
 	*out = *in
@@ -169,7 +145,7 @@ func (in *DataNodeConfigSpec) DeepCopyInto(out *DataNodeConfigSpec) {
 	*out = *in
 	if in.Resources != nil {
 		in, out := &in.Resources, &out.Resources
-		*out = new(ResourcesSpec)
+		*out = new(commonsv1alpha1.ResourcesSpec)
 		(*in).DeepCopyInto(*out)
 	}
 	if in.SecurityContext != nil {
@@ -476,7 +452,7 @@ func (in *JournalNodeConfigSpec) DeepCopyInto(out *JournalNodeConfigSpec) {
 	*out = *in
 	if in.Resources != nil {
 		in, out := &in.Resources, &out.Resources
-		*out = new(ResourcesSpec)
+		*out = new(commonsv1alpha1.ResourcesSpec)
 		(*in).DeepCopyInto(*out)
 	}
 	if in.SecurityContext != nil {
@@ -725,32 +701,12 @@ func (in *LoggingConfigSpec) DeepCopy() *LoggingConfigSpec {
 	return out
 }
 
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *MemoryResource) DeepCopyInto(out *MemoryResource) {
-	*out = *in
-	if in.Limit != nil {
-		in, out := &in.Limit, &out.Limit
-		x := (*in).DeepCopy()
-		*out = &x
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemoryResource.
-func (in *MemoryResource) DeepCopy() *MemoryResource {
-	if in == nil {
-		return nil
-	}
-	out := new(MemoryResource)
-	in.DeepCopyInto(out)
-	return out
-}
-
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *NameNodeConfigSpec) DeepCopyInto(out *NameNodeConfigSpec) {
 	*out = *in
 	if in.Resources != nil {
 		in, out := &in.Resources, &out.Resources
-		*out = new(ResourcesSpec)
+		*out = new(commonsv1alpha1.ResourcesSpec)
 		(*in).DeepCopyInto(*out)
 	}
 	if in.SecurityContext != nil {
@@ -958,36 +914,6 @@ func (in *PodDisruptionBudgetSpec) DeepCopy() *PodDisruptionBudgetSpec {
 	return out
 }
 
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ResourcesSpec) DeepCopyInto(out *ResourcesSpec) {
-	*out = *in
-	if in.CPU != nil {
-		in, out := &in.CPU, &out.CPU
-		*out = new(CPUResource)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Memory != nil {
-		in, out := &in.Memory, &out.Memory
-		*out = new(MemoryResource)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Storage != nil {
-		in, out := &in.Storage, &out.Storage
-		*out = new(StorageResource)
-		(*in).DeepCopyInto(*out)
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcesSpec.
-func (in *ResourcesSpec) DeepCopy() *ResourcesSpec {
-	if in == nil {
-		return nil
-	}
-	out := new(ResourcesSpec)
-	in.DeepCopyInto(out)
-	return out
-}
-
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) {
 	*out = *in
@@ -1010,46 +936,6 @@ func (in *ServiceSpec) DeepCopy() *ServiceSpec {
 	return out
 }
 
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *StorageResource) DeepCopyInto(out *StorageResource) {
-	*out = *in
-	if in.Capacity != nil {
-		in, out := &in.Capacity, &out.Capacity
-		x := (*in).DeepCopy()
-		*out = &x
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageResource.
-func (in *StorageResource) DeepCopy() *StorageResource {
-	if in == nil {
-		return nil
-	}
-	out := new(StorageResource)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *StorageResourceSpec) DeepCopyInto(out *StorageResourceSpec) {
-	*out = *in
-	if in.Data != nil {
-		in, out := &in.Data, &out.Data
-		*out = new(StorageResource)
-		(*in).DeepCopyInto(*out)
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageResourceSpec.
-func (in *StorageResourceSpec) DeepCopy() *StorageResourceSpec {
-	if in == nil {
-		return nil
-	}
-	out := new(StorageResourceSpec)
-	in.DeepCopyInto(out)
-	return out
-}
-
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *TlsSpec) DeepCopyInto(out *TlsSpec) {
 	*out = *in
diff --git a/config/crd/bases/hdfs.zncdata.dev_hdfsclusters.yaml b/config/crd/bases/hdfs.zncdata.dev_hdfsclusters.yaml
index 73cefb7..6ad42d9 100644
--- a/config/crd/bases/hdfs.zncdata.dev_hdfsclusters.yaml
+++ b/config/crd/bases/hdfs.zncdata.dev_hdfsclusters.yaml
@@ -1259,6 +1259,7 @@ spec:
                         anyOf:
                         - type: integer
                         - type: string
+                        default: 10Gi
                         pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
                         x-kubernetes-int-or-string: true
                       storageClass:
@@ -2737,6 +2738,7 @@ spec:
                         anyOf:
                         - type: integer
                         - type: string
+                        default: 10Gi
                         pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
                         x-kubernetes-int-or-string: true
                       storageClass:
@@ -4139,6 +4141,7 @@ spec:
                         anyOf:
                         - type: integer
                         - type: string
+                        default: 10Gi
                         pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
                         x-kubernetes-int-or-string: true
                       storageClass:
@@ -5534,6 +5537,7 @@ spec:
                         anyOf:
                         - type: integer
                         - type: string
+                        default: 10Gi
                         pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
                         x-kubernetes-int-or-string: true
                       storageClass:
@@ -7165,6 +7169,7 @@ spec:
                         anyOf:
                         - type: integer
                         - type: string
+                        default: 10Gi
                         pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
                         x-kubernetes-int-or-string: true
                       storageClass:
@@ -8809,6 +8814,7 @@ spec:
                         anyOf:
                         - type: integer
                         - type: string
+                        default: 10Gi
                         pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
                         x-kubernetes-int-or-string: true
                       storageClass:
diff --git a/internal/common/role_config.go b/internal/common/role_config.go
index 53afd84..0d4f49b 100644
--- a/internal/common/role_config.go
+++ b/internal/common/role_config.go
@@ -1,10 +1,11 @@
 package common
 
 import (
-	hdfsv1alpha1 "github.com/zncdatadev/hdfs-operator/api/v1alpha1"
-	"k8s.io/apimachinery/pkg/api/resource"
 	"reflect"
 	"time"
+
+	commonsv1alpha1 "github.com/zncdatadev/operator-go/pkg/apis/commons/v1alpha1"
+	"k8s.io/apimachinery/pkg/api/resource"
 )
 
 const (
@@ -12,7 +13,7 @@ const (
 )
 
 type RoleNodeConfig struct {
-	resources *hdfsv1alpha1.ResourcesSpec
+	resources *commonsv1alpha1.ResourcesSpec
 	// logging config todo
 	listenerClass string
 	common        *GeneralNodeConfig
@@ -24,8 +25,8 @@ type GeneralNodeConfig struct {
 	gracefulShutdownTimeoutSeconds time.Duration
 }
 
-func newDefaultResourceSpec(role Role) *hdfsv1alpha1.ResourcesSpec {
-	var cpuMin, cpuMax, memoryLimit, storage *resource.Quantity
+func newDefaultResourceSpec(role Role) *commonsv1alpha1.ResourcesSpec {
+	var cpuMin, cpuMax, memoryLimit, storage resource.Quantity
 	switch role {
 	case NameNode:
 		cpuMin = parseQuantity("250m")
@@ -45,15 +46,15 @@ func newDefaultResourceSpec(role Role) *hdfsv1alpha1.ResourcesSpec {
 	default:
 		panic("invalid role")
 	}
-	return &hdfsv1alpha1.ResourcesSpec{
-		CPU: &hdfsv1alpha1.CPUResource{
+	return &commonsv1alpha1.ResourcesSpec{
+		CPU: &commonsv1alpha1.CPUResource{
 			Min: cpuMin,
 			Max: cpuMax,
 		},
-		Memory: &hdfsv1alpha1.MemoryResource{
+		Memory: &commonsv1alpha1.MemoryResource{
 			Limit: memoryLimit,
 		},
-		Storage: &hdfsv1alpha1.StorageResource{
+		Storage: &commonsv1alpha1.StorageResource{
 			Capacity: storage,
 		},
 	}
@@ -88,7 +89,9 @@ func DefaultJournalNodeConfig(clusterName string) *RoleNodeConfig {
 	return DefaultNodeConfig(clusterName, JournalNode, "", 15*time.Minute)
 }
 
+// todo: refactor this, do this using detail type
 func (n *RoleNodeConfig) MergeDefaultConfig(mergedCfg any) {
+	// Make sure mergedCfg is a pointer type
 
 	configValue := reflect.ValueOf(mergedCfg)
 	if configValue.Kind() != reflect.Ptr {
@@ -107,8 +110,34 @@
 
 	// Get the Resources field
 	resourcesField := config.FieldByName("Resources")
-	if resourcesField.IsValid() && resourcesField.IsZero() && resourcesField.CanSet() {
-		resourcesField.Set(reflect.ValueOf(n.resources))
+	var resourceRes *commonsv1alpha1.ResourcesSpec
+	if resourcesField.IsValid() && resourcesField.CanSet() {
+		if resourcesField.IsZero() {
+			resourceRes = n.resources
+		} else {
+			// adjust resourcesField is commonsv1alpha1.ResourcesSpec
+			if resourcesField.Type().Kind() == reflect.Ptr && resourcesField.Type().Elem() == reflect.TypeOf(commonsv1alpha1.ResourcesSpec{}) {
+				// transform resourcesField to *commonsv1alpha1.ResourcesSpec
+				if resourcesField.Kind() == reflect.Ptr && resourcesField.Type().Elem() == reflect.TypeOf(commonsv1alpha1.ResourcesSpec{}) {
+					mergedResource := resourcesField.Interface().(*commonsv1alpha1.ResourcesSpec)
+					if mergedResource == nil {
+						resourceRes = n.resources
+					} else {
+						resourceRes = mergedResource
+						if mergedResource.CPU == nil {
+							resourceRes.CPU = n.resources.CPU
+						}
+						if mergedResource.Memory == nil {
+							resourceRes.Memory = n.resources.Memory
+						}
+						if mergedResource.Storage == nil {
+							resourceRes.Storage = n.resources.Storage
+						}
+					}
+				}
+			}
+		}
+		resourcesField.Set(reflect.ValueOf(resourceRes))
 	}
 
 	// Get the ListenerClass field
@@ -128,7 +157,7 @@ func (n *RoleNodeConfig) MergeDefaultConfig(mergedCfg any) {
 	// config.FieldByName("GracefulShutdownTimeoutSeconds").Set(reflect.ValueOf(n.common.gracefulShutdownTimeoutSeconds))
 }
 
-func parseQuantity(q string) *resource.Quantity {
+func parseQuantity(q string) resource.Quantity {
 	r := resource.MustParse(q)
-	return &r
+	return r
 }
diff --git a/internal/common/util.go b/internal/common/util.go
index d47611d..902515d 100644
--- a/internal/common/util.go
+++ b/internal/common/util.go
@@ -11,6 +11,7 @@ import (
 
 	hdfsv1alpha1 "github.com/zncdatadev/hdfs-operator/api/v1alpha1"
 	"github.com/zncdatadev/hdfs-operator/internal/util"
+	commonsv1alpha1 "github.com/zncdatadev/operator-go/pkg/apis/commons/v1alpha1"
 	"github.com/zncdatadev/operator-go/pkg/constants"
 )
 
@@ -43,19 +44,19 @@ func CreateRoleGroupLoggingConfigMapName(instanceName string, role string, group
 	return util.NewResourceNameGenerator(instanceName, role, groupName).GenerateResourceName("log")
 }
 
-func ConvertToResourceRequirements(resources *hdfsv1alpha1.ResourcesSpec) *corev1.ResourceRequirements {
+func ConvertToResourceRequirements(resources *commonsv1alpha1.ResourcesSpec) *corev1.ResourceRequirements {
 	if resources != nil {
 		request := corev1.ResourceList{}
 		limit := corev1.ResourceList{}
-		if resources.CPU != nil && resources.CPU.Min != nil {
-			request[corev1.ResourceCPU] = *resources.CPU.Min
+		if resources.CPU != nil && !resources.CPU.Min.IsZero() {
+			request[corev1.ResourceCPU] = resources.CPU.Min
 		}
-		if resources.CPU != nil && resources.CPU.Max != nil {
-			limit[corev1.ResourceCPU] = *resources.CPU.Max
+		if resources.CPU != nil && !resources.CPU.Max.IsZero() {
+			limit[corev1.ResourceCPU] = resources.CPU.Max
 		}
-		if resources.Memory != nil && resources.Memory.Limit != nil {
-			request[corev1.ResourceMemory] = *resources.Memory.Limit
-			limit[corev1.ResourceMemory] = *resources.Memory.Limit
+		if resources.Memory != nil && !resources.Memory.Limit.IsZero() {
+			request[corev1.ResourceMemory] = resources.Memory.Limit
+			limit[corev1.ResourceMemory] = resources.Memory.Limit
 		}
 		r := &corev1.ResourceRequirements{}
 		if len(request) > 0 {
@@ -65,18 +66,8 @@ func ConvertToResourceRequirements(resources *hdfsv1alpha1.ResourcesSpec) *corev
 			r.Limits = limit
 		}
 		return r
-	} else {
-		return &corev1.ResourceRequirements{
-			Limits: corev1.ResourceList{
-				corev1.ResourceCPU:    resource.MustParse(hdfsv1alpha1.CpuMax),
-				corev1.ResourceMemory: resource.MustParse(hdfsv1alpha1.MemoryLimit),
-			},
-			Requests: corev1.ResourceList{
-				corev1.ResourceCPU:    resource.MustParse(hdfsv1alpha1.CpuMin),
-				corev1.ResourceMemory: resource.MustParse(hdfsv1alpha1.MemoryLimit),
-			},
-		}
 	}
+	return nil
 }
 
 // Name node
diff --git a/internal/controller/data/statefulset.go b/internal/controller/data/statefulset.go
index 37d7b7d..3370d2b 100644
--- a/internal/controller/data/statefulset.go
+++ b/internal/controller/data/statefulset.go
@@ -200,7 +200,7 @@ func (s *StatefulSetReconciler) createDataPvcTemplate() corev1.PersistentVolumeC
 			VolumeMode: func() *corev1.PersistentVolumeMode { v := corev1.PersistentVolumeFilesystem; return &v }(),
 			Resources: corev1.VolumeResourceRequirements{
 				Requests: corev1.ResourceList{
-					corev1.ResourceStorage: *storageSize,
+					corev1.ResourceStorage: storageSize,
 				},
 			},
 		},
diff --git a/internal/controller/hdfscluster_controller.go b/internal/controller/hdfscluster_controller.go
index 0882942..7667348 100644
--- a/internal/controller/hdfscluster_controller.go
+++ b/internal/controller/hdfscluster_controller.go
@@ -18,9 +18,10 @@ package controller
 
 import (
 	"context"
-	"emperror.dev/errors"
 	"time"
+
+	"emperror.dev/errors"
 
 	"github.com/go-logr/logr"
 	"k8s.io/apimachinery/pkg/runtime"
 	ctrl "sigs.k8s.io/controller-runtime"
diff --git a/internal/controller/journal/statefulset.go b/internal/controller/journal/statefulset.go
index b3f764a..93bd5d3 100644
--- a/internal/controller/journal/statefulset.go
+++ b/internal/controller/journal/statefulset.go
@@ -163,7 +163,7 @@ func (s *StatefulSetReconciler) createDataPvcTemplate() corev1.PersistentVolumeC
 			VolumeMode: func() *corev1.PersistentVolumeMode { v := corev1.PersistentVolumeFilesystem; return &v }(),
 			Resources: corev1.VolumeResourceRequirements{
 				Requests: corev1.ResourceList{
-					corev1.ResourceStorage: *s.MergedCfg.Config.Resources.Storage.Capacity,
+					corev1.ResourceStorage: s.MergedCfg.Config.Resources.Storage.Capacity,
 				},
 			},
 		},
diff --git a/internal/controller/name/statefulset.go b/internal/controller/name/statefulset.go
index 5e93d8f..cab0201 100644
--- a/internal/controller/name/statefulset.go
+++ b/internal/controller/name/statefulset.go
@@ -243,7 +243,7 @@ func (s *StatefulSetReconciler) createDataPvcTemplate() corev1.PersistentVolumeC
 			VolumeMode: func() *corev1.PersistentVolumeMode { v := corev1.PersistentVolumeFilesystem; return &v }(),
 			Resources: corev1.VolumeResourceRequirements{
 				Requests: corev1.ResourceList{
-					corev1.ResourceStorage: *storageSize,
+					corev1.ResourceStorage: storageSize,
 				},
 			},
 		},
diff --git a/test/e2e/default/chainsaw-test.yaml b/test/e2e/default/chainsaw-test.yaml
index 190d594..4948f77 100644
--- a/test/e2e/default/chainsaw-test.yaml
+++ b/test/e2e/default/chainsaw-test.yaml
@@ -34,6 +34,7 @@ spec:
     - apply:
         file: hdfs.yaml
     - assert:
+        timeout: 360s
        file: hdfs-assert.yaml
     cleanup:
     - sleep:
diff --git a/test/e2e/default/hdfs.yaml b/test/e2e/default/hdfs.yaml
index b3dd126..bf8e3e9 100644
--- a/test/e2e/default/hdfs.yaml
+++ b/test/e2e/default/hdfs.yaml
@@ -34,6 +34,12 @@ spec:
       default:
         replicas: 1
         config:
+          resources:
+            cpu:
+              min: 200m
+              max: 800m
+            memory:
+              limit: "1Gi"
          logging:
            journalNode:
              loggers:
diff --git a/test/e2e/setup/listener.yaml b/test/e2e/setup/listener.yaml
index 1701169..4377379 100644
--- a/test/e2e/setup/listener.yaml
+++ b/test/e2e/setup/listener.yaml
@@ -11,7 +11,7 @@ metadata:
 spec:
   csiDriver:
     repository: quay.io/zncdatadev/listener-csi-driver
-    tag: v0.0.1
+    tag: "0.0.0-dev"
     pullPolicy: IfNotPresent
   logging:
     level: "10"
diff --git a/test/e2e/setup/olm.yaml b/test/e2e/setup/olm.yaml
index d24dd01..79f2300 100644
--- a/test/e2e/setup/olm.yaml
+++ b/test/e2e/setup/olm.yaml
@@ -17,7 +17,7 @@ spec:
   sourceType: grpc
   grpcPodConfig:
     securityContextConfig: restricted
-  image: quay.io/zncdatadev/kubedata-catalog:latest
+  image: quay.io/zncdatadev/kubedoop-catalog:latest
   displayName: kubedata-catalog
   publisher: zncdatadev
   updateStrategy:
diff --git a/test/e2e/setup/zookeeper-assert.yaml b/test/e2e/setup/zookeeper-assert.yaml
index 569f7bb..470d644 100644
--- a/test/e2e/setup/zookeeper-assert.yaml
+++ b/test/e2e/setup/zookeeper-assert.yaml
@@ -1,7 +1,7 @@
 apiVersion: apps/v1
 kind: StatefulSet
 metadata:
-  name: zookeepercluster-sample-default
+  name: zookeepercluster-sample-server-default
 status:
   availableReplicas: 1
   replicas: 1
diff --git a/test/e2e/setup/zookeeper.yaml b/test/e2e/setup/zookeeper.yaml
index e6283d4..ff007b3 100644
--- a/test/e2e/setup/zookeeper.yaml
+++ b/test/e2e/setup/zookeeper.yaml
@@ -9,9 +9,6 @@ metadata:
     app.kubernetes.io/created-by: zookeeper-operator
   name: zookeepercluster-sample
 spec:
-  image:
-    repository: docker.io/bitnami/zookeeper
-    tag: 3.9.1-debian-12-r15
   clusterConfig:
     listenerClass: external-unstable
   server:
diff --git a/test/e2e/vector/chainsaw-test.yaml b/test/e2e/vector/chainsaw-test.yaml
index 85eae99..88cec27 100644
--- a/test/e2e/vector/chainsaw-test.yaml
+++ b/test/e2e/vector/chainsaw-test.yaml
@@ -39,6 +39,7 @@ spec:
     - apply:
         file: hdfs.yaml
     - assert:
+        timeout: 360s
        file: hdfs-assert.yaml
     cleanup:
     - sleep:
diff --git a/test/e2e/vector/hdfs.yaml b/test/e2e/vector/hdfs.yaml
index 7989daf..7c4d4ef 100644
--- a/test/e2e/vector/hdfs.yaml
+++ b/test/e2e/vector/hdfs.yaml
@@ -27,7 +27,6 @@ spec:
       config:
         logging:
          enableVectorAgent: true
-
   dataNode:
     roleGroups:
       default: