Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

some cleanup, deprecate emptyDir usage #174

Merged
merged 1 commit into from
Jul 20, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions pkg/apis/operator.min.io/v1/types.go
Original file line number Diff line number Diff line change
Expand Up @@ -177,6 +177,11 @@ type MCSConfig struct {
Metadata *metav1.ObjectMeta `json:"metadata,omitempty"`
}

// EqualImage reports whether the configured MCS image is the same as
// currentImage. Used by the controller to decide if the MCS Deployment
// needs a rolling update.
func (c MCSConfig) EqualImage(currentImage string) bool {
	return currentImage == c.Image
}

// KESConfig defines the specifications for KES StatefulSet
type KESConfig struct {
// Replicas defines number of pods for KES StatefulSet.
Expand Down
47 changes: 21 additions & 26 deletions pkg/controller/cluster/main-controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -237,7 +237,7 @@ func NewController(

// Start will set up the event handlers for types we are interested in, as well
// as syncing informer caches and starting workers. It will block until stopCh
// is closed, at which point it w ill shutdown the workqueue and wait for
// is closed, at which point it will shutdown the workqueue and wait for
// workers to finish processing their current work items.
func (c *Controller) Start(threadiness int, stopCh <-chan struct{}) error {

Expand Down Expand Up @@ -278,12 +278,12 @@ func (c *Controller) runWorker() {
// attempt to process it, by calling the syncHandler.
func (c *Controller) processNextWorkItem() bool {
obj, shutdown := c.workqueue.Get()

if shutdown {
return false
}

// We wrap this block in a func so we can defer c.workqueue.Done.
err := func(obj interface{}) error {
processItem := func(obj interface{}) error {
// We call Done here so the workqueue knows we have finished
// processing this item. We also must remember to call Forget if we
// do not want this work item being re-queued. For example, we do
Expand Down Expand Up @@ -317,9 +317,9 @@ func (c *Controller) processNextWorkItem() bool {
c.workqueue.Forget(obj)
klog.Infof("Successfully synced '%s'", key)
return nil
}(obj)
}

if err != nil {
if err := processItem(obj); err != nil {
runtime.HandleError(err)
return true
}
Expand All @@ -335,7 +335,7 @@ func (c *Controller) syncHandler(key string) error {
uOpts := metav1.UpdateOptions{}
gOpts := metav1.GetOptions{}

var d *appsv1.Deployment
var mcsDeployment *appsv1.Deployment

// Convert the namespace/name string into a distinct namespace and name
namespace, name, err := cache.SplitMetaNamespaceKey(key)
Expand Down Expand Up @@ -528,8 +528,8 @@ func (c *Controller) syncHandler(key string) error {
return pErr
}
// Create MCS Deployment
d = deployments.NewForMCS(mi)
_, err = c.kubeClientSet.AppsV1().Deployments(mi.Namespace).Create(ctx, d, cOpts)
mcsDeployment = deployments.NewForMCS(mi)
_, err = c.kubeClientSet.AppsV1().Deployments(mi.Namespace).Create(ctx, mcsDeployment, cOpts)
if err != nil {
klog.V(2).Infof(err.Error())
return err
Expand Down Expand Up @@ -617,14 +617,15 @@ func (c *Controller) syncHandler(key string) error {
return fmt.Errorf(msg)
}

if mi.HasMCSEnabled() && d != nil && mi.Spec.MCS.Image != d.Spec.Template.Spec.Containers[0].Image {
if mi.HasMCSEnabled() && mcsDeployment != nil && !mi.Spec.MCS.EqualImage(mcsDeployment.Spec.Template.Spec.Containers[0].Image) {
mi, err = c.updateMinIOInstanceStatus(ctx, mi, updatingMCSVersion, ss.Status.Replicas)
if err != nil {
return err
}
klog.V(4).Infof("Updating MinIOInstance %s mcs version %s, to: %s", name, mi.Spec.MCS.Image, d.Spec.Template.Spec.Containers[0].Image)
d = deployments.NewForMCS(mi)
_, err = c.kubeClientSet.AppsV1().Deployments(mi.Namespace).Update(ctx, d, uOpts)
klog.V(4).Infof("Updating MinIOInstance %s mcs version %s, to: %s", name,
mi.Spec.MCS.Image, mcsDeployment.Spec.Template.Spec.Containers[0].Image)
mcsDeployment = deployments.NewForMCS(mi)
_, err = c.kubeClientSet.AppsV1().Deployments(mi.Namespace).Update(ctx, mcsDeployment, uOpts)
// If an error occurs during Update, we'll requeue the item so we can
// attempt processing again later. This could have been caused by a
// temporary network failure, or any other transient reason.
Expand All @@ -636,10 +637,7 @@ func (c *Controller) syncHandler(key string) error {
// Finally, we update the status block of the MinIOInstance resource to reflect the
// current state of the world
_, err = c.updateMinIOInstanceStatus(ctx, mi, ready, ss.Status.Replicas)
if err != nil {
return err
}
return nil
return err
}

func (c *Controller) checkAndCreateMinIOCSR(ctx context.Context, nsName types.NamespacedName, mi *miniov1.MinIOInstance, createClientCert bool) error {
Expand Down Expand Up @@ -733,22 +731,19 @@ func (c *Controller) updateMinIOInstanceStatus(ctx context.Context, minioInstanc
minioInstanceCopy := minioInstance.DeepCopy()
minioInstanceCopy.Status.AvailableReplicas = availableReplicas
minioInstanceCopy.Status.CurrentState = currentState
// If the CustomResourceSubresources feature gate is not enabled,
// we must use Update instead of UpdateStatus to update the Status block of the MinIOInstance resource.
// UpdateStatus will not allow changes to the Spec of the resource,
// which is ideal for ensuring nothing other than resource status has been updated.
mi, err := c.minioClientSet.OperatorV1().MinIOInstances(minioInstance.Namespace).UpdateStatus(ctx, minioInstanceCopy, opts)
time.Sleep(time.Second * 2)
return mi, err
// If the CustomResourceSubresources feature gate is not enabled,
// we must use Update instead of UpdateStatus to update the Status block of the MinIOInstance resource.
// UpdateStatus will not allow changes to the Spec of the resource,
// which is ideal for ensuring nothing other than resource status has been updated.
return c.minioClientSet.OperatorV1().MinIOInstances(minioInstance.Namespace).UpdateStatus(ctx, minioInstanceCopy, opts)
}

// enqueueMinIOInstance takes a MinIOInstance resource and converts it into a namespace/name
// string which is then put onto the work queue. This method should *not* be
// passed resources of any type other than MinIOInstance.
func (c *Controller) enqueueMinIOInstance(obj interface{}) {
var key string
var err error
if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil {
key, err := cache.MetaNamespaceKeyFunc(obj)
if err != nil {
runtime.HandleError(err)
return
}
Expand Down
16 changes: 1 addition & 15 deletions pkg/resources/statefulsets/minio-statefulset.go
Original file line number Diff line number Diff line change
Expand Up @@ -221,23 +221,9 @@ func minioSecurityContext(mi *miniov1.MinIOInstance) *corev1.PodSecurityContext
return &securityContext
}

// getVolumesForContainer returns the pod volumes for the MinIO StatefulSet.
// When the user has not supplied a VolumeClaimTemplate, one EmptyDir volume
// is created per zone so MinIO can still be deployed without persistent
// storage; otherwise an empty (but non-nil) slice is returned.
func getVolumesForContainer(mi *miniov1.MinIOInstance) []corev1.Volume {
	podVolumes := []corev1.Volume{}
	// A volume claim template means PV-backed storage; no EmptyDir needed.
	if mi.Spec.VolumeClaimTemplate != nil {
		return podVolumes
	}
	for _, z := range mi.Spec.Zones {
		podVolumes = append(podVolumes, corev1.Volume{
			Name: z.Name,
			VolumeSource: corev1.VolumeSource{
				EmptyDir: &corev1.EmptyDirVolumeSource{Medium: ""},
			},
		})
	}
	return podVolumes
}

// NewForMinIO creates a new StatefulSet for the given Cluster.
func NewForMinIO(mi *miniov1.MinIOInstance, serviceName string, hostsTemplate string) *appsv1.StatefulSet {
// If a PV isn't specified just use a EmptyDir volume
var podVolumes = getVolumesForContainer(mi)
var podVolumes []corev1.Volume
var replicas = mi.MinIOReplicas()
var serverCertSecret string
var serverCertPaths = []corev1.KeyToPath{
Expand Down