Skip to content

Commit

Permalink
Merge pull request #368 from gianlucam76/release-0.18
Browse files Browse the repository at this point in the history
Release 0.18.2
  • Loading branch information
gianlucam76 authored Oct 19, 2023
2 parents b5f8649 + ddceedf commit c22f40f
Show file tree
Hide file tree
Showing 14 changed files with 67 additions and 36 deletions.
21 changes: 18 additions & 3 deletions .github/workflows/main.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: 1.20.0
go-version: 1.20.8
- name: Build
run: make build
- name: FMT
Expand All @@ -37,7 +37,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: 1.20.0
go-version: 1.20.8
- name: ut
run: make test
env:
Expand All @@ -50,7 +50,22 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: 1.20.0
go-version: 1.20.8
- name: Free Disk Space (Ubuntu)
uses: jlumbroso/free-disk-space@main
with:
# this might remove tools that are actually needed;
# if set to "true", it frees about 6 GB
tool-cache: false

# all of these default to true, but feel free to set to
# "false" if necessary for your workflow
android: true
dotnet: true
haskell: true
large-packages: true
docker-images: true
swap-storage: true
- name: fv
run: make create-cluster fv
env:
Expand Down
2 changes: 1 addition & 1 deletion Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ ARCH ?= amd64
OS ?= $(shell uname -s | tr A-Z a-z)
K8S_LATEST_VER ?= $(shell curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)
export CONTROLLER_IMG ?= $(REGISTRY)/$(IMAGE_NAME)
TAG ?= v0.18.1
TAG ?= v0.18.2

# Get cluster-api version and build ldflags
clusterapi := $(shell go list -m sigs.k8s.io/cluster-api)
Expand Down
4 changes: 3 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@
- [Quick Start](https://projectsveltos.github.io/sveltos/quick_start/)

# What is Projectsveltos?
Projectsveltos is mainly a Kubernetes add-on controller that simplifies the deployment and management of add-ons in Kubernetes clusters. With Sveltos controller, you can easily automate the deployment process and ensure consistency across your cluster environment. Sveltos can deploy raw Kubernetes YAMLs, Helm charts, or Kustomize or any combination of those.
Sveltos is a Kubernetes add-on controller that simplifies the deployment and management of add-ons and applications across multiple clusters. It runs in the management cluster and can programmatically deploy and manage add-ons and applications on any cluster in the fleet, including the management cluster itself. Sveltos supports a variety of add-on formats, including Helm charts, raw YAML, Kustomize, Carvel ytt, and Jsonnet.

## Addon deployment: how it works

Expand Down Expand Up @@ -63,6 +63,8 @@ spec:
As soon as a cluster is a match for the above ClusterProfile instance, all referenced features are automatically deployed in such a cluster.
![Kubernetes add-on deployment](https://github.com/projectsveltos/sveltos/blob/main/docs/assets/addons_deployment.gif)
Here is an example using Kustomize:
```yaml
Expand Down
2 changes: 1 addition & 1 deletion config/default/manager_image_patch.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -8,5 +8,5 @@ spec:
spec:
containers:
# Change the value of image field below to your controller image URL
- image: projectsveltos/addon-controller-amd64:v0.18.1
- image: projectsveltos/addon-controller-amd64:v0.18.2
name: controller
38 changes: 18 additions & 20 deletions controllers/clusterprofile_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -159,17 +159,16 @@ func (r *ClusterProfileReconciler) Reconcile(ctx context.Context, req ctrl.Reque

// Handle deleted clusterProfile
if !clusterProfile.DeletionTimestamp.IsZero() {
return r.reconcileDelete(ctx, clusterProfileScope)
return r.reconcileDelete(ctx, clusterProfileScope), nil
}

// Handle non-deleted clusterProfile
return r.reconcileNormal(ctx, clusterProfileScope)
return r.reconcileNormal(ctx, clusterProfileScope), nil
}

func (r *ClusterProfileReconciler) reconcileDelete(
ctx context.Context,
clusterProfileScope *scope.ClusterProfileScope,
) (reconcile.Result, error) {
clusterProfileScope *scope.ClusterProfileScope) reconcile.Result {

logger := clusterProfileScope.Logger
logger.V(logs.LogInfo).Info("Reconciling ClusterProfile delete")
Expand All @@ -178,27 +177,27 @@ func (r *ClusterProfileReconciler) reconcileDelete(

if err := r.cleanClusterSummaries(ctx, clusterProfileScope); err != nil {
logger.V(logs.LogInfo).Error(err, "failed to clean ClusterSummaries")
return reconcile.Result{}, err
return reconcile.Result{Requeue: true, RequeueAfter: deleteRequeueAfter}
}

if !r.allClusterSummariesGone(ctx, clusterProfileScope) {
logger.V(logs.LogInfo).Info("Not all cluster summaries are gone")
return reconcile.Result{Requeue: true, RequeueAfter: deleteRequeueAfter}, nil
return reconcile.Result{Requeue: true, RequeueAfter: deleteRequeueAfter}
}

if err := r.cleanClusterConfigurations(ctx, clusterProfileScope); err != nil {
logger.V(logs.LogInfo).Error(err, "failed to clean ClusterConfigurations")
return reconcile.Result{}, err
return reconcile.Result{Requeue: true, RequeueAfter: deleteRequeueAfter}
}

if err := r.cleanClusterReports(ctx, clusterProfileScope.ClusterProfile); err != nil {
logger.V(logs.LogInfo).Error(err, "failed to clean ClusterReports")
return reconcile.Result{}, err
return reconcile.Result{Requeue: true, RequeueAfter: deleteRequeueAfter}
}

if !r.canRemoveFinalizer(ctx, clusterProfileScope) {
logger.V(logs.LogInfo).Info("Cannot remove finalizer yet")
return reconcile.Result{Requeue: true, RequeueAfter: deleteRequeueAfter}, nil
return reconcile.Result{Requeue: true, RequeueAfter: deleteRequeueAfter}
}

if controllerutil.ContainsFinalizer(clusterProfileScope.ClusterProfile, configv1alpha1.ClusterProfileFinalizer) {
Expand All @@ -208,26 +207,25 @@ func (r *ClusterProfileReconciler) reconcileDelete(
r.cleanMaps(clusterProfileScope)

logger.V(logs.LogInfo).Info("Reconcile delete success")
return reconcile.Result{}, nil
return reconcile.Result{}
}

func (r *ClusterProfileReconciler) reconcileNormal(
ctx context.Context,
clusterProfileScope *scope.ClusterProfileScope,
) (reconcile.Result, error) {
clusterProfileScope *scope.ClusterProfileScope) reconcile.Result {

logger := clusterProfileScope.Logger
logger.V(logs.LogInfo).Info("Reconciling ClusterProfile")

if !controllerutil.ContainsFinalizer(clusterProfileScope.ClusterProfile, configv1alpha1.ClusterProfileFinalizer) {
if err := r.addFinalizer(ctx, clusterProfileScope); err != nil {
return reconcile.Result{}, err
return reconcile.Result{Requeue: true, RequeueAfter: normalRequeueAfter}
}
}

matchingCluster, err := r.getMatchingClusters(ctx, clusterProfileScope, logger)
if err != nil {
return reconcile.Result{}, err
return reconcile.Result{Requeue: true, RequeueAfter: normalRequeueAfter}
}

clusterProfileScope.SetMatchingClusterRefs(matchingCluster)
Expand All @@ -237,33 +235,33 @@ func (r *ClusterProfileReconciler) reconcileNormal(
// For each matching Sveltos/CAPI Cluster, create/update corresponding ClusterConfiguration
if err := r.updateClusterConfigurations(ctx, clusterProfileScope); err != nil {
logger.V(logs.LogInfo).Error(err, "failed to update ClusterConfigurations")
return reconcile.Result{}, err
return reconcile.Result{Requeue: true, RequeueAfter: normalRequeueAfter}
}
// For each matching Sveltos/CAPI Cluster, create or delete corresponding ClusterReport if needed
if err := r.updateClusterReports(ctx, clusterProfileScope); err != nil {
logger.V(logs.LogInfo).Error(err, "failed to update ClusterReports")
return reconcile.Result{}, err
return reconcile.Result{Requeue: true, RequeueAfter: normalRequeueAfter}
}
// For each matching Sveltos/CAPI Cluster, create/update corresponding ClusterSummary
if err := r.updateClusterSummaries(ctx, clusterProfileScope); err != nil {
logger.V(logs.LogInfo).Error(err, "failed to update ClusterSummaries")
return reconcile.Result{Requeue: true, RequeueAfter: normalRequeueAfter}, nil
return reconcile.Result{Requeue: true, RequeueAfter: normalRequeueAfter}
}

// For Sveltos/CAPI Cluster not matching ClusterProfile, deletes corresponding ClusterSummary
if err := r.cleanClusterSummaries(ctx, clusterProfileScope); err != nil {
logger.V(logs.LogInfo).Error(err, "failed to clean ClusterSummaries")
return reconcile.Result{}, err
return reconcile.Result{Requeue: true, RequeueAfter: normalRequeueAfter}
}
// For Sveltos/CAPI Cluster not matching ClusterProfile, removes ClusterProfile as OwnerReference
// from corresponding ClusterConfiguration
if err := r.cleanClusterConfigurations(ctx, clusterProfileScope); err != nil {
logger.V(logs.LogInfo).Error(err, "failed to clean ClusterConfigurations")
return reconcile.Result{}, err
return reconcile.Result{Requeue: true, RequeueAfter: normalRequeueAfter}
}

logger.V(logs.LogInfo).Info("Reconcile success")
return reconcile.Result{}, nil
return reconcile.Result{}
}

// SetupWithManager sets up the controller with the Manager.
Expand Down
5 changes: 3 additions & 2 deletions controllers/clusterprofile_controller_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -385,11 +385,12 @@ var _ = Describe("ClusterProfile: Reconciler", func() {
// Since ClusterSummary won't be removed, ClusterProfile's finalizer will not be
// removed.

_, err := reconciler.Reconcile(context.TODO(), ctrl.Request{
result, err := reconciler.Reconcile(context.TODO(), ctrl.Request{
NamespacedName: clusterProfileName,
})
// Because there was one ClusterSummary, reconciliation deleted it and requested a requeue (no error is returned)
Expect(err).To(HaveOccurred())
Expect(err).To(BeNil())
Expect(result.Requeue).To(BeTrue())

currentClusterProfile := &configv1alpha1.ClusterProfile{}
err = c.Get(context.TODO(), clusterProfileName, currentClusterProfile)
Expand Down
15 changes: 15 additions & 0 deletions examples/deploy_kubevela.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
apiVersion: config.projectsveltos.io/v1alpha1
kind: ClusterProfile
metadata:
name: kubevela-core
spec:
clusterSelector: env=production
syncMode: Continuous
helmCharts:
- repositoryURL: https://kubevela.github.io/charts
repositoryName: kubevela
chartName: kubevela/vela-core
chartVersion: 1.9.6
releaseName: kubevela-core-latest
releaseNamespace: vela-system
helmChartAction: Install
2 changes: 1 addition & 1 deletion go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ require (
github.com/onsi/ginkgo/v2 v2.11.0
github.com/onsi/gomega v1.27.8
github.com/pkg/errors v0.9.1
github.com/projectsveltos/libsveltos v0.18.1
github.com/projectsveltos/libsveltos v0.18.2
github.com/prometheus/client_golang v1.16.0
github.com/spf13/pflag v1.0.5
github.com/yuin/gopher-lua v1.1.0
Expand Down
4 changes: 2 additions & 2 deletions go.sum
Original file line number Diff line number Diff line change
Expand Up @@ -557,8 +557,8 @@ github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSg
github.com/poy/onpar v0.0.0-20200406201722-06f95a1c68e8/go.mod h1:nSbFQvMj97ZyhFRSJYtut+msi4sOY6zJDGCdSc+/rZU=
github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY=
github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg=
github.com/projectsveltos/libsveltos v0.18.1 h1:pJD4BXavrV/HtXyrFrGPPxjWBZuHmomvrw8esVGJ3+Y=
github.com/projectsveltos/libsveltos v0.18.1/go.mod h1:4O07OQ3apCERUugy+9k3KOc1MByW727b2dMVAwnLHnM=
github.com/projectsveltos/libsveltos v0.18.2 h1:Qw8qbqCJKWHGo8pboEJvxMduw8C8xilV28W7pcUUxcM=
github.com/projectsveltos/libsveltos v0.18.2/go.mod h1:4O07OQ3apCERUugy+9k3KOc1MByW727b2dMVAwnLHnM=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
Expand Down
2 changes: 1 addition & 1 deletion manifest/manifest.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -2191,7 +2191,7 @@ spec:
- --v=5
command:
- /manager
image: projectsveltos/addon-controller-amd64:v0.18.1
image: projectsveltos/addon-controller-amd64:v0.18.2
livenessProbe:
httpGet:
path: /healthz
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -63,7 +63,7 @@ spec:
- --run-mode=do-not-send-updates
command:
- /manager
image: projectsveltos/drift-detection-manager-amd64:v0.18.1
image: projectsveltos/drift-detection-manager-amd64:v0.18.2
livenessProbe:
httpGet:
path: /healthz
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@ spec:
- --run-mode=do-not-send-updates
command:
- /manager
image: projectsveltos/drift-detection-manager-amd64:v0.18.1
image: projectsveltos/drift-detection-manager-amd64:v0.18.2
livenessProbe:
httpGet:
path: /healthz
Expand Down
2 changes: 1 addition & 1 deletion pkg/drift-detection/drift-detection-manager.go
Original file line number Diff line number Diff line change
Expand Up @@ -176,7 +176,7 @@ spec:
- --run-mode=do-not-send-updates
command:
- /manager
image: projectsveltos/drift-detection-manager-amd64:v0.18.1
image: projectsveltos/drift-detection-manager-amd64:v0.18.2
livenessProbe:
httpGet:
path: /healthz
Expand Down
2 changes: 1 addition & 1 deletion pkg/drift-detection/drift-detection-manager.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -158,7 +158,7 @@ spec:
- --run-mode=do-not-send-updates
command:
- /manager
image: projectsveltos/drift-detection-manager-amd64:v0.18.1
image: projectsveltos/drift-detection-manager-amd64:v0.18.2
livenessProbe:
httpGet:
path: /healthz
Expand Down

0 comments on commit c22f40f

Please sign in to comment.