feat: the propagation controller can now propagate the Application operation field. Also updated the e2e propagation test to use AppSet progressive sync instead of automated sync. Since automated sync is now turned off in the e2e test appset, the test only passes when the operation field is properly propagated, which triggers a sync.

Signed-off-by: Mike Ng <ming@redhat.com>
mikeshng committed Nov 21, 2024
1 parent 30ca210 commit bdd343e
Showing 4 changed files with 70 additions and 2 deletions.
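The substance of the change: when the hub Application carries an `operation` stanza (which the ApplicationSet RollingSync strategy sets to request a sync), the propagation controller now copies that stanza into the Application it embeds in the ManifestWork. Below is a minimal, self-contained sketch of that copy using the unstructured API; the object values are illustrative, not the repository's exact wiring.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func main() {
	// A hub Application roughly as the ApplicationSet RollingSync controller
	// leaves it: the "operation" stanza is the request for a sync.
	app := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "argoproj.io/v1alpha1",
		"kind":       "Application",
		"metadata":   map[string]interface{}{"name": "cluster1-guestbook-app"},
		"operation": map[string]interface{}{
			"initiatedBy": map[string]interface{}{
				"automated": true,
				"username":  "applicationset-controller",
			},
			"sync": map[string]interface{}{
				"syncOptions": []interface{}{"CreateNamespace=true"},
			},
		},
	}}

	// Copy the operation field into the payload destined for the ManifestWork,
	// mirroring the helper change below; without it, an Application with no
	// automated sync policy would never sync on the managed cluster.
	payload := unstructured.Unstructured{Object: map[string]interface{}{}}
	if operation, ok := app.Object["operation"].(map[string]interface{}); ok {
		payload.Object["operation"] = operation
	}

	op, found, _ := unstructured.NestedMap(payload.Object, "operation")
	fmt.Println(found, op)
}
```

Running this simply prints the carried-over operation map, which is the trigger the managed cluster's Argo CD acts on.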
12 changes: 10 additions & 2 deletions e2e/hub_app/guestbook-app-set.yaml
@@ -11,11 +11,21 @@ spec:
          matchLabels:
            cluster.open-cluster-management.io/placement: guestbook-app-placement
        requeueAfterSeconds: 10
+  strategy:
+    type: RollingSync
+    rollingSync:
+      steps:
+        - matchExpressions:
+            - key: envLabel
+              operator: In
+              values:
+                - env-dev
  template:
    metadata:
      name: '{{name}}-guestbook-app'
      labels:
        apps.open-cluster-management.io/pull-to-ocm-managed-cluster: 'true'
+        envLabel: 'env-dev'
      annotations:
        argocd.argoproj.io/skip-reconcile: 'true'
        apps.open-cluster-management.io/ocm-managed-cluster: '{{name}}'
@@ -30,7 +40,5 @@ spec:
        server: https://kubernetes.default.svc
        namespace: guestbook
      syncPolicy:
-        automated:
-          prune: true
        syncOptions:
          - CreateNamespace=true
10 changes: 10 additions & 0 deletions e2e/run_e2e.sh
@@ -41,6 +41,10 @@ kubectl -n argocd scale deployment/argocd-redis --replicas 0
kubectl -n argocd scale deployment/argocd-notifications-controller --replicas 0
kubectl -n argocd scale statefulset/argocd-application-controller --replicas 0

+# enable progressive sync
+kubectl -n argocd patch configmap argocd-cmd-params-cm --type merge -p '{"data":{"applicationsetcontroller.enable.progressive.syncs":"true"}}'
+kubectl -n argocd rollout restart deployment argocd-applicationset-controller
+
sleep 60s

echo "TEST Propgation controller startup"
@@ -131,6 +135,12 @@ else
    echo "Propagation FAILED: manifestwork does not contain appSet hash"
    exit 1
fi
+if kubectl -n cluster1 get manifestwork -o yaml | grep RollingSync; then
+    echo "Propagation: manifestwork contains operation RollingSync"
+else
+    echo "Propagation FAILED: manifestwork does not contain operation RollingSync"
+    exit 1
+fi
kubectl config use-context kind-cluster1
if kubectl -n argocd get app cluster1-guestbook-app | grep Synced | grep Healthy; then
    echo "Propagation: managed cluster application cluster1-guestbook-app created, synced and healthy"
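The script checks for the propagated operation by grepping the ManifestWork YAML for `RollingSync`. For anyone scripting the same check in Go, a rough sketch with the dynamic client is below; the kubeconfig path is an assumption, the `cluster1` namespace comes from the test setup, and `spec.workload.manifests` is the standard ManifestWork layout.

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Hub cluster kubeconfig (assumed to be the default ~/.kube/config).
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client, err := dynamic.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	gvr := schema.GroupVersionResource{
		Group:    "work.open-cluster-management.io",
		Version:  "v1",
		Resource: "manifestworks",
	}
	works, err := client.Resource(gvr).Namespace("cluster1").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}

	for _, w := range works.Items {
		// Each ManifestWork wraps its payload under spec.workload.manifests;
		// look for the propagated operation field on the embedded Application.
		manifests, _, _ := unstructured.NestedSlice(w.Object, "spec", "workload", "manifests")
		for _, m := range manifests {
			obj, ok := m.(map[string]interface{})
			if !ok {
				continue
			}
			if op, found, _ := unstructured.NestedMap(obj, "operation"); found {
				fmt.Printf("%s carries an operation: %v\n", w.GetName(), op)
			}
		}
	}
}
```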
7 changes: 7 additions & 0 deletions propagation-controller/application/helper.go
@@ -130,6 +130,7 @@ func getAppSetOwnerName(ownerRefs []metav1.OwnerReference) string {
// - reset the meta
// - set the namespace value
// - ensures the Application Destination is set to in-cluster resource deployment
+// - ensures the operation field is also set if present
func prepareApplicationForWorkPayload(application *unstructured.Unstructured) unstructured.Unstructured {
    newApp := &unstructured.Unstructured{}
    newApp.SetGroupVersionKind(schema.GroupVersionKind{
@@ -141,6 +142,12 @@ func prepareApplicationForWorkPayload(application *unstructured.Unstructured) un
    newApp.SetName(application.GetName())
    newApp.SetFinalizers(application.GetFinalizers())

+    // set the operation field
+    if operation, ok := application.Object["operation"].(map[string]interface{}); ok {
+        newApp.Object["operation"] = operation
+    }
+
    // set the spec field
    if newSpec, ok := application.Object["spec"].(map[string]interface{}); ok {
        if destination, ok := newSpec["destination"].(map[string]interface{}); ok {
            // empty the name
43 changes: 43 additions & 0 deletions propagation-controller/application/helper_test.go
@@ -314,6 +314,24 @@ func Test_prepareApplicationForWorkPayload(t *testing.T) {
            "server": "originalServer",
        },
    }
+    app.Object["operation"] = map[string]interface{}{
+        "info": []interface{}{
+            map[string]interface{}{
+                "name":  "Reason",
+                "value": "ApplicationSet RollingSync triggered a sync of this Application resource.",
+            },
+        },
+        "initiatedBy": map[string]interface{}{
+            "automated": true,
+            "username":  "applicationset-controller",
+        },
+        "retry": map[string]interface{}{},
+        "sync": map[string]interface{}{
+            "syncOptions": []interface{}{
+                "CreateNamespace=true",
+            },
+        },
+    }

    type args struct {
        application *unstructured.Unstructured
@@ -344,6 +362,24 @@ func Test_prepareApplicationForWorkPayload(t *testing.T) {
                        "server": KubernetesInternalAPIServerAddr,
                    },
                }
+                expectedApp.Object["operation"] = map[string]interface{}{
+                    "info": []interface{}{
+                        map[string]interface{}{
+                            "name":  "Reason",
+                            "value": "ApplicationSet RollingSync triggered a sync of this Application resource.",
+                        },
+                    },
+                    "initiatedBy": map[string]interface{}{
+                        "automated": true,
+                        "username":  "applicationset-controller",
+                    },
+                    "retry": map[string]interface{}{},
+                    "sync": map[string]interface{}{
+                        "syncOptions": []interface{}{
+                            "CreateNamespace=true",
+                        },
+                    },
+                }
                return expectedApp
            }(),
        },
@@ -371,6 +407,13 @@ func Test_prepareApplicationForWorkPayload(t *testing.T) {
                t.Errorf("prepareApplicationForWorkPayload() Spec = %v, want %v", gotSpec, wantSpec)
            }

+            gotOperation, _, _ := unstructured.NestedMap(got.Object, "operation")
+            wantOperation, _, _ := unstructured.NestedMap(tt.want.Object, "operation")
+
+            if !reflect.DeepEqual(gotOperation, wantOperation) {
+                t.Errorf("prepareApplicationForWorkPayload() Operation = %v, want %v", gotOperation, wantOperation)
+            }
+
            if !reflect.DeepEqual(got.GetLabels(), tt.want.GetLabels()) {
                t.Errorf("prepareApplicationForWorkPayload() Labels = %v, want %v", got.GetLabels(), tt.want.GetLabels())
            }
