Skip to content

Commit

Permalink
more fixes
Browse files Browse the repository at this point in the history
Signed-off-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>
  • Loading branch information
crenshaw-dev committed Dec 16, 2024
1 parent ea853a3 commit f40cad8
Show file tree
Hide file tree
Showing 13 changed files with 106 additions and 107 deletions.
3 changes: 0 additions & 3 deletions applicationset/controllers/applicationset_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -721,9 +721,6 @@ func (r *ApplicationSetReconciler) getCurrentApplications(ctx context.Context, a
// deleteInCluster will delete Applications that are currently on the cluster, but not in appList.
// The function must be called only after all generators have been called and have generated applications.
func (r *ApplicationSetReconciler) deleteInCluster(ctx context.Context, logCtx *log.Entry, applicationSet argov1alpha1.ApplicationSet, desiredApplications []argov1alpha1.Application) error {
// settingsMgr := settings.NewSettingsManager(context.TODO(), r.KubeClientset, applicationSet.Namespace)
// argoDB := db.NewDB(applicationSet.Namespace, settingsMgr, r.KubeClientset)
// clusterList, err := argoDB.ListClusters(ctx)
clusterList, err := utils.ListClusters(ctx, r.KubeClientset, r.ArgoCDNamespace)
if err != nil {
return fmt.Errorf("error listing clusters: %w", err)
Expand Down
6 changes: 5 additions & 1 deletion cmd/argocd/commands/admin/app.go
Original file line number Diff line number Diff line change
Expand Up @@ -437,7 +437,11 @@ func reconcileApplications(
sources = append(sources, app.Spec.GetSource())
revisions = append(revisions, app.Spec.GetSource().TargetRevision)

res, err := appStateManager.CompareAppState(&app, proj, revisions, sources, false, false, nil, false, false)
destCluster, err := argo.GetDestinationCluster(ctx, app.Spec.Destination, argoDB)
if err != nil {
return nil, fmt.Errorf("error getting destination cluster: %w", err)
}
res, err := appStateManager.CompareAppState(destCluster, &app, proj, revisions, sources, false, false, nil, false, false)
if err != nil {
return nil, fmt.Errorf("error comparing app states: %w", err)
}
Expand Down
44 changes: 23 additions & 21 deletions controller/appcontroller.go
Original file line number Diff line number Diff line change
Expand Up @@ -473,7 +473,7 @@ func (ctrl *ApplicationController) setAppManagedResources(destCluster *appv1.Clu
logCtx = logCtx.WithField("time_ms", time.Since(ts.StartTime).Milliseconds())
logCtx.Debug("Finished setting app managed resources")
}()
managedResources, err := ctrl.hideSecretData(a, comparisonResult)
managedResources, err := ctrl.hideSecretData(destCluster, a, comparisonResult)
ts.AddCheckpoint("hide_secret_data_ms")
if err != nil {
return nil, fmt.Errorf("error getting managed resources: %w", err)
Expand Down Expand Up @@ -576,7 +576,7 @@ func (ctrl *ApplicationController) getResourceTree(destCluster *appv1.Cluster, a
managedResourcesKeys = append(managedResourcesKeys, kube.GetResourceKey(live))
}
}
err = ctrl.stateCache.IterateHierarchyV2(a.Spec.Destination.Server, managedResourcesKeys, func(child appv1.ResourceNode, appName string) bool {
err = ctrl.stateCache.IterateHierarchyV2(destCluster.Server, managedResourcesKeys, func(child appv1.ResourceNode, appName string) bool {
permitted, _ := proj.IsResourcePermitted(schema.GroupKind{Group: child.ResourceRef.Group, Kind: child.ResourceRef.Kind}, child.Namespace, destCluster, func(project string) ([]*appv1.Cluster, error) {
clusters, err := ctrl.db.GetProjectClusters(context.TODO(), project)
if err != nil {
Expand Down Expand Up @@ -642,15 +642,15 @@ func (ctrl *ApplicationController) getResourceTree(destCluster *appv1.Cluster, a
})
ts.AddCheckpoint("process_orphaned_resources_ms")

hosts, err := ctrl.getAppHosts(a, nodes)
hosts, err := ctrl.getAppHosts(destCluster, a, nodes)
if err != nil {
return nil, fmt.Errorf("failed to get app hosts: %w", err)
}
ts.AddCheckpoint("get_app_hosts_ms")
return &appv1.ApplicationTree{Nodes: nodes, OrphanedNodes: orphanedNodes, Hosts: hosts}, nil
}

func (ctrl *ApplicationController) getAppHosts(a *appv1.Application, appNodes []appv1.ResourceNode) ([]appv1.HostInfo, error) {
func (ctrl *ApplicationController) getAppHosts(destCluster *appv1.Cluster, a *appv1.Application, appNodes []appv1.ResourceNode) ([]appv1.HostInfo, error) {
ts := stats.NewTimingStats()
defer func() {
logCtx := getAppLog(a)
Expand All @@ -675,7 +675,7 @@ func (ctrl *ApplicationController) getAppHosts(a *appv1.Application, appNodes []
allNodesInfo := map[string]statecache.NodeInfo{}
allPodsByNode := map[string][]statecache.PodInfo{}
appPodsByNode := map[string][]statecache.PodInfo{}
err := ctrl.stateCache.IterateResources(a.Spec.Destination.Server, func(res *clustercache.Resource, info *statecache.ResourceInfo) {
err := ctrl.stateCache.IterateResources(destCluster.Server, func(res *clustercache.Resource, info *statecache.ResourceInfo) {
key := res.ResourceKey()

switch {
Expand Down Expand Up @@ -749,7 +749,7 @@ func (ctrl *ApplicationController) getAppHosts(a *appv1.Application, appNodes []
return hosts, nil
}

func (ctrl *ApplicationController) hideSecretData(app *appv1.Application, comparisonResult *comparisonResult) ([]*appv1.ResourceDiff, error) {
func (ctrl *ApplicationController) hideSecretData(destCluster *appv1.Cluster, app *appv1.Application, comparisonResult *comparisonResult) ([]*appv1.ResourceDiff, error) {
items := make([]*appv1.ResourceDiff, len(comparisonResult.managedResources))
for i := range comparisonResult.managedResources {
res := comparisonResult.managedResources[i]
Expand Down Expand Up @@ -788,7 +788,7 @@ func (ctrl *ApplicationController) hideSecretData(app *appv1.Application, compar
return nil, fmt.Errorf("error getting tracking method: %w", err)
}

clusterCache, err := ctrl.stateCache.GetClusterCache(app.Spec.Destination.Server)
clusterCache, err := ctrl.stateCache.GetClusterCache(destCluster.Server)
if err != nil {
return nil, fmt.Errorf("error getting cluster cache: %w", err)
}
Expand Down Expand Up @@ -1112,7 +1112,7 @@ func (ctrl *ApplicationController) shouldBeDeleted(app *appv1.Application, obj *
}

func (ctrl *ApplicationController) getPermittedAppLiveObjects(destCluster *appv1.Cluster, app *appv1.Application, proj *appv1.AppProject, projectClusters func(project string) ([]*appv1.Cluster, error)) (map[kube.ResourceKey]*unstructured.Unstructured, error) {
objsMap, err := ctrl.stateCache.GetManagedLiveObjs(app, []*unstructured.Unstructured{})
objsMap, err := ctrl.stateCache.GetManagedLiveObjs(destCluster, app, []*unstructured.Unstructured{})
if err != nil {
return nil, err
}
Expand Down Expand Up @@ -1229,7 +1229,7 @@ func (ctrl *ApplicationController) finalizeApplicationDeletion(app *appv1.Applic
return err
}

done, err := ctrl.executePostDeleteHooks(app, proj, objsMap, config, logCtx)
done, err := ctrl.executePostDeleteHooks(destCluster, app, proj, objsMap, config, logCtx)
if err != nil {
return err
}
Expand Down Expand Up @@ -1382,16 +1382,17 @@ func (ctrl *ApplicationController) processRequestedAppOperation(app *appv1.Appli
}
ts.AddCheckpoint("initial_operation_stage_ms")

if _, err := argo.GetDestinationCluster(context.Background(), app.Spec.Destination, ctrl.db); err != nil {
destCluster, err := argo.GetDestinationCluster(context.Background(), app.Spec.Destination, ctrl.db)
if err != nil {
state.Phase = synccommon.OperationFailed
state.Message = err.Error()
} else {
ctrl.appStateManager.SyncAppState(app, state)
ctrl.appStateManager.SyncAppState(destCluster, app, state)
}
ts.AddCheckpoint("validate_and_sync_app_state_ms")

// Check whether application is allowed to use project
_, err := ctrl.getAppProj(app)
_, err = ctrl.getAppProj(app)
ts.AddCheckpoint("get_app_proj_ms")
if err != nil {
state.Phase = synccommon.OperationError
Expand Down Expand Up @@ -1683,9 +1684,16 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo
sources = append(sources, app.Spec.GetSource())
}

compareResult, err := ctrl.appStateManager.CompareAppState(app, project, revisions, sources,
destCluster, err := argo.GetDestinationCluster(context.Background(), app.Spec.Destination, ctrl.db)
if err != nil {
logCtx.Errorf("Failed to get destination cluster: %v", err)
return
}

compareResult, err := ctrl.appStateManager.CompareAppState(destCluster, app, project, revisions, sources,
refreshType == appv1.RefreshTypeHard,
comparisonLevel == CompareWithLatestForceResolve, localManifests, hasMultipleSources, false)

ts.AddCheckpoint("compare_app_state_ms")

if goerrors.Is(err, CompareStateRepoError) {
Expand All @@ -1699,12 +1707,6 @@ func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext boo

ctrl.normalizeApplication(origApp, app)
ts.AddCheckpoint("normalize_application_ms")

destCluster, err := argo.GetDestinationCluster(context.Background(), app.Spec.Destination, ctrl.db)
if err != nil {
logCtx.Errorf("Failed to get destination cluster: %v", err)
return
}
tree, err := ctrl.setAppManagedResources(destCluster, app, compareResult)
ts.AddCheckpoint("set_app_managed_resources_ms")
if err != nil {
Expand Down Expand Up @@ -2181,11 +2183,11 @@ func (ctrl *ApplicationController) canProcessApp(obj interface{}) bool {
}
}

cluster, err := ctrl.db.GetCluster(context.Background(), app.Spec.Destination.Server)
destCluster, err := argo.GetDestinationCluster(context.Background(), app.Spec.Destination, ctrl.db)
if err != nil {
return ctrl.clusterSharding.IsManagedCluster(nil)
}
return ctrl.clusterSharding.IsManagedCluster(cluster)
return ctrl.clusterSharding.IsManagedCluster(destCluster)
}

func (ctrl *ApplicationController) newApplicationInformerAndLister() (cache.SharedIndexInformer, applisters.ApplicationLister) {
Expand Down
4 changes: 2 additions & 2 deletions controller/appcontroller_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -936,7 +936,7 @@ func TestFinalizeAppDeletion(t *testing.T) {
})
require.NoError(t, err)
assert.True(t, patched)
objsMap, err := ctrl.stateCache.GetManagedLiveObjs(app, []*unstructured.Unstructured{})
objsMap, err := ctrl.stateCache.GetManagedLiveObjs(&v1alpha1.Cluster{Server: "test", Name: "test"}, app, []*unstructured.Unstructured{})
if err != nil {
require.NoError(t, err)
}
Expand Down Expand Up @@ -2185,7 +2185,7 @@ func TestGetAppHosts(t *testing.T) {
})).Return(nil)
ctrl.stateCache = mockStateCache

hosts, err := ctrl.getAppHosts(app, []v1alpha1.ResourceNode{{
hosts, err := ctrl.getAppHosts(&v1alpha1.Cluster{Server: "test", Name: "test"}, app, []v1alpha1.ResourceNode{{
ResourceRef: v1alpha1.ResourceRef{Name: "pod1", Namespace: "default", Kind: kube.PodKind},
Info: []v1alpha1.InfoItem{{
Name: "Host",
Expand Down
6 changes: 3 additions & 3 deletions controller/cache/cache.go
Original file line number Diff line number Diff line change
Expand Up @@ -128,7 +128,7 @@ type LiveStateCache interface {
// Executes give callback against resources specified by the keys and all its children
IterateHierarchyV2(server string, keys []kube.ResourceKey, action func(child appv1.ResourceNode, appName string) bool) error
// Returns state of live nodes which correspond for target nodes of specified application.
GetManagedLiveObjs(a *appv1.Application, targetObjs []*unstructured.Unstructured) (map[kube.ResourceKey]*unstructured.Unstructured, error)
GetManagedLiveObjs(destCluster *appv1.Cluster, a *appv1.Application, targetObjs []*unstructured.Unstructured) (map[kube.ResourceKey]*unstructured.Unstructured, error)
// IterateResources iterates all resource stored in cache
IterateResources(server string, callback func(res *clustercache.Resource, info *ResourceInfo)) error
// Returns all top level resources (resources without owner references) of a specified namespace
Expand Down Expand Up @@ -695,8 +695,8 @@ func (c *liveStateCache) GetNamespaceTopLevelResources(server string, namespace
return res, nil
}

func (c *liveStateCache) GetManagedLiveObjs(a *appv1.Application, targetObjs []*unstructured.Unstructured) (map[kube.ResourceKey]*unstructured.Unstructured, error) {
clusterInfo, err := c.getSyncedCluster(a.Spec.Destination.Server)
func (c *liveStateCache) GetManagedLiveObjs(destCluster *appv1.Cluster, a *appv1.Application, targetObjs []*unstructured.Unstructured) (map[kube.ResourceKey]*unstructured.Unstructured, error) {
clusterInfo, err := c.getSyncedCluster(destCluster.Server)
if err != nil {
return nil, fmt.Errorf("failed to get cluster info for %q: %w", a.Spec.Destination.Server, err)
}
Expand Down
2 changes: 1 addition & 1 deletion controller/cache/mocks/LiveStateCache.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

4 changes: 2 additions & 2 deletions controller/hook.go
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ func isPostDeleteHook(obj *unstructured.Unstructured) bool {
return false
}

func (ctrl *ApplicationController) executePostDeleteHooks(app *v1alpha1.Application, proj *v1alpha1.AppProject, liveObjs map[kube.ResourceKey]*unstructured.Unstructured, config *rest.Config, logCtx *log.Entry) (bool, error) {
func (ctrl *ApplicationController) executePostDeleteHooks(destCluster *v1alpha1.Cluster, app *v1alpha1.Application, proj *v1alpha1.AppProject, liveObjs map[kube.ResourceKey]*unstructured.Unstructured, config *rest.Config, logCtx *log.Entry) (bool, error) {
appLabelKey, err := ctrl.settingsMgr.GetAppInstanceLabelKey()
if err != nil {
return false, err
Expand All @@ -51,7 +51,7 @@ func (ctrl *ApplicationController) executePostDeleteHooks(app *v1alpha1.Applicat
revisions = append(revisions, src.TargetRevision)
}

targets, _, _, err := ctrl.appStateManager.GetRepoObjs(app, app.Spec.GetSources(), appLabelKey, revisions, false, false, false, proj, false)
targets, _, _, err := ctrl.appStateManager.GetRepoObjs(destCluster, app, app.Spec.GetSources(), appLabelKey, revisions, false, false, false, proj, false)
if err != nil {
return false, err
}
Expand Down
Loading

0 comments on commit f40cad8

Please sign in to comment.