diff --git a/.golangci.yaml b/.golangci.yaml index 34a2b6eb..62434899 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -37,11 +37,6 @@ linters-settings: - prefix(github.com/flant/) goimports: local-prefixes: github.com/flant/ - # TODO: replace deprecated methods - # ignore deprecated - # https://staticcheck.dev/docs/checks/#SA1019 - staticcheck: - checks: ["all","-SA1019"] sloglint: # Enforce not mixing key-value pairs and attributes. no-mixed-args: true diff --git a/cmd/addon-operator/main.go b/cmd/addon-operator/main.go index d5488923..a4d6e9ef 100644 --- a/cmd/addon-operator/main.go +++ b/cmd/addon-operator/main.go @@ -95,11 +95,11 @@ func run(ctx context.Context, operator *addon_operator.AddonOperator) error { operator.SetupKubeConfigManager(bk) if err := operator.Setup(); err != nil { - operator.Logger.Fatalf("setup failed: %s\n", err) + operator.Logger.Fatal("setup failed", log.Err(err)) } if err := operator.Start(ctx); err != nil { - operator.Logger.Fatalf("start failed: %s\n", err) + operator.Logger.Fatal("start failed", log.Err(err)) } // Block action by waiting signals from OS. diff --git a/examples/700-go-hook/global-hooks/global-go-hook.go b/examples/700-go-hook/global-hooks/global-go-hook.go index 7b7c76cd..9868bca6 100644 --- a/examples/700-go-hook/global-hooks/global-go-hook.go +++ b/examples/700-go-hook/global-hooks/global-go-hook.go @@ -10,6 +10,6 @@ var _ = sdk.RegisterFunc(&gohook.HookConfig{ }, handler) func handler(input *gohook.HookInput) error { - input.Logger.Infof("Start Global Go hook") + input.Logger.Info("Start Global Go hook") return nil } diff --git a/examples/700-go-hook/modules/001-module-go-hooks/hooks/go_hooks.go b/examples/700-go-hook/modules/001-module-go-hooks/hooks/go_hooks.go index 5b8764c4..b4ef7533 100644 --- a/examples/700-go-hook/modules/001-module-go-hooks/hooks/go_hooks.go +++ b/examples/700-go-hook/modules/001-module-go-hooks/hooks/go_hooks.go @@ -1,6 +1,8 @@ package hooks import ( + "fmt" + "log/slog" "time" v1 "k8s.io/api/core/v1" @@ -54,11 +56,12 @@ func ObjFilter(obj *unstructured.Unstructured) (gohook.FilterResult, error) { func run(input *gohook.HookInput) error { for _, o := range input.Snapshots["pods"] { podSpec := o.(*podSpecFilteredObj) - input.Logger.Infof("Got podSpec: %+v", podSpec) + input.Logger.Info("Got podSpec", + slog.String("spec", fmt.Sprintf("%+v", podSpec))) } - input.Logger.Infof("Hello from on_kube.pods2! I have %d snapshots\n", - len(input.Snapshots)) + input.Logger.Info("Hello from on_kube.pods2! 
I have snapshots", + slog.Int("count", len(input.Snapshots))) input.MetricsCollector.Add("addon_go_hooks_total", 1.0, nil) diff --git a/pkg/addon-operator/admission_http_server.go b/pkg/addon-operator/admission_http_server.go index f9dea6af..3a6a2e79 100644 --- a/pkg/addon-operator/admission_http_server.go +++ b/pkg/addon-operator/admission_http_server.go @@ -3,6 +3,7 @@ package addon_operator import ( "context" "fmt" + "log/slog" "net/http" "path" "time" @@ -27,7 +28,8 @@ func NewAdmissionServer(listenPort, certsDir string) *AdmissionServer { func (as *AdmissionServer) RegisterHandler(route string, handler http.Handler) { if _, ok := as.routes[route]; ok { - log.Fatalf("Route %q is already registered", route) + log.Fatal("Route is already registered", + slog.String("route", route)) } as.routes[route] = handler @@ -41,7 +43,8 @@ func (as *AdmissionServer) start(ctx context.Context) { mux.Handle(route, handler) } - log.Debugf("Registered admission routes: %v", as.routes) + log.Debug("Registered admission routes", + slog.String("routes", fmt.Sprintf("%v", as.routes))) srv := &http.Server{ Addr: fmt.Sprintf(":%s", as.listenPort), @@ -67,7 +70,7 @@ func (as *AdmissionServer) start(ctx context.Context) { cancel() }() if err := srv.Shutdown(ctx); err != nil { - log.Fatalf("Server Shutdown Failed:%+v", err) + log.Fatal("Server Shutdown Failed", log.Err(err)) } }() } diff --git a/pkg/addon-operator/bootstrap.go b/pkg/addon-operator/bootstrap.go index 2ddafbe2..1544f88d 100644 --- a/pkg/addon-operator/bootstrap.go +++ b/pkg/addon-operator/bootstrap.go @@ -2,6 +2,7 @@ package addon_operator import ( "fmt" + "log/slog" "github.com/deckhouse/deckhouse/pkg/log" @@ -18,22 +19,24 @@ import ( func (op *AddonOperator) bootstrap() error { log.Info(shapp.AppStartMessage) - log.Infof("Search modules in: %s", app.ModulesDir) + log.Info("Search modules", + slog.String("path", app.ModulesDir)) - log.Infof("Addon-operator namespace: %s", op.DefaultNamespace) + log.Info("Addon-operator namespace", + slog.String("namespace", op.DefaultNamespace)) // Debug server. 
// TODO: rewrite shapp global variables to the addon-operator ones var err error op.DebugServer, err = shell_operator.RunDefaultDebugServer(shapp.DebugUnixSocket, shapp.DebugHttpServerAddr, op.Logger.Named("debug-server")) if err != nil { - log.Errorf("Fatal: start Debug server: %s", err) + log.Error("Fatal: start Debug server", log.Err(err)) return fmt.Errorf("start Debug server: %w", err) } err = op.Assemble(op.DebugServer) if err != nil { - log.Errorf("Fatal: %s", err) + log.Error("Fatal", log.Err(err)) return fmt.Errorf("assemble Debug server: %w", err) } @@ -68,7 +71,7 @@ func (op *AddonOperator) Assemble(debugServer *debug.Server) (err error) { // SetupKubeConfigManager sets manager, which reads configuration for Modules from a cluster func (op *AddonOperator) SetupKubeConfigManager(bk backend.ConfigHandler) { if op.KubeConfigManager != nil { - log.Warnf("KubeConfigManager is already set") + log.Warn("KubeConfigManager is already set") // return if kube config manager is already set return } diff --git a/pkg/addon-operator/operator.go b/pkg/addon-operator/operator.go index 88af829b..93b8ff24 100644 --- a/pkg/addon-operator/operator.go +++ b/pkg/addon-operator/operator.go @@ -154,9 +154,9 @@ func NewAddonOperator(ctx context.Context, opts ...Option) *AddonOperator { DefaultNamespace: app.Namespace, ConvergeState: converge.NewConvergeState(), parallelTaskChannels: parallelTaskChannels{ - channels: make(map[string](chan parallelQueueEvent)), + channels: make(map[string]chan parallelQueueEvent), }, - discoveredGVKs: make(map[string]struct{}, 0), + discoveredGVKs: make(map[string]struct{}), } for _, opt := range opts { @@ -245,7 +245,8 @@ func (op *AddonOperator) Setup() error { if err != nil { return fmt.Errorf("global hooks directory: %s", err) } - log.Infof("global hooks directory: %s", globalHooksDir) + log.Info("global hooks directory", + slog.String("dir", globalHooksDir)) tempDir, err := ensureTempDirectory(shapp.TempDir) if err != nil { @@ -437,7 +438,8 @@ func (op *AddonOperator) RegisterManagerEventsHandlers() { "binding": string(htypes.Schedule), } logEntry := utils.EnrichLoggerWithLabels(op.Logger, logLabels) - logEntry.Debugf("Create tasks for 'schedule' event '%s'", crontab) + logEntry.Debug("Create tasks for 'schedule' event", + slog.String("event", crontab)) var tasks []sh_task.Task op.ModuleManager.HandleScheduleEvent(crontab, @@ -508,7 +510,8 @@ func (op *AddonOperator) RegisterManagerEventsHandlers() { "binding": string(htypes.OnKubernetesEvent), } logEntry := utils.EnrichLoggerWithLabels(op.Logger, logLabels) - logEntry.Debugf("Create tasks for 'kubernetes' event '%s'", kubeEvent.String()) + logEntry.Debug("Create tasks for 'kubernetes' event", + slog.String("event", kubeEvent.String())) var tasks []sh_task.Task op.ModuleManager.HandleKubeEvent(kubeEvent, @@ -735,7 +738,7 @@ func (op *AddonOperator) CreatePurgeTasks(modulesToPurge []string, t sh_task.Tas return newTasks } -// ApplyKubeConfigValues +// HandleApplyKubeConfigValues ... func (op *AddonOperator) HandleApplyKubeConfigValues(t sh_task.Task, logLabels map[string]string) (res queue.TaskResult) { var handleErr error defer trace.StartRegion(context.Background(), "HandleApplyKubeConfigValues").End() @@ -749,7 +752,9 @@ func (op *AddonOperator) HandleApplyKubeConfigValues(t sh_task.Task, logLabels m if handleErr != nil { res.Status = queue.Fail - logEntry.Errorf("HandleApplyKubeConfigValues failed, requeue task to retry after delay. Failed count is %d. 
Error: %s", t.GetFailureCount()+1, handleErr) + logEntry.Error("HandleApplyKubeConfigValues failed, requeue task to retry after delay.", + slog.Int("count", t.GetFailureCount()+1), + log.Err(handleErr)) op.engine.MetricStorage.CounterAdd("{PREFIX}modules_discover_errors_total", 1.0, map[string]string{}) t.UpdateFailureMessage(handleErr.Error()) t.WithQueuedAt(time.Now()) @@ -758,7 +763,7 @@ func (op *AddonOperator) HandleApplyKubeConfigValues(t sh_task.Task, logLabels m res.Status = queue.Success - logEntry.Debugf("HandleApplyKubeConfigValues success") + logEntry.Debug("HandleApplyKubeConfigValues success") return } @@ -769,7 +774,8 @@ func (op *AddonOperator) HandleConvergeModules(t sh_task.Task, logLabels map[str taskEvent, ok := t.GetProp(converge.ConvergeEventProp).(converge.ConvergeEvent) if !ok { - logEntry.Errorf("Possible bug! Wrong prop type in ConvergeModules: got %T(%#[1]v) instead string.", t.GetProp("event")) + logEntry.Error("Possible bug! Wrong prop type in ConvergeModules: got another type instead string.", + slog.String("type", fmt.Sprintf("%T(%#[1]v)", t.GetProp("event")))) res.Status = queue.Fail return res } @@ -781,7 +787,7 @@ func (op *AddonOperator) HandleConvergeModules(t sh_task.Task, logLabels map[str op.ConvergeState.PhaseLock.Lock() defer op.ConvergeState.PhaseLock.Unlock() if op.ConvergeState.Phase == converge.StandBy { - logEntry.Debugf("ConvergeModules: start") + logEntry.Debug("ConvergeModules: start") // Deduplicate tasks: remove ConvergeModules tasks right after the current task. RemoveAdjacentConvergeModules(op.engine.TaskQueues.GetByName(t.GetQueueName()), t.GetId(), logLabels, op.Logger) @@ -801,7 +807,7 @@ func (op *AddonOperator) HandleConvergeModules(t sh_task.Task, logLabels map[str } if op.ConvergeState.Phase == converge.WaitBeforeAll { - logEntry.Infof("ConvergeModules: beforeAll hooks done, run modules") + logEntry.Info("ConvergeModules: beforeAll hooks done, run modules") var state *module_manager.ModulesState state, handleErr = op.ModuleManager.RefreshEnabledState(t.GetLogLabels()) @@ -829,7 +835,7 @@ func (op *AddonOperator) HandleConvergeModules(t sh_task.Task, logLabels map[str } if op.ConvergeState.Phase == converge.WaitDeleteAndRunModules { - logEntry.Infof("ConvergeModules: ModuleRun tasks done, execute AfterAll global hooks") + logEntry.Info("ConvergeModules: ModuleRun tasks done, execute AfterAll global hooks") // Put AfterAll tasks before current task. tasks, handleErr := op.CreateAfterAllTasks(t.GetLogLabels(), hm.EventDescription) if handleErr == nil { @@ -846,21 +852,24 @@ func (op *AddonOperator) HandleConvergeModules(t sh_task.Task, logLabels map[str // It is the last phase of ConvergeModules task, reset operator's Converge phase. if op.ConvergeState.Phase == converge.WaitAfterAll { op.ConvergeState.Phase = converge.StandBy - logEntry.Infof("ConvergeModules task done") + logEntry.Info("ConvergeModules task done") res.Status = queue.Success return res } if handleErr != nil { res.Status = queue.Fail - logEntry.Errorf("ConvergeModules failed in phase '%s', requeue task to retry after delay. Failed count is %d. 
Error: %s", op.ConvergeState.Phase, t.GetFailureCount()+1, handleErr) + logEntry.Error("ConvergeModules failed, requeue task to retry after delay.", + slog.String("phase", string(op.ConvergeState.Phase)), + slog.Int("count", t.GetFailureCount()+1), + log.Err(handleErr)) op.engine.MetricStorage.CounterAdd("{PREFIX}modules_discover_errors_total", 1.0, map[string]string{}) t.UpdateFailureMessage(handleErr.Error()) t.WithQueuedAt(time.Now()) return res } - logEntry.Debugf("ConvergeModules success") + logEntry.Debug("ConvergeModules success") res.Status = queue.Success return res } @@ -971,12 +980,16 @@ func (op *AddonOperator) CreateAndStartQueuesForGlobalHooks() { h := op.ModuleManager.GetGlobalHook(hookName) for _, hookBinding := range h.GetHookConfig().Schedules { if op.CreateAndStartQueue(hookBinding.Queue) { - log.Debugf("Queue '%s' started for global 'schedule' hook %s", hookBinding.Queue, hookName) + log.Debug("Queue started for global 'schedule' hook", + slog.String("queue", hookBinding.Queue), + slog.String("hook", hookName)) } } for _, hookBinding := range h.GetHookConfig().OnKubernetesEvents { if op.CreateAndStartQueue(hookBinding.Queue) { - log.Debugf("Queue '%s' started for global 'kubernetes' hook %s", hookBinding.Queue, hookName) + log.Debug("Queue started for global 'kubernetes' hook", + slog.String("queue", hookBinding.Queue), + slog.String("hook", hookName)) } } } @@ -995,7 +1008,9 @@ func (op *AddonOperator) CreateAndStartQueuesForModuleHooks(moduleName string) { for _, hook := range scheduleHooks { for _, hookBinding := range hook.GetHookConfig().Schedules { if op.CreateAndStartQueue(hookBinding.Queue) { - log.Debugf("Queue '%s' started for module 'schedule' hook %s", hookBinding.Queue, hook.GetName()) + log.Debug("Queue started for module 'schedule'", + slog.String("queue", hookBinding.Queue), + slog.String("hook", hook.GetName())) } } } @@ -1004,7 +1019,9 @@ func (op *AddonOperator) CreateAndStartQueuesForModuleHooks(moduleName string) { for _, hook := range kubeEventsHooks { for _, hookBinding := range hook.GetHookConfig().OnKubernetesEvents { if op.CreateAndStartQueue(hookBinding.Queue) { - log.Debugf("Queue '%s' started for module 'kubernetes' hook %s", hookBinding.Queue, hook.GetName()) + log.Debug("Queue started for module 'kubernetes'", + slog.String("queue", hookBinding.Queue), + slog.String("hook", hook.GetName())) } } } @@ -1031,7 +1048,7 @@ func (op *AddonOperator) CreateAndStartParallelQueues() { for i := 0; i < app.NumberOfParallelQueues; i++ { queueName := fmt.Sprintf(app.ParallelQueueNamePattern, i) if op.engine.TaskQueues.GetByName(queueName) != nil { - log.Warnf("Parallel queue %s already exists", queueName) + log.Warn("Parallel queue already exists", slog.String("queue", queueName)) continue } op.engine.TaskQueues.NewNamedQueue(queueName, op.ParallelTasksHandler) @@ -1042,7 +1059,7 @@ func (op *AddonOperator) CreateAndStartParallelQueues() { func (op *AddonOperator) DrainModuleQueues(modName string) { m := op.ModuleManager.GetModule(modName) if m == nil { - log.Warnf("Module %q is absent when we try to drain its queue", modName) + log.Warn("Module is absent when we try to drain its queue", slog.String("module", modName)) return } @@ -1089,7 +1106,7 @@ func (op *AddonOperator) StartModuleManagerEventHandler() { eventLogEntry := utils.EnrichLoggerWithLabels(logEntry, logLabels) // if global hooks haven't been run yet, script enabled extender fails due to missing global values if op.globalHooksNotExecutedYet() { - eventLogEntry.Infof("Global hook dynamic 
modification detected, ignore until starting first converge") + eventLogEntry.Info("Global hook dynamic modification detected, ignore until starting first converge") break } @@ -1104,12 +1121,13 @@ func (op *AddonOperator) StartModuleManagerEventHandler() { ) // if converge has already begun - restart it immediately if op.engine.TaskQueues.GetMain().Length() > 0 && RemoveCurrentConvergeTasks(op.getConvergeQueues(), logLabels, op.Logger) && op.ConvergeState.Phase != converge.StandBy { - logEntry.Infof("ConvergeModules: global hook dynamic modification detected, restart current converge process (%s)", op.ConvergeState.Phase) + logEntry.Info("ConvergeModules: global hook dynamic modification detected, restart current converge process", + slog.String("phase", string(op.ConvergeState.Phase))) op.engine.TaskQueues.GetMain().AddFirst(convergeTask) op.logTaskAdd(eventLogEntry, "DynamicExtender is updated, put first", convergeTask) } else { // if convege hasn't started - make way for global hooks and etc - logEntry.Infof("ConvergeModules: global hook dynamic modification detected, rerun all modules required") + logEntry.Info("ConvergeModules: global hook dynamic modification detected, rerun all modules required") op.engine.TaskQueues.GetMain().AddLast(convergeTask) } // ConvergeModules may be in progress, Reset converge state. @@ -1128,12 +1146,15 @@ func (op *AddonOperator) StartModuleManagerEventHandler() { switch event.Type { case config.KubeConfigInvalid: op.ModuleManager.SetKubeConfigValid(false) - eventLogEntry.Infof("KubeConfig become invalid") + eventLogEntry.Info("KubeConfig become invalid") case config.KubeConfigChanged: - eventLogEntry.Debugf("ModuleManagerEventHandler-KubeConfigChanged: GlobalSectionChanged %v, ModuleValuesChanged %s, ModuleEnabledStateChanged %s", event.GlobalSectionChanged, event.ModuleValuesChanged, event.ModuleEnabledStateChanged) + eventLogEntry.Debug("ModuleManagerEventHandler-KubeConfigChanged", + slog.Bool("globalSectionChanged", event.GlobalSectionChanged), + slog.Any("moduleValuesChanged", event.ModuleValuesChanged), + slog.Any("moduleEnabledStateChanged", event.ModuleEnabledStateChanged)) if !op.ModuleManager.GetKubeConfigValid() { - eventLogEntry.Infof("KubeConfig become valid") + eventLogEntry.Info("KubeConfig become valid") } // Config is valid now, add task to update ModuleManager state. 
op.ModuleManager.SetKubeConfigValid(true) @@ -1160,7 +1181,7 @@ func (op *AddonOperator) StartModuleManagerEventHandler() { op.engine.TaskQueues.GetMain().CancelTaskDelay() op.logTaskAdd(eventLogEntry, "KubeConfigExtender is updated, put first", kubeConfigTask) } - eventLogEntry.Infof("Kube config modification detected, ignore until starting first converge") + eventLogEntry.Info("Kube config modification detected, ignore until starting first converge") break } @@ -1176,7 +1197,8 @@ func (op *AddonOperator) StartModuleManagerEventHandler() { ) // if main queue isn't empty and there was another convergeModules task: if op.engine.TaskQueues.GetMain().Length() > 0 && RemoveCurrentConvergeTasks(op.getConvergeQueues(), logLabels, op.Logger) { - logEntry.Infof("ConvergeModules: kube config modification detected, restart current converge process (%s)", op.ConvergeState.Phase) + logEntry.Info("ConvergeModules: kube config modification detected, restart current converge process", + slog.String("phase", string(op.ConvergeState.Phase))) // put ApplyKubeConfig->NewConvergeModulesTask sequence in the beginning of the main queue if kubeConfigTask != nil { op.engine.TaskQueues.GetMain().AddFirst(kubeConfigTask) @@ -1193,7 +1215,7 @@ func (op *AddonOperator) StartModuleManagerEventHandler() { if kubeConfigTask != nil { op.engine.TaskQueues.GetMain().AddFirst(kubeConfigTask) } - logEntry.Infof("ConvergeModules: kube config modification detected, rerun all modules required") + logEntry.Info("ConvergeModules: kube config modification detected, rerun all modules required") op.engine.TaskQueues.GetMain().AddLast(convergeTask) } // ConvergeModules may be in progress, Reset converge state. @@ -1214,7 +1236,9 @@ func (op *AddonOperator) StartModuleManagerEventHandler() { for i := len(reloadTasks) - 1; i >= 0; i-- { op.engine.TaskQueues.GetMain().AddAfter(kubeConfigTask.GetId(), reloadTasks[i]) } - logEntry.Infof("ConvergeModules: kube config modification detected, append %d tasks to rerun modules %+v", len(reloadTasks), modulesToRerun) + logEntry.Info("ConvergeModules: kube config modification detected, append tasks to rerun modules", + slog.Int("count", len(reloadTasks)), + slog.Any("modules", modulesToRerun)) op.logTaskAdd(logEntry, "tail", reloadTasks...) } } @@ -1258,7 +1282,8 @@ func (op *AddonOperator) StartModuleManagerEventHandler() { op.engine.TaskQueues.GetMain().AddLast(newTask.WithQueuedAt(time.Now())) op.logTaskAdd(logEntry, fmt.Sprintf("detected %s, append", additionalDescription), newTask) } else { - eventLogEntry.With("task.flow", "noop").Infof("Detected %s, ModuleRun task already queued", additionalDescription) + eventLogEntry.With("task.flow", "noop").Info("Detected event, ModuleRun task already queued", + slog.String("description", additionalDescription)) } } } @@ -1436,7 +1461,7 @@ func (op *AddonOperator) HandleGlobalHookEnableKubernetesBindings(t sh_task.Task defer trace.StartRegion(context.Background(), "DiscoverHelmReleases").End() logEntry := utils.EnrichLoggerWithLabels(op.Logger, labels) - logEntry.Debugf("Global hook enable kubernetes bindings") + logEntry.Debug("Global hook enable kubernetes bindings") hm := task.HookMetadataAccessor(t) globalHook := op.ModuleManager.GetGlobalHook(hm.HookName) @@ -1501,7 +1526,9 @@ func (op *AddonOperator) HandleGlobalHookEnableKubernetesBindings(t sh_task.Task "queue": t.GetQueueName(), "activation": "OperatorStartup", }) - logEntry.Errorf("Global hook enable kubernetes bindings failed, requeue task to retry after delay. Failed count is %d. 
Error: %s", t.GetFailureCount()+1, err) + logEntry.Error("Global hook enable kubernetes bindings failed, requeue task to retry after delay.", + slog.Int("count", t.GetFailureCount()+1), + log.Err(err)) t.UpdateFailureMessage(err.Error()) t.WithQueuedAt(queuedAt) res.Status = queue.Fail @@ -1510,13 +1537,14 @@ func (op *AddonOperator) HandleGlobalHookEnableKubernetesBindings(t sh_task.Task // Substitute current task with Synchronization tasks for the main queue. // Other Synchronization tasks are queued into specified queues. // Informers can be started now — their events will be added to the queue tail. - logEntry.Debugf("Global hook enable kubernetes bindings success") + logEntry.Debug("Global hook enable kubernetes bindings success") // "Wait" tasks are queued first for _, tsk := range parallelSyncTasksToWait { q := op.engine.TaskQueues.GetByName(tsk.GetQueueName()) if q == nil { - log.Errorf("Queue %s is not created while run GlobalHookEnableKubernetesBindings task!", tsk.GetQueueName()) + log.Error("Queue is not created while run GlobalHookEnableKubernetesBindings task!", + slog.String("queue", tsk.GetQueueName())) } else { // Skip state creation if WaitForSynchronization is disabled. thm := task.HookMetadataAccessor(tsk) @@ -1529,7 +1557,8 @@ func (op *AddonOperator) HandleGlobalHookEnableKubernetesBindings(t sh_task.Task for _, tsk := range parallelSyncTasks { q := op.engine.TaskQueues.GetByName(tsk.GetQueueName()) if q == nil { - log.Errorf("Queue %s is not created while run GlobalHookEnableKubernetesBindings task!", tsk.GetQueueName()) + log.Error("Queue is not created while run GlobalHookEnableKubernetesBindings task!", + slog.String("queue", tsk.GetQueueName())) } else { q.AddLast(tsk) } @@ -1550,12 +1579,14 @@ func (op *AddonOperator) HandleDiscoverHelmReleases(t sh_task.Task, labels map[s defer trace.StartRegion(context.Background(), "DiscoverHelmReleases").End() logEntry := utils.EnrichLoggerWithLabels(op.Logger, labels) - logEntry.Debugf("Discover Helm releases state") + logEntry.Debug("Discover Helm releases state") state, err := op.ModuleManager.RefreshStateFromHelmReleases(t.GetLogLabels()) if err != nil { res.Status = queue.Fail - logEntry.Errorf("Discover helm releases failed, requeue task to retry after delay. Failed count is %d. Error: %s", t.GetFailureCount()+1, err) + logEntry.Error("Discover helm releases failed, requeue task to retry after delay.", + slog.Int("count", t.GetFailureCount()+1), + log.Err(err)) t.UpdateFailureMessage(err.Error()) t.WithQueuedAt(time.Now()) return @@ -1573,15 +1604,15 @@ func (op *AddonOperator) HandleModulePurge(t sh_task.Task, labels map[string]str defer trace.StartRegion(context.Background(), "ModulePurge").End() logEntry := utils.EnrichLoggerWithLabels(op.Logger, labels) - logEntry.Debugf("Module purge start") + logEntry.Debug("Module purge start") hm := task.HookMetadataAccessor(t) err := op.Helm.NewClient(op.Logger.Named("helm-client"), t.GetLogLabels()).DeleteRelease(hm.ModuleName) if err != nil { // Purge is for unknown modules, just print warning. - logEntry.Warnf("Module purge failed, no retry. 
Error: %s", err) + logEntry.Warn("Module purge failed, no retry.", log.Err(err)) } else { - logEntry.Debugf("Module purge success") + logEntry.Debug("Module purge success") } status = queue.Success @@ -1598,7 +1629,7 @@ func (op *AddonOperator) HandleModuleDelete(t sh_task.Task, labels map[string]st baseModule := op.ModuleManager.GetModule(hm.ModuleName) logEntry := utils.EnrichLoggerWithLabels(op.Logger, labels) - logEntry.Debugf("Module delete '%s'", hm.ModuleName) + logEntry.Debug("Module delete", slog.String("name", hm.ModuleName)) // Register module hooks to run afterHelmDelete hooks on startup. // It's a noop if registration is done before. @@ -1618,12 +1649,14 @@ if err != nil { op.engine.MetricStorage.CounterAdd("{PREFIX}module_delete_errors_total", 1.0, map[string]string{"module": hm.ModuleName}) - logEntry.Errorf("Module delete failed, requeue task to retry after delay. Failed count is %d. Error: %s", t.GetFailureCount()+1, err) + logEntry.Error("Module delete failed, requeue task to retry after delay.", + slog.Int("count", t.GetFailureCount()+1), + log.Err(err)) t.UpdateFailureMessage(err.Error()) t.WithQueuedAt(time.Now()) status = queue.Fail } else { - logEntry.Debugf("Module delete success '%s'", hm.ModuleName) + logEntry.Debug("Module delete success", slog.String("name", hm.ModuleName)) status = queue.Success } @@ -1640,11 +1673,11 @@ func (op *AddonOperator) HandleModuleEnsureCRDs(t sh_task.Task, labels map[strin baseModule := op.ModuleManager.GetModule(hm.ModuleName) logEntry := utils.EnrichLoggerWithLabels(op.Logger, labels) - logEntry.Debugf("Module ensureCRDs '%s'", hm.ModuleName) + logEntry.Debug("Module ensureCRDs", slog.String("name", hm.ModuleName)) if appliedGVKs, err := op.EnsureCRDs(baseModule); err != nil { op.ModuleManager.UpdateModuleLastErrorAndNotify(baseModule, err) - logEntry.Errorf("ModuleEnsureCRDs failed. Error: %s", err) + logEntry.Error("ModuleEnsureCRDs failed.", log.Err(err)) t.UpdateFailureMessage(err.Error()) t.WithQueuedAt(time.Now()) res.Status = queue.Fail @@ -1666,7 +1699,8 @@ func (op *AddonOperator) HandleParallelModuleRun(t sh_task.Task, labels map[stri hm := task.HookMetadataAccessor(t) if hm.ParallelRunMetadata == nil { - logEntry.Errorf("Possible bug! Couldn't get task ParallelRunMetadata for a parallel task: %s", hm.EventDescription)
+ logEntry.Error("Possible bug! Couldn't get task ParallelRunMetadata for a parallel task.", + slog.String("description", hm.EventDescription)) res.Status = queue.Fail return res } @@ -1674,7 +1708,8 @@ i := 0 parallelChannel := make(chan parallelQueueEvent) op.parallelTaskChannels.Set(t.GetId(), parallelChannel) - logEntry.Debugf("ParallelModuleRun available parallel event channels %v", op.parallelTaskChannels.channels) + logEntry.Debug("ParallelModuleRun available parallel event channels", + slog.String("channels", fmt.Sprintf("%v", op.parallelTaskChannels.channels))) for moduleName, moduleMetadata := range hm.ParallelRunMetadata.GetModulesMetadata() { queueName := fmt.Sprintf(app.ParallelQueueNamePattern, i%(app.NumberOfParallelQueues-1)) newLogLabels := utils.MergeLabels(labels) @@ -1702,7 +1737,8 @@ L: for { select { case parallelEvent := <-parallelChannel: - logEntry.Debugf("ParallelModuleRun event '%v' received", parallelEvent) + logEntry.Debug("ParallelModuleRun event received", + slog.String("event", fmt.Sprintf("%v", parallelEvent))) if len(parallelEvent.errMsg) != 0 { if tasksErrors[parallelEvent.moduleName] != parallelEvent.errMsg { tasksErrors[parallelEvent.moduleName] = parallelEvent.errMsg @@ -1806,7 +1842,8 @@ func (op *AddonOperator) HandleModuleRun(t sh_task.Task, labels map[string]strin moduleRunErr = op.ModuleManager.RegisterModuleHooks(baseModule, labels) if moduleRunErr == nil { if hm.DoModuleStartup { - logEntry.Debugf("ModuleRun '%s' phase", baseModule.GetPhase()) + logEntry.Debug("ModuleRun phase", + slog.String("phase", string(baseModule.GetPhase()))) treg := trace.StartRegion(context.Background(), "ModuleRun-OnStartup") @@ -1826,7 +1863,7 @@ } if baseModule.GetPhase() == modules.OnStartupDone { - logEntry.Debugf("ModuleRun '%s' phase", baseModule.GetPhase()) + logEntry.Debug("ModuleRun phase", slog.String("phase", string(baseModule.GetPhase()))) if baseModule.HasKubernetesHooks() { op.ModuleManager.SetModulePhaseAndNotify(baseModule, modules.QueueSynchronizationTasks) } else { @@ -1837,7 +1874,7 @@ // Note: All hooks should be queued to fill snapshots before proceed to beforeHelm hooks. if baseModule.GetPhase() == modules.QueueSynchronizationTasks { - logEntry.Debugf("ModuleRun '%s' phase", baseModule.GetPhase()) + logEntry.Debug("ModuleRun phase", slog.String("phase", string(baseModule.GetPhase()))) // ModuleHookRun.Synchronization tasks for bindings with the "main" queue. 
mainSyncTasks := make([]sh_task.Task, 0) @@ -1910,7 +1947,8 @@ func (op *AddonOperator) HandleModuleRun(t sh_task.Task, labels map[string]strin for _, tsk := range parallelSyncTasksToWait { q := op.engine.TaskQueues.GetByName(tsk.GetQueueName()) if q == nil { - logEntry.Errorf("queue %s is not found while EnableKubernetesBindings task", tsk.GetQueueName()) + logEntry.Error("queue is not found while EnableKubernetesBindings task", + slog.String("queue", tsk.GetQueueName())) } else { thm := task.HookMetadataAccessor(tsk) q.AddLast(tsk) @@ -1923,7 +1961,8 @@ func (op *AddonOperator) HandleModuleRun(t sh_task.Task, labels map[string]strin for _, tsk := range parallelSyncTasks { q := op.engine.TaskQueues.GetByName(tsk.GetQueueName()) if q == nil { - logEntry.Errorf("queue %s is not found while EnableKubernetesBindings task", tsk.GetQueueName()) + logEntry.Error("queue is not found while EnableKubernetesBindings task", + slog.String("queue", tsk.GetQueueName())) } else { q.AddLast(tsk) } @@ -1939,7 +1978,7 @@ func (op *AddonOperator) HandleModuleRun(t sh_task.Task, labels map[string]strin op.ModuleManager.SetModulePhaseAndNotify(baseModule, modules.WaitForSynchronization) logEntry.With("module.state", "wait-for-synchronization"). - Debugf("ModuleRun wait for Synchronization") + Debug("ModuleRun wait for Synchronization") } // Put Synchronization tasks for kubernetes hooks before ModuleRun task. @@ -1961,10 +2000,14 @@ func (op *AddonOperator) HandleModuleRun(t sh_task.Task, labels map[string]strin } else { // Debug messages every fifth second: print Synchronization state. if time.Now().UnixNano()%5000000000 == 0 { - logEntry.Debugf("ModuleRun wait Synchronization state: moduleStartup:%v syncNeeded:%v syncQueued:%v syncDone:%v", hm.DoModuleStartup, baseModule.SynchronizationNeeded(), baseModule.Synchronization().HasQueued(), baseModule.Synchronization().IsCompleted()) + logEntry.Debug("ModuleRun wait Synchronization state", + slog.Bool("moduleStartup", hm.DoModuleStartup), + slog.Bool("syncNeeded", baseModule.SynchronizationNeeded()), + slog.Bool("syncQueued", baseModule.Synchronization().HasQueued()), + slog.Bool("syncDone", baseModule.Synchronization().IsCompleted())) baseModule.Synchronization().DebugDumpState(logEntry) } - logEntry.Debugf("Synchronization not completed, keep ModuleRun task in repeat mode") + logEntry.Debug("Synchronization not completed, keep ModuleRun task in repeat mode") t.WithQueuedAt(time.Now()) res.Status = queue.Repeat return @@ -1973,7 +2016,7 @@ func (op *AddonOperator) HandleModuleRun(t sh_task.Task, labels map[string]strin // Enable schedule events once at module start. if baseModule.GetPhase() == modules.EnableScheduleBindings { - logEntry.Debugf("ModuleRun '%s' phase", baseModule.GetPhase()) + logEntry.Debug("ModuleRun phase", slog.String("phase", string(baseModule.GetPhase()))) op.ModuleManager.EnableModuleScheduleBindings(hm.ModuleName) op.ModuleManager.SetModulePhaseAndNotify(baseModule, modules.CanRunHelm) @@ -1981,7 +2024,7 @@ func (op *AddonOperator) HandleModuleRun(t sh_task.Task, labels map[string]strin // Module start is done, module is ready to run hooks and helm chart. 
if baseModule.GetPhase() == modules.CanRunHelm { - logEntry.Debugf("ModuleRun '%s' phase", baseModule.GetPhase()) + logEntry.Debug("ModuleRun phase", slog.String("phase", string(baseModule.GetPhase()))) // run beforeHelm, helm, afterHelm valuesChanged, moduleRunErr = op.ModuleManager.RunModule(baseModule.Name, t.GetLogLabels()) } @@ -1989,14 +2032,17 @@ func (op *AddonOperator) HandleModuleRun(t sh_task.Task, labels map[string]strin op.ModuleManager.UpdateModuleLastErrorAndNotify(baseModule, moduleRunErr) if moduleRunErr != nil { res.Status = queue.Fail - logEntry.Errorf("ModuleRun failed in phase '%s'. Requeue task to retry after delay. Failed count is %d. Error: %s", baseModule.GetPhase(), t.GetFailureCount()+1, moduleRunErr) + logEntry.Error("ModuleRun failed. Requeue task to retry after delay.", + slog.String("phase", string(baseModule.GetPhase())), + slog.Int("count", t.GetFailureCount()+1), + log.Err(moduleRunErr)) op.engine.MetricStorage.CounterAdd("{PREFIX}module_run_errors_total", 1.0, map[string]string{"module": hm.ModuleName}) t.UpdateFailureMessage(moduleRunErr.Error()) t.WithQueuedAt(time.Now()) } else { res.Status = queue.Success if valuesChanged { - logEntry.Infof("ModuleRun success, values changed, restart module") + logEntry.Info("ModuleRun success, values changed, restart module") // One of afterHelm hooks changes values, run ModuleRun again: copy task, but disable startup hooks. hm.DoModuleStartup = false hm.EventDescription = "AfterHelm-Hooks-Change-Values" @@ -2010,7 +2056,7 @@ func (op *AddonOperator) HandleModuleRun(t sh_task.Task, labels map[string]strin res.AfterTasks = []sh_task.Task{newTask.WithQueuedAt(time.Now())} op.logTaskAdd(logEntry, "after", res.AfterTasks...) } else { - logEntry.Infof("ModuleRun success, module is ready") + logEntry.Info("ModuleRun success, module is ready") } } return @@ -2085,7 +2131,10 @@ func (op *AddonOperator) HandleModuleHookRun(t sh_task.Task, labels map[string]s } // Task 'tsk' will be combined, so remove it from the SynchronizationState. if thm.IsSynchronization() { - logEntry.Debugf("Synchronization task for %s/%s is combined, mark it as Done: id=%s", thm.HookName, thm.Binding, thm.KubernetesBindingId) + logEntry.Debug("Synchronization task is combined, mark it as Done", + slog.String("name", thm.HookName), + slog.String("binding", thm.Binding), + slog.String("id", thm.KubernetesBindingId)) baseModule.Synchronization().DoneForBinding(thm.KubernetesBindingId) } return false // do not stop combine process on this task @@ -2097,7 +2146,7 @@ func (op *AddonOperator) HandleModuleHookRun(t sh_task.Task, labels map[string]s if len(combineResult.MonitorIDs) > 0 { hm.MonitorIDs = append(hm.MonitorIDs, combineResult.MonitorIDs...) } - logEntry.Debugf("Got monitorIDs: %+v", hm.MonitorIDs) + logEntry.Debug("Got monitorIDs", slog.Any("monitorIDs", hm.MonitorIDs)) t.UpdateMetadata(hm) } } @@ -2119,12 +2168,14 @@ func (op *AddonOperator) HandleModuleHookRun(t sh_task.Task, labels map[string]s if err != nil { if hm.AllowFailure { allowed = 1.0 - logEntry.Infof("Module hook failed, but allowed to fail. Error: %v", err) + logEntry.Info("Module hook failed, but allowed to fail.", log.Err(err)) res.Status = queue.Success op.ModuleManager.UpdateModuleHookStatusAndNotify(baseModule, hm.HookName, nil) } else { errors = 1.0 - logEntry.Errorf("Module hook failed, requeue task to retry after delay. Failed count is %d. 
Error: %s", t.GetFailureCount()+1, err) + logEntry.Error("Module hook failed, requeue task to retry after delay.", + slog.Int("count", t.GetFailureCount()+1), + log.Err(err)) t.UpdateFailureMessage(err.Error()) t.WithQueuedAt(time.Now()) res.Status = queue.Fail @@ -2132,7 +2183,7 @@ func (op *AddonOperator) HandleModuleHookRun(t sh_task.Task, labels map[string]s } } else { success = 1.0 - logEntry.Debugf("Module hook success '%s'", hm.HookName) + logEntry.Debug("Module hook success", slog.String("name", hm.HookName)) res.Status = queue.Success op.ModuleManager.UpdateModuleHookStatusAndNotify(baseModule, hm.HookName, nil) @@ -2142,7 +2193,7 @@ func (op *AddonOperator) HandleModuleHookRun(t sh_task.Task, labels map[string]s switch hm.BindingType { case htypes.Schedule: if beforeChecksum != afterChecksum { - logEntry.Infof("Module hook changed values, will restart ModuleRun.") + logEntry.Info("Module hook changed values, will restart ModuleRun.") reloadModule = true eventDescription = "Schedule-Change-ModuleValues" } @@ -2150,9 +2201,9 @@ func (op *AddonOperator) HandleModuleHookRun(t sh_task.Task, labels map[string]s // Do not reload module on changes during Synchronization. if beforeChecksum != afterChecksum { if hm.IsSynchronization() { - logEntry.Infof("Module hook changed values, but restart ModuleRun is ignored for the Synchronization task.") + logEntry.Info("Module hook changed values, but restart ModuleRun is ignored for the Synchronization task.") } else { - logEntry.Infof("Module hook changed values, will restart ModuleRun.") + logEntry.Info("Module hook changed values, will restart ModuleRun.") reloadModule = true eventDescription = "Kubernetes-Change-ModuleValues" } @@ -2189,7 +2240,7 @@ func (op *AddonOperator) HandleModuleHookRun(t sh_task.Task, labels map[string]s op.engine.TaskQueues.GetMain().AddLast(newTask.WithQueuedAt(time.Now())) op.logTaskAdd(logEntry, "module values are changed, append", newTask) } else { - logEntry.With("task.flow", "noop").Infof("module values are changed, ModuleRun task already queued") + logEntry.With("task.flow", "noop").Info("module values are changed, ModuleRun task already queued") } } } @@ -2245,13 +2296,13 @@ func (op *AddonOperator) HandleGlobalHookRun(t sh_task.Task, labels map[string]s if isSynchronization { // Synchronization is not a part of v0 contract, skip hook execution. if taskHook.GetHookConfig().Version == "v0" { - logEntry.Infof("Execute on Synchronization ignored for v0 hooks") + logEntry.Info("Execute on Synchronization ignored for v0 hooks") shouldRunHook = false res.Status = queue.Success } // Check for "executeOnSynchronization: false". if !hm.ExecuteOnSynchronization { - logEntry.Infof("Execute on Synchronization disabled in hook config: ExecuteOnSynchronization=false") + logEntry.Info("Execute on Synchronization disabled in hook config: ExecuteOnSynchronization=false") shouldRunHook = false res.Status = queue.Success } @@ -2273,7 +2324,10 @@ func (op *AddonOperator) HandleGlobalHookRun(t sh_task.Task, labels map[string]s } // Task 'tsk' will be combined, so remove it from the GlobalSynchronizationState. 
if thm.IsSynchronization() { - logEntry.Debugf("Synchronization task for %s/%s is combined, mark it as Done: id=%s", thm.HookName, thm.Binding, thm.KubernetesBindingId) + logEntry.Debug("Synchronization task is combined, mark it as Done", + slog.String("name", thm.HookName), + slog.String("binding", thm.Binding), + slog.String("id", thm.KubernetesBindingId)) op.ModuleManager.GlobalSynchronizationState().DoneForBinding(thm.KubernetesBindingId) } return false // Combine tsk. @@ -2283,10 +2337,13 @@ func (op *AddonOperator) HandleGlobalHookRun(t sh_task.Task, labels map[string]s hm.BindingContext = combineResult.BindingContexts // Extra monitor IDs can be returned if several Synchronization binding contexts are combined. if len(combineResult.MonitorIDs) > 0 { - logEntry.Debugf("Task monitorID: %s, combined monitorIDs: %+v", hm.MonitorIDs, combineResult.MonitorIDs) + logEntry.Debug("Task monitorID. Combined monitorIDs.", + slog.Any("monitorIDs", hm.MonitorIDs), + slog.Any("combinedMonitorIDs", combineResult.MonitorIDs)) hm.MonitorIDs = combineResult.MonitorIDs } - logEntry.Debugf("Got monitorIDs: %+v", hm.MonitorIDs) + logEntry.Debug("Got monitorIDs", + slog.Any("monitorIDs", hm.MonitorIDs)) t.UpdateMetadata(hm) } } @@ -2295,7 +2352,7 @@ func (op *AddonOperator) HandleGlobalHookRun(t sh_task.Task, labels map[string]s // op.HelmResourcesManager.PauseMonitors() if shouldRunHook { - logEntry.Debugf("Global hook run") + logEntry.Debug("Global hook run") errors := 0.0 success := 0.0 @@ -2306,11 +2363,13 @@ func (op *AddonOperator) HandleGlobalHookRun(t sh_task.Task, labels map[string]s if err != nil { if hm.AllowFailure { allowed = 1.0 - logEntry.Infof("Global hook failed, but allowed to fail. Error: %v", err) + logEntry.Info("Global hook failed, but allowed to fail.", log.Err(err)) res.Status = queue.Success } else { errors = 1.0 - logEntry.Errorf("Global hook failed, requeue task to retry after delay. Failed count is %d. Error: %s", t.GetFailureCount()+1, err) + logEntry.Error("Global hook failed, requeue task to retry after delay.", + slog.Int("count", t.GetFailureCount()+1), + log.Err(err)) t.UpdateFailureMessage(err.Error()) t.WithQueuedAt(time.Now()) res.Status = queue.Fail @@ -2318,7 +2377,10 @@ func (op *AddonOperator) HandleGlobalHookRun(t sh_task.Task, labels map[string]s } else { // Calculate new checksum of *Enabled values. 
success = 1.0 - logEntry.Debugf("GlobalHookRun success, checksums: before=%s after=%s saved=%s", beforeChecksum, afterChecksum, hm.ValuesChecksum) + logEntry.Debug("GlobalHookRun success", + slog.String("beforeChecksum", beforeChecksum), + slog.String("afterChecksum", afterChecksum), + slog.String("savedChecksum", hm.ValuesChecksum)) res.Status = queue.Success reloadAll := false @@ -2326,28 +2388,28 @@ func (op *AddonOperator) HandleGlobalHookRun(t sh_task.Task, labels map[string]s switch hm.BindingType { case htypes.Schedule: if beforeChecksum != afterChecksum { - logEntry.Infof("Global hook changed values, will run ReloadAll.") + logEntry.Info("Global hook changed values, will run ReloadAll.") reloadAll = true eventDescription = "Schedule-Change-GlobalValues" } case htypes.OnKubernetesEvent: if beforeChecksum != afterChecksum { if hm.ReloadAllOnValuesChanges { - logEntry.Infof("Global hook changed values, will run ReloadAll.") + logEntry.Info("Global hook changed values, will run ReloadAll.") reloadAll = true eventDescription = "Kubernetes-Change-GlobalValues" } else { - logEntry.Infof("Global hook changed values, but ReloadAll ignored for the Synchronization task.") + logEntry.Info("Global hook changed values, but ReloadAll ignored for the Synchronization task.") } } case hookTypes.AfterAll: if !hm.LastAfterAllHook && afterChecksum != beforeChecksum { - logEntry.Infof("Global hook changed values, but ReloadAll ignored: more AfterAll hooks to execute.") + logEntry.Info("Global hook changed values, but ReloadAll ignored: more AfterAll hooks to execute.") } // values are changed when afterAll hooks are executed if hm.LastAfterAllHook && afterChecksum != hm.ValuesChecksum { - logEntry.Infof("Global values changed by AfterAll hooks, will run ReloadAll.") + logEntry.Info("Global values changed by AfterAll hooks, will run ReloadAll.") reloadAll = true eventDescription = "AfterAll-Hooks-Change-GlobalValues" } @@ -2357,7 +2419,7 @@ func (op *AddonOperator) HandleGlobalHookRun(t sh_task.Task, labels map[string]s // if helm3lib is in use - reinit helm action configuration to update helm capabilities (newly available apiVersions and resoruce kinds) if op.Helm.ClientType == helm.Helm3Lib { if err := helm3lib.ReinitActionConfig(op.Logger.Named("helm3-client")); err != nil { - logEntry.Errorf("Couldn't reinitialize helm3lib action configuration: %s", err) + logEntry.Error("Couldn't reinitialize helm3lib action configuration", log.Err(err)) t.UpdateFailureMessage(err.Error()) t.WithQueuedAt(time.Now()) res.Status = queue.Fail @@ -2400,7 +2462,7 @@ func (op *AddonOperator) HandleGlobalHookRun(t sh_task.Task, labels map[string]s if isSynchronization && res.Status == queue.Success { op.ModuleManager.GlobalSynchronizationState().DoneForBinding(hm.KubernetesBindingId) // Unlock Kubernetes events for all monitors when Synchronization task is done. - logEntry.Debugf("Synchronization done, unlock Kubernetes events") + logEntry.Debug("Synchronization done, unlock Kubernetes events") for _, monitorID := range hm.MonitorIDs { taskHook.GetHookController().UnlockKubernetesEventsFor(monitorID) } @@ -2445,7 +2507,8 @@ func (op *AddonOperator) CreateConvergeModulesTasks(state *module_manager.Module queuedAt := time.Now() // Add ModuleDelete tasks to delete helm releases of disabled modules. 
- log.Debugf("The following modules are going to be disabled: %v", state.ModulesToDisable) + log.Debug("The following modules are going to be disabled", + slog.Any("modules", state.ModulesToDisable)) for _, moduleName := range state.ModulesToDisable { ev := events.ModuleEvent{ ModuleName: moduleName, @@ -2468,7 +2531,8 @@ func (op *AddonOperator) CreateConvergeModulesTasks(state *module_manager.Module // Add ModuleRun tasks to install or reload enabled modules. newlyEnabled := utils.ListToMapStringStruct(state.ModulesToEnable) - log.Debugf("The following modules are going to be enabled/rerun: %v", state.AllEnabledModulesByOrder) + log.Debug("The following modules are going to be enabled/rerun", + slog.String("modules", fmt.Sprintf("%v", state.AllEnabledModulesByOrder))) for _, modules := range state.AllEnabledModulesByOrder { newLogLabels := utils.MergeLabels(logLabels) @@ -2550,7 +2614,8 @@ func (op *AddonOperator) CreateConvergeModulesTasks(state *module_manager.Module modulesTasks = append(modulesTasks, newTask.WithQueuedAt(queuedAt)) default: - log.Errorf("Invalid ModulesState %v", state) + log.Error("Invalid ModulesState", + slog.String("state", fmt.Sprintf("%v", state))) } } // as resultingTasks contains new ensureCRDsTasks we invalidate @@ -2614,7 +2679,8 @@ func (op *AddonOperator) CheckConvergeStatus(t sh_task.Task) { // Report modules left to process. if convergeTasks > 0 && (t.GetType() == task.ModuleRun || t.GetType() == task.ModuleDelete) { moduleTasks := ConvergeModulesInQueue(op.engine.TaskQueues.GetMain()) - log.Infof("Converge modules in progress: %d modules left to process in queue 'main'", moduleTasks) + log.Info("Converge modules in progress", + slog.Int("count", moduleTasks)) } } @@ -2632,7 +2698,7 @@ func (op *AddonOperator) UpdateFirstConvergeStatus(convergeTasks int) { case converge.FirstStarted: // Switch to 'done' state after first converge is started and when no 'converge' tasks left in the queue. if convergeTasks == 0 { - log.Infof("First converge is finished. Operator is ready now.") + log.Info("First converge is finished. Operator is ready now.") op.ConvergeState.SetFirstRunPhase(converge.FirstDone) } } diff --git a/pkg/addon-operator/operator_test.go b/pkg/addon-operator/operator_test.go index a1ffaa63..7ad550d6 100644 --- a/pkg/addon-operator/operator_test.go +++ b/pkg/addon-operator/operator_test.go @@ -2,6 +2,7 @@ package addon_operator import ( "context" + "log/slog" "os" "path/filepath" "strings" @@ -513,7 +514,8 @@ func Test_HandleConvergeModules_global_changed(t *testing.T) { g.Eventually(convergeDone(op), "30s", "200ms").Should(BeTrue()) - log.Infof("Converge done, got %d tasks in history", len(taskHandleHistory)) + log.Info("Converge done, got tasks in history", + slog.Int("count", len(taskHandleHistory))) // Save current history length to ignore first converge tasks later. ignoreTasksCount := len(taskHandleHistory) @@ -534,7 +536,8 @@ func Test_HandleConvergeModules_global_changed(t *testing.T) { g.Expect(cmPatched.Data).Should(HaveKey("global")) g.Expect(cmPatched.Data["global"]).Should(Equal("param: newValue")) - log.Infof("ConfigMap patched, got %d tasks in history", len(taskHandleHistory)) + log.Info("ConfigMap patched, got tasks in history", + slog.Int("count", len(taskHandleHistory))) // Expect ConvergeModules appears in queue. 
g.Eventually(func() bool { diff --git a/pkg/addon-operator/queue.go b/pkg/addon-operator/queue.go index e365f94d..4445113d 100644 --- a/pkg/addon-operator/queue.go +++ b/pkg/addon-operator/queue.go @@ -1,6 +1,8 @@ package addon_operator import ( + "log/slog" + "github.com/deckhouse/deckhouse/pkg/log" "github.com/flant/addon-operator/pkg/addon-operator/converge" @@ -114,13 +116,21 @@ func RemoveCurrentConvergeTasks(convergeQueues []*queue.TaskQueue, logLabels map case task.ParallelModuleRun: if hm.ParallelRunMetadata == nil || hm.ParallelRunMetadata.CancelF == nil { - logEntry.Warnf("Couldn't get parallelRun metadata for the parallel task of type: %s, module: %s, description: %s, from queue %s", t.GetType(), hm.ModuleName, hm.EventDescription, queue.Name) + logEntry.Warn("Couldn't get parallelRun metadata for the parallel task", + slog.String("type", string(t.GetType())), + slog.String("module", hm.ModuleName), + slog.String("description", hm.EventDescription), + slog.String("queue", queue.Name)) } else { // cancel parallel task context hm.ParallelRunMetadata.CancelF() } } - logEntry.Debugf("Drained converge task of type: %s, module: %s, description: %s, from queue %s", t.GetType(), hm.ModuleName, hm.EventDescription, queue.Name) + logEntry.Debug("Drained converge task", + slog.String("type", string(t.GetType())), + slog.String("module", hm.ModuleName), + slog.String("description", hm.EventDescription), + slog.String("queue", queue.Name)) return false } return true @@ -164,7 +174,10 @@ func RemoveCurrentConvergeTasksFromId(q *queue.TaskQueue, afterId string, logLab stop = true } hm := task.HookMetadataAccessor(t) - logEntry.Debugf("Drained converge task of type: %s, module: %s, description: %s", t.GetType(), hm.ModuleName, hm.EventDescription) + logEntry.Debug("Drained converge task", + slog.String("type", string(t.GetType())), + slog.String("module", hm.ModuleName), + slog.String("description", hm.EventDescription)) return false } return true @@ -198,7 +211,9 @@ func RemoveAdjacentConvergeModules(q *queue.TaskQueue, afterId string, logLabels // Remove ConvergeModules after current. if t.GetType() == task.ConvergeModules { hm := task.HookMetadataAccessor(t) - logEntry.Debugf("Drained adjacent ConvergeModules task of type: %s, description: %s", t.GetType(), hm.EventDescription) + logEntry.Debug("Drained adjacent ConvergeModules task", + slog.String("type", string(t.GetType())), + slog.String("description", hm.EventDescription)) return false } diff --git a/pkg/helm/helm.go b/pkg/helm/helm.go index e08a9dac..4c5a1e1c 100644 --- a/pkg/helm/helm.go +++ b/pkg/helm/helm.go @@ -1,6 +1,7 @@ package helm import ( + "log/slog" "time" "github.com/deckhouse/deckhouse/pkg/log" @@ -51,7 +52,7 @@ func InitHelmClientFactory(helmopts *Options, extraLabels map[string]string) (*C }, helmopts.Logger, extraLabels) case Helm3: - log.Infof("Helm 3 detected (path is '%s')", helm3.Helm3Path) + log.Info("Helm 3 detected", slog.String("path", helm3.Helm3Path)) // Use helm3 client. 
factory.ClientType = Helm3 factory.NewClientFn = helm3.NewClient diff --git a/pkg/helm/helm3/helm3.go b/pkg/helm/helm3/helm3.go index 8eb59b86..8ec1a0a8 100644 --- a/pkg/helm/helm3/helm3.go +++ b/pkg/helm/helm3/helm3.go @@ -3,6 +3,7 @@ package helm3 import ( "bytes" "fmt" + "log/slog" "os/exec" "sort" "strings" @@ -87,7 +88,7 @@ func (h *Helm3Client) initAndVersion() error { } stdout = strings.Join([]string{stdout, stderr}, "\n") stdout = strings.ReplaceAll(stdout, "\n", " ") - log.Infof("Helm 3 version: %s", stdout) + log.Info("Helm 3 version", slog.String("version", stdout)) return nil } @@ -161,12 +162,20 @@ func (h *Helm3Client) UpgradeRelease(releaseName string, chart string, valuesPat args = append(args, setValue) } - h.Logger.Infof("Running helm upgrade for release '%s' with chart '%s' in namespace '%s' ...", releaseName, chart, namespace) + h.Logger.Info("Running helm upgrade for release ...", + slog.String("release", releaseName), + slog.String("chart", chart), + slog.String("namespace", namespace)) stdout, stderr, err := h.cmd(args...) if err != nil { return fmt.Errorf("helm upgrade failed: %s:\n%s %s", err, stdout, stderr) } - h.Logger.Infof("Helm upgrade for release '%s' with chart '%s' in namespace '%s' successful:\n%s\n%s", releaseName, chart, namespace, stdout, stderr) + h.Logger.Info("Helm upgrade for release successful", + slog.String("release", releaseName), + slog.String("chart", chart), + slog.String("namespace", namespace), + slog.String("stdout", stdout), + slog.String("stderr", stderr)) return nil } @@ -191,7 +200,7 @@ func (h *Helm3Client) GetReleaseValues(releaseName string) (utils.Values, error) } func (h *Helm3Client) DeleteRelease(releaseName string) (err error) { - h.Logger.Debugf("helm release '%s': execute helm uninstall", releaseName) + h.Logger.Debug("helm release: execute helm uninstall", slog.String("release", releaseName)) args := []string{ "uninstall", releaseName, @@ -202,7 +211,7 @@ func (h *Helm3Client) DeleteRelease(releaseName string) (err error) { return fmt.Errorf("helm uninstall %s invocation error: %v\n%v %v", releaseName, err, stdout, stderr) } - h.Logger.Debugf("helm release %s deleted", releaseName) + h.Logger.Debug("helm release deleted", slog.String("release", releaseName)) return } @@ -230,9 +239,9 @@ func (h *Helm3Client) ListReleasesNames() ([]string, error) { return nil, fmt.Errorf("helm list failed: %v\n%s %s", err, stdout, stderr) } - list := []struct { + var list []struct { Name string `json:"name"` - }{} + } if err := k8syaml.Unmarshal([]byte(stdout), &list); err != nil { return nil, fmt.Errorf("helm list returned invalid json: %v", err) @@ -278,12 +287,15 @@ func (h *Helm3Client) Render(releaseName string, chart string, valuesPaths []str args = append(args, setValue) } - h.Logger.Debugf("Render helm templates for chart '%s' in namespace '%s' ...", chart, namespace) + h.Logger.Debug("Render helm templates for chart ...", + slog.String("chart", chart), + slog.String("namespace", namespace)) stdout, stderr, err := h.cmd(args...) 
if err != nil { return "", fmt.Errorf("helm upgrade failed: %s:\n%s %s", err, stdout, stderr) } - h.Logger.Infof("Render helm templates for chart '%s' was successful", chart) + h.Logger.Info("Render helm templates for chart was successful", + slog.String("chart", chart)) return stdout, nil } diff --git a/pkg/helm/helm3lib/helm3lib.go b/pkg/helm/helm3lib/helm3lib.go index bacfe50c..c01115f8 100644 --- a/pkg/helm/helm3lib/helm3lib.go +++ b/pkg/helm/helm3lib/helm3lib.go @@ -2,7 +2,9 @@ package helm3lib import ( "context" + "errors" "fmt" + "log/slog" "os" "sort" "strconv" @@ -18,7 +20,7 @@ import ( "helm.sh/helm/v3/pkg/releaseutil" "helm.sh/helm/v3/pkg/storage" "helm.sh/helm/v3/pkg/storage/driver" - "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/client-go/rest" @@ -125,7 +127,7 @@ func (h *LibClient) actionConfigInit() error { // If env is empty - default storage backend ('secrets') will be used helmDriver := os.Getenv("HELM_DRIVER") - err := ac.Init(getter, options.Namespace, helmDriver, h.Logger.Debugf) + err := ac.Init(getter, options.Namespace, helmDriver, h.Logger.Debug) if err != nil { return fmt.Errorf("init helm action config: %v", err) } @@ -141,7 +143,7 @@ func (h *LibClient) initAndVersion() error { return err } - log.Infof("Helm 3 version: %s", chartutil.DefaultCapabilities.HelmVersion.Version) + log.Info("Helm 3 version", slog.String("version", chartutil.DefaultCapabilities.HelmVersion.Version)) return nil } @@ -154,7 +156,7 @@ func (h *LibClient) LastReleaseStatus(releaseName string) (revision string, stat // return nil, errors.Errorf("no revision for release %q", name) // } // that's why we also check string representation - if err == driver.ErrReleaseNotFound || strings.HasPrefix(err.Error(), "no revision for release") { + if errors.Is(err, driver.ErrReleaseNotFound) || strings.HasPrefix(err.Error(), "no revision for release") { return "0", "", fmt.Errorf("release '%s' not found\n", releaseName) } return "", "", err @@ -173,7 +175,7 @@ func (h *LibClient) UpgradeRelease(releaseName string, chartName string, valuesP } return h.upgradeRelease(releaseName, chartName, valuesPaths, setValues, namespace) } - h.Logger.Debugf("helm release %s upgraded", releaseName) + h.Logger.Debug("helm release upgraded", slog.String("version", releaseName)) return nil } @@ -217,12 +219,15 @@ func (h *LibClient) upgradeRelease(releaseName string, chartName string, valuesP resultValues = chartutil.CoalesceTables(resultValues, m) } - h.Logger.Infof("Running helm upgrade for release '%s' with chart '%s' in namespace '%s' ...", releaseName, chartName, namespace) + h.Logger.Info("Running helm upgrade for release", + slog.String("release", releaseName), + slog.String("chart", chartName), + slog.String("namespace", namespace)) histClient := action.NewHistory(actionConfig) // Max is not working!!! 
Sort the final of releases by your own // histClient.Max = 1 releases, err := histClient.Run(releaseName) - if err == driver.ErrReleaseNotFound { + if errors.Is(err, driver.ErrReleaseNotFound) { instClient := action.NewInstall(actionConfig) if namespace != "" { instClient.Namespace = namespace @@ -237,7 +242,7 @@ func (h *LibClient) upgradeRelease(releaseName string, chartName string, valuesP _, err = instClient.Run(chart, resultValues) return err } - h.Logger.Debugf("%d old releases found", len(releases)) + h.Logger.Debug("old releases found", slog.Int("count", len(releases))) if len(releases) > 0 { // https://github.com/fluxcd/helm-controller/issues/149 // looking through this issue you can find the common error: another operation (install/upgrade/rollback) is in progress @@ -245,7 +250,10 @@ func (h *LibClient) upgradeRelease(releaseName string, chartName string, valuesP releaseutil.Reverse(releases, releaseutil.SortByRevision) latestRelease := releases[0] nsReleaseName := fmt.Sprintf("%s/%s", latestRelease.Namespace, latestRelease.Name) - h.Logger.Debugf("Latest release '%s': revision: %d has status: %s", nsReleaseName, latestRelease.Version, latestRelease.Info.Status) + h.Logger.Debug("Latest release info", + slog.String("release", nsReleaseName), + slog.Int("version", latestRelease.Version), + slog.String("status", string(latestRelease.Info.Status))) if latestRelease.Info.Status.IsPending() { objectName := fmt.Sprintf("%s.%s.v%d", storage.HelmStorageType, latestRelease.Name, latestRelease.Version) kubeClient, err := actionConfig.KubernetesClientSet() @@ -255,24 +263,36 @@ func (h *LibClient) upgradeRelease(releaseName string, chartName string, valuesP // switch between storage types (memory, sql, secrets, configmaps) - with secrets and configmaps we can deal a bit more straightforward than doing a rollback switch actionConfig.Releases.Name() { case driver.ConfigMapsDriverName: - h.Logger.Debugf("ConfigMap for helm revision %d of release %s in status %s, driver %s: will be deleted", latestRelease.Version, nsReleaseName, latestRelease.Info.Status, driver.ConfigMapsDriverName) + h.Logger.Debug("ConfigMap for helm", + slog.Int("version", latestRelease.Version), + slog.String("release", nsReleaseName), + slog.String("status", string(latestRelease.Info.Status)), + slog.String("driver", driver.ConfigMapsDriverName)) err := kubeClient.CoreV1().ConfigMaps(latestRelease.Namespace).Delete(context.TODO(), objectName, metav1.DeleteOptions{}) - if err != nil && !errors.IsNotFound(err) { + if err != nil && !apierrors.IsNotFound(err) { return fmt.Errorf("couldn't delete configmap %s of release %s: %w", objectName, nsReleaseName, err) } - h.Logger.Debugf("ConfigMap %s was deleted", objectName) + h.Logger.Debug("ConfigMap was deleted", slog.String("name", objectName)) case driver.SecretsDriverName: - h.Logger.Debugf("Secret for helm revision %d of release %s in status %s, driver %s: will be deleted", latestRelease.Version, nsReleaseName, latestRelease.Info.Status, driver.SecretsDriverName) + h.Logger.Debug("Secret for helm will be deleted", + slog.Int("version", latestRelease.Version), + slog.String("release", nsReleaseName), + slog.String("status", string(latestRelease.Info.Status)), + slog.String("driver", driver.ConfigMapsDriverName)) err := kubeClient.CoreV1().Secrets(latestRelease.Namespace).Delete(context.TODO(), objectName, metav1.DeleteOptions{}) - if err != nil && !errors.IsNotFound(err) { + if err != nil && !apierrors.IsNotFound(err) { return fmt.Errorf("couldn't delete secret %s of release 
%s: %w", objectName, nsReleaseName, err) } - h.Logger.Debugf("Secret %s was deleted", objectName) + h.Logger.Debug("Secret was deleted", slog.String("name", objectName)) default: // memory and sql storages a bit more trickier - doing a rollback is justified - h.Logger.Debugf("Helm revision %d of release %s in status %s, driver %s: will be rolledback", latestRelease.Version, nsReleaseName, latestRelease.Info.Status, actionConfig.Releases.Name()) + h.Logger.Debug("Helm will be rollback", + slog.Int("version", latestRelease.Version), + slog.String("release", nsReleaseName), + slog.String("status", string(latestRelease.Info.Status)), + slog.String("driver", driver.ConfigMapsDriverName)) h.rollbackLatestRelease(releases) } } @@ -282,7 +302,10 @@ func (h *LibClient) upgradeRelease(releaseName string, chartName string, valuesP if err != nil { return fmt.Errorf("helm upgrade failed: %s\n", err) } - h.Logger.Infof("Helm upgrade for release '%s' with chart '%s' in namespace '%s' successful", releaseName, chartName, namespace) + h.Logger.Info("Helm upgrade successful", + slog.String("release", releaseName), + slog.String("chart", chartName), + slog.String("namespace", namespace)) return nil } @@ -291,14 +314,16 @@ func (h *LibClient) rollbackLatestRelease(releases []*release.Release) { latestRelease := releases[0] nsReleaseName := fmt.Sprintf("%s/%s", latestRelease.Namespace, latestRelease.Name) - h.Logger.Infof("Trying to rollback '%s'", nsReleaseName) + h.Logger.Info("Trying to rollback", slog.String("release", nsReleaseName)) if latestRelease.Version == 1 || options.HistoryMax == 1 || len(releases) == 1 { rb := action.NewUninstall(actionConfig) rb.KeepHistory = false _, err := rb.Run(latestRelease.Name) if err != nil { - h.Logger.Warnf("Failed to uninstall pending release %s: %s", nsReleaseName, err) + h.Logger.Warn("Failed to uninstall pending release", + slog.String("release", nsReleaseName), + log.Err(err)) return } } else { @@ -314,12 +339,14 @@ func (h *LibClient) rollbackLatestRelease(releases []*release.Release) { rb.CleanupOnFail = true err := rb.Run(latestRelease.Name) if err != nil { - h.Logger.Warnf("Failed to rollback pending release %s: %s", nsReleaseName, err) + h.Logger.Warn("Failed to rollback pending release", + slog.String("release", nsReleaseName), + log.Err(err)) return } } - h.Logger.Infof("Rollback '%s' successful", nsReleaseName) + h.Logger.Info("Rollback successful", slog.String("release", nsReleaseName)) } func (h *LibClient) GetReleaseValues(releaseName string) (utils.Values, error) { @@ -328,7 +355,7 @@ func (h *LibClient) GetReleaseValues(releaseName string) (utils.Values, error) { } func (h *LibClient) DeleteRelease(releaseName string) error { - h.Logger.Debugf("helm release '%s': execute helm uninstall", releaseName) + h.Logger.Debug("helm release: execute helm uninstall", slog.String("release", releaseName)) un := action.NewUninstall(actionConfig) _, err := un.Run(releaseName) @@ -336,7 +363,7 @@ func (h *LibClient) DeleteRelease(releaseName string) error { return fmt.Errorf("helm uninstall %s invocation error: %v\n", releaseName, err) } - h.Logger.Debugf("helm release %s deleted", releaseName) + h.Logger.Debug("helm release deleted", slog.String("release", releaseName)) return nil } @@ -403,7 +430,9 @@ func (h *LibClient) Render(releaseName, chartName string, valuesPaths, setValues resultValues = chartutil.CoalesceTables(resultValues, m) } - h.Logger.Debugf("Render helm templates for chart '%s' in namespace '%s' ...", chartName, namespace) + h.Logger.Debug("Render 
helm templates for chart ...", + slog.String("chart", chartName), + slog.String("namespace", namespace)) inst := newInstAction(namespace, releaseName) @@ -411,7 +440,7 @@ func (h *LibClient) Render(releaseName, chartName string, valuesPaths, setValues if err != nil { // helm render can fail because the CRD were previously created // handling this case we can reinitialize RESTClient and repeat one more time by backoff - h.actionConfigInit() + _ = h.actionConfigInit() inst = newInstAction(namespace, releaseName) rs, err = inst.Run(chart, resultValues) @@ -428,7 +457,7 @@ func (h *LibClient) Render(releaseName, chartName string, valuesPaths, setValues rs.Manifest += fmt.Sprintf("\n\n\n%v", err) } - h.Logger.Infof("Render helm templates for chart '%s' was successful", chartName) + h.Logger.Info("Render helm templates for chart was successful", slog.String("chart", chartName)) return rs.Manifest, nil } diff --git a/pkg/helm_resources_manager/helm_resources_manager.go b/pkg/helm_resources_manager/helm_resources_manager.go index 74ea75e4..4346645f 100644 --- a/pkg/helm_resources_manager/helm_resources_manager.go +++ b/pkg/helm_resources_manager/helm_resources_manager.go @@ -3,6 +3,7 @@ package helm_resources_manager import ( "context" "fmt" + "log/slog" "github.com/deckhouse/deckhouse/pkg/log" "k8s.io/apimachinery/pkg/labels" @@ -103,7 +104,8 @@ func (hm *helmResourcesManager) Ch() chan ReleaseStatusEvent { } func (hm *helmResourcesManager) StartMonitor(moduleName string, manifests []manifest.Manifest, defaultNamespace string, lastReleaseStatus func(releaseName string) (revision string, status string, err error)) { - log.Debugf("Start helm resources monitor for '%s'", moduleName) + log.Debug("Start helm resources monitor for module", + slog.String("module", moduleName)) hm.StopMonitor(moduleName) cfg := &ResourceMonitorConfig{ @@ -125,9 +127,13 @@ func (hm *helmResourcesManager) StartMonitor(moduleName string, manifests []mani } func (hm *helmResourcesManager) absentResourcesCallback(moduleName string, unexpectedStatus bool, absent []manifest.Manifest, defaultNs string) { - log.Debugf("Detect absent resources for %s", moduleName) + log.Debug("Detect absent resources for module", + slog.String("module", moduleName)) for _, m := range absent { - log.Debugf("%s/%s/%s", m.Namespace(defaultNs), m.Kind(), m.Name()) + log.Debug("absent module", + slog.String("namespace", m.Namespace(defaultNs)), + slog.String("kind", m.Kind()), + slog.String("module", m.Name())) } hm.eventCh <- ReleaseStatusEvent{ ModuleName: moduleName, diff --git a/pkg/helm_resources_manager/resources_monitor.go b/pkg/helm_resources_manager/resources_monitor.go index b9a2d282..eeaeaac4 100644 --- a/pkg/helm_resources_manager/resources_monitor.go +++ b/pkg/helm_resources_manager/resources_monitor.go @@ -3,6 +3,7 @@ package helm_resources_manager import ( "context" "fmt" + "log/slog" "math/rand" "sort" "sync" @@ -101,11 +102,13 @@ func (r *ResourcesMonitor) Start() { // Check release status status, err := r.GetHelmReleaseStatus(r.moduleName) if err != nil { - r.logger.Errorf("cannot get helm release status: %s", err) + r.logger.Error("cannot get helm release status", log.Err(err)) } if status != "deployed" { - r.logger.Debugf("Helm release %s is in unexpected status: %s", r.moduleName, status) + r.logger.Debug("Helm release is in unexpected status", + slog.String("module", r.moduleName), + slog.String("status", status)) if r.absentCb != nil { r.absentCb(r.moduleName, true, []manifest.Manifest{}, r.defaultNamespace) } @@ -114,7 +117,7 @@ 
func (r *ResourcesMonitor) Start() { // Check resources absent, err := r.AbsentResources() if err != nil { - r.logger.Errorf("cannot list helm resources: %s", err) + r.logger.Error("cannot list helm resources", log.Err(err)) } if len(absent) > 0 { @@ -140,7 +143,10 @@ func (r *ResourcesMonitor) GetHelmReleaseStatus(moduleName string) (string, erro if err != nil { return "", err } - r.logger.Debugf("Helm release %s, revision %s, status: %s", moduleName, revision, status) + r.logger.Debug("Helm release", + slog.String("module", moduleName), + slog.String("revision", revision), + slog.String("status", status)) return status, nil } @@ -280,7 +286,8 @@ func (r *ResourcesMonitor) listResources(ctx context.Context, nsgvk namespacedGV Version: nsgvk.GVK.Version, Kind: nsgvk.GVK.Kind + "List", }) - log.Debugf("List objects from cache for %v", nsgvk) + log.Debug("List objects from cache", + slog.String("nsgvk", fmt.Sprintf("%v", nsgvk))) err := r.cache.List(ctx, objList, cr_client.InNamespace(nsgvk.Namespace)) if err != nil { return nil, fmt.Errorf("couldn't list objects from cache: %v", err) diff --git a/pkg/kube_config_manager/backend/configmap/configmap.go b/pkg/kube_config_manager/backend/configmap/configmap.go index 06d6d23d..0c52a3ba 100644 --- a/pkg/kube_config_manager/backend/configmap/configmap.go +++ b/pkg/kube_config_manager/backend/configmap/configmap.go @@ -3,6 +3,7 @@ package configmap import ( "context" "fmt" + "log/slog" "strings" "time" @@ -55,7 +56,8 @@ func (b Backend) LoadConfig(ctx context.Context, _ ...string) (*config.KubeConfi } if obj == nil { - b.logger.Infof("Initial config from ConfigMap/%s: resource is not found", b.name) + b.logger.Info("Initial config from ConfigMap: resource is not found", + slog.String("name", b.name)) return nil, nil } @@ -81,9 +83,11 @@ func (b Backend) saveGlobalConfigValues(ctx context.Context, values utils.Values } if b.isDebugEnabled(ctx) { - b.logger.Infof("Save global values to ConfigMap/%s:\n%s", b.name, values.DebugString()) + b.logger.Info("Save global values to ConfigMap", + slog.String("name", b.name), + slog.String("values", values.DebugString())) } else { - b.logger.Infof("Save global values to ConfigMap/%s", b.name) + b.logger.Info("Save global values to ConfigMap", slog.String("name", b.name)) } err = b.mergeValues(ctx, globalKubeConfig.GetValuesWithGlobalName()) @@ -110,9 +114,14 @@ func (b Backend) saveModuleConfigValues(ctx context.Context, moduleName string, } if b.isDebugEnabled(ctx) { - b.logger.Infof("Save module '%s' values to ConfigMap/%s:\n%s", moduleName, b.name, values.DebugString()) + b.logger.Info("Save module values to ConfigMap", + slog.String("moduleName", moduleName), + slog.String("configMapName", b.name), + slog.String("values", values.DebugString())) } else { - b.logger.Infof("Save module '%s' values to ConfigMap/%s", moduleName, b.name) + b.logger.Info("Save module values to ConfigMap", + slog.String("moduleName", moduleName), + slog.String("configMapName", b.name)) } err := b.mergeValues(ctx, moduleKubeConfig.GetValuesWithModuleName()) //nolint: staticcheck,nolintlint @@ -334,14 +343,18 @@ func (b Backend) StartInformer(ctx context.Context, eventC chan config.Event) { b.logConfigMapEvent(ctx, obj, "add") err := b.handleConfigMapEvent(obj.(*v1.ConfigMap), eventC) if err != nil { - b.logger.Errorf("Handle ConfigMap/%s 'add' error: %s", b.name, err) + b.logger.Error("Handle ConfigMap 'add' error", + slog.String("configMapName", b.name), + log.Err(err)) } }, UpdateFunc: func(_ interface{}, obj interface{}) { 
b.logConfigMapEvent(ctx, obj, "update") err := b.handleConfigMapEvent(obj.(*v1.ConfigMap), eventC) if err != nil { - b.logger.Errorf("Handle ConfigMap/%s 'update' error: %s", b.name, err) + b.logger.Error("Handle ConfigMap 'update' error", + slog.String("configMapName", b.name), + log.Err(err)) } }, DeleteFunc: func(obj interface{}) { @@ -362,10 +375,16 @@ func (b Backend) logConfigMapEvent(ctx context.Context, obj interface{}, eventNa objYaml, err := yaml.Marshal(obj) if err != nil { - b.logger.Infof("Dump ConfigMap/%s '%s' error: %s", b.name, eventName, err) + b.logger.Info("Dump ConfigMap error", + slog.String("configMapName", b.name), + slog.String("eventName", eventName), + log.Err(err)) return } - b.logger.Infof("Dump ConfigMap/%s '%s':\n%s", b.name, eventName, objYaml) + b.logger.Info("Dump ConfigMap", + slog.String("configMapName", b.name), + slog.String("eventName", eventName), + slog.String("value", string(objYaml))) } func (b Backend) handleConfigMapEvent(obj *v1.ConfigMap, eventC chan config.Event) error { @@ -379,7 +398,9 @@ func (b Backend) handleConfigMapEvent(obj *v1.ConfigMap, eventC chan config.Even if err != nil { eventC <- config.Event{Key: "batch", Err: err} // Do not update caches to detect changes on next update. - b.logger.Errorf("ConfigMap/%s invalid: %v", b.name, err) + b.logger.Error("ConfigMap invalid", + slog.String("configMapName", b.name), + log.Err(err)) return err } diff --git a/pkg/kube_config_manager/kube_config_manager.go b/pkg/kube_config_manager/kube_config_manager.go index cfa2cf5e..fdf8478b 100644 --- a/pkg/kube_config_manager/kube_config_manager.go +++ b/pkg/kube_config_manager/kube_config_manager.go @@ -3,6 +3,7 @@ package kube_config_manager import ( "context" "fmt" + "log/slog" "reflect" "strconv" "sync" @@ -39,7 +40,7 @@ type KubeConfigManager struct { func NewKubeConfigManager(ctx context.Context, bk backend.ConfigHandler, runtimeConfig *runtimeConfig.Config, logger *log.Logger) *KubeConfigManager { cctx, cancel := context.WithCancel(ctx) logger = logger.With("component", "KubeConfigManager") - logger.With("backend", fmt.Sprintf("%T", bk)).Infof("Setup KubeConfigManager backend") + logger.With("backend", fmt.Sprintf("%T", bk)).Info("Setup KubeConfigManager backend") // Runtime config to enable logging all events from the ConfigMap at runtime. if runtimeConfig != nil { @@ -170,10 +171,10 @@ func (kcm *KubeConfigManager) isGlobalChanged(newConfig *config.KubeConfig) bool // Fire event when global section is deleted: ConfigMap has no global section but global config is cached. // Note: no checksum checking here, "save" operations can't delete global section. if kcm.currentConfig.Global != nil { - kcm.logger.Infof("Global section deleted") + kcm.logger.Info("Global section deleted") return true } - kcm.logger.Debugf("Global section is empty") + kcm.logger.Debug("Global section is empty") return false } @@ -182,18 +183,18 @@ func (kcm *KubeConfigManager) isGlobalChanged(newConfig *config.KubeConfig) bool if kcm.knownChecksums.HasEqualChecksum(utils.GlobalValuesKey, newChecksum) { // Remove known checksum, do not fire event on self-update. kcm.knownChecksums.Remove(utils.GlobalValuesKey, newChecksum) - kcm.logger.Debugf("Global section self-update") + kcm.logger.Debug("Global section self-update") return false } if kcm.currentConfig.Global == nil { // "global" section is added after initialization. 
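Editor's note, sketch only: the conversion pattern this diff applies throughout, shown with the standard library's log/slog. The repo's github.com/deckhouse/deckhouse/pkg/log logger is assumed to accept the same message-plus-attributes form, with its log.Err(err) helper playing the role of slog.Any("error", err) here.

package main

import (
	"errors"
	"log/slog"
	"os"
)

func main() {
	logger := slog.New(slog.NewTextHandler(os.Stderr, nil))

	name := "addon-operator"
	err := errors.New("resource is not found")

	// Before (printf-style): logger.Infof("Initial config from ConfigMap/%s: %v", name, err)
	// After: a constant message plus typed attributes, so log pipelines can
	// filter on fields instead of parsing interpolated strings.
	logger.Info("Initial config from ConfigMap: resource is not found",
		slog.String("name", name),
		slog.Any("error", err))
}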
- kcm.logger.Infof("Global section added") + kcm.logger.Info("Global section added") return true } // Consider "global" change when new checksum is not equal to the saved. if kcm.currentConfig.Global.Checksum != newChecksum { - kcm.logger.Infof("Global section updated") + kcm.logger.Info("Global section updated") return true } @@ -208,7 +209,9 @@ func (kcm *KubeConfigManager) handleConfigEvent(obj config.Event) { kcm.configEventCh <- config.KubeConfigEvent{ Type: config.KubeConfigInvalid, } - kcm.logger.Errorf("Config/%s invalid: %v", obj.Key, obj.Err) + kcm.logger.Error("Config invalid", + slog.String("name", obj.Key), + log.Err(obj.Err)) return } @@ -248,7 +251,7 @@ func (kcm *KubeConfigManager) handleConfigEvent(obj config.Event) { moduleName := obj.Key moduleCfg := obj.Config.Modules[obj.Key] if obj.Op == config.EventDelete { - kcm.logger.Infof("Module section deleted: %+v", moduleName) + kcm.logger.Info("Module section deleted", slog.String("moduleName", moduleName)) modulesChanged = append(modulesChanged, moduleName) if kcm.currentConfig.Modules[moduleName].GetEnabled() != "" && kcm.currentConfig.Modules[moduleName].GetEnabled() != "n/d" { modulesStateChanged = append(modulesStateChanged, moduleName) @@ -275,17 +278,18 @@ func (kcm *KubeConfigManager) handleConfigEvent(obj config.Event) { if kcm.currentConfig.Modules[moduleName].GetEnabled() != moduleCfg.GetEnabled() { modulesStateChanged = append(modulesStateChanged, moduleName) } - kcm.logger.Infof("Module section '%s' changed. Enabled flag transition: %s--%s", - moduleName, - kcm.currentConfig.Modules[moduleName].GetEnabled(), - moduleCfg.GetEnabled(), - ) + kcm.logger.Info("Module section changed. Enabled flag transition.", + slog.String("moduleName", moduleName), + slog.String("current", kcm.currentConfig.Modules[moduleName].GetEnabled()), + slog.String("previous", moduleCfg.GetEnabled())) } else { modulesChanged = append(modulesChanged, moduleName) if moduleCfg.GetEnabled() != "" && moduleCfg.GetEnabled() != "n/d" { modulesStateChanged = append(modulesStateChanged, moduleName) } - kcm.logger.Infof("Module section '%s' added. Enabled flag: %s", moduleName, moduleCfg.GetEnabled()) + kcm.logger.Info("Module section added", + slog.String("moduleName", moduleName), + slog.String("enabledFlag", moduleCfg.GetEnabled())) } } @@ -306,7 +310,7 @@ func (kcm *KubeConfigManager) handleBatchConfigEvent(obj config.Event) { kcm.configEventCh <- config.KubeConfigEvent{ Type: config.KubeConfigInvalid, } - kcm.logger.Errorf("Batch Config invalid: %v", obj.Err) + kcm.logger.Error("Batch Config invalid", log.Err(obj.Err)) return } @@ -349,17 +353,18 @@ func (kcm *KubeConfigManager) handleBatchConfigEvent(obj config.Event) { if kcm.currentConfig.Modules[moduleName].GetEnabled() != moduleCfg.GetEnabled() { modulesStateChanged = append(modulesStateChanged, moduleName) } - kcm.logger.Infof("Module section '%s' changed. Enabled flag transition: %s--%s", - moduleName, - kcm.currentConfig.Modules[moduleName].GetEnabled(), - moduleCfg.GetEnabled(), - ) + kcm.logger.Info("Module section changed. Enabled flag transition", + slog.String("moduleName", moduleName), + slog.String("current", kcm.currentConfig.Modules[moduleName].GetEnabled()), + slog.String("previous", moduleCfg.GetEnabled())) } else { modulesChanged = append(modulesChanged, moduleName) if moduleCfg.GetEnabled() != "" && moduleCfg.GetEnabled() != "n/d" { modulesStateChanged = append(modulesStateChanged, moduleName) } - kcm.logger.Infof("Module section '%s' added. 
Enabled flag: %s", moduleName, moduleCfg.GetEnabled()) + kcm.logger.Info("Module section added", + slog.String("moduleName", moduleName), + slog.String("enabledFlag", moduleCfg.GetEnabled())) } } } @@ -372,7 +377,8 @@ func (kcm *KubeConfigManager) handleBatchConfigEvent(obj config.Event) { modulesStateChanged = append(modulesStateChanged, moduleName) } } - kcm.logger.Infof("Module sections deleted: %+v", currentModuleNames) + kcm.logger.Info("Module sections deleted", + slog.String("modules", fmt.Sprintf("%+v", currentModuleNames))) } // Update state after successful parsing. @@ -391,7 +397,7 @@ func (kcm *KubeConfigManager) handleBatchConfigEvent(obj config.Event) { } func (kcm *KubeConfigManager) Start() { - kcm.logger.Debugf("Start kube config manager") + kcm.logger.Debug("Start kube config manager") go kcm.start() } @@ -411,7 +417,7 @@ func (kcm *KubeConfigManager) start() { } case <-kcm.ctx.Done(): - kcm.logger.Debugf("Stop kube config manager") + kcm.logger.Debug("Stop kube config manager") return } } diff --git a/pkg/module_manager/go_hook/patchable_values.go b/pkg/module_manager/go_hook/patchable_values.go index f9c5216c..f27ace19 100644 --- a/pkg/module_manager/go_hook/patchable_values.go +++ b/pkg/module_manager/go_hook/patchable_values.go @@ -3,6 +3,7 @@ package go_hook import ( "encoding/json" "fmt" + "log/slog" "strings" "github.com/deckhouse/deckhouse/pkg/log" @@ -66,7 +67,9 @@ func (p *PatchableValues) Set(path string, value interface{}) { if err != nil { // The struct returned from a Go hook expected to be marshalable in all cases. // TODO(nabokihms): return a meaningful error. - log.Errorf("patch path %s: %v\n", path, err) + log.Error("patch path", + slog.String("path", path), + log.Err(err)) return } diff --git a/pkg/module_manager/loader/fs/fs.go b/pkg/module_manager/loader/fs/fs.go index a53c1348..7fd7bb13 100644 --- a/pkg/module_manager/loader/fs/fs.go +++ b/pkg/module_manager/loader/fs/fs.go @@ -1,8 +1,10 @@ package fs import ( + "errors" "fmt" "io/fs" + "log/slog" "os" "path/filepath" "regexp" @@ -72,7 +74,7 @@ func (fl *FileSystemLoader) getBasicModule(definition moduleDefinition, commonSt return m, nil } -// reads single directory and returns BasicModule +// LoadModule reads single directory and returns BasicModule func (fl *FileSystemLoader) LoadModule(_, modulePath string) (*modules.BasicModule, error) { // the module's parent directory var modulesDir string @@ -188,9 +190,11 @@ func resolveDirEntry(dirPath string, entry os.DirEntry) (string, string, error) // Check if entry is a symlink to a directory. targetPath, err := resolveSymlinkToDir(dirPath, entry) if err != nil { - if e, ok := err.(*fs.PathError); ok { + var e *fs.PathError + if errors.As(err, &e) { if e.Err.Error() == "no such file or directory" { - log.Warnf("Symlink target %q does not exist. Ignoring module", dirPath) + log.Warn("Symlink target does not exist. 
Ignoring module", + slog.String("dir", dirPath)) return "", "", nil } } @@ -203,7 +207,8 @@ func resolveDirEntry(dirPath string, entry os.DirEntry) (string, string, error) } if name != utils.ValuesFileName { - log.Warnf("Ignore '%s' while searching for modules", absPath) + log.Warn("Ignore dir while searching for modules", + slog.String("dir", absPath)) } return "", "", nil } diff --git a/pkg/module_manager/models/hooks/kind/batch_hook.go b/pkg/module_manager/models/hooks/kind/batch_hook.go index 6b9284af..61aea5c5 100644 --- a/pkg/module_manager/models/hooks/kind/batch_hook.go +++ b/pkg/module_manager/models/hooks/kind/batch_hook.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/json" "fmt" + "log/slog" "os" "os/exec" "path/filepath" @@ -116,7 +117,9 @@ func (h *BatchHook) Execute(configVersion string, bContext []bindingcontext.Bind err := os.Remove(f) if err != nil { h.Hook.Logger.With("hook", h.GetName()). - Errorf("Remove tmp file '%s': %s", f, err) + Error("Remove tmp file", + slog.String("file", f), + log.Err(err)) } } }() @@ -346,7 +349,9 @@ func (h *BatchHook) prepareConfigValuesJsonFile(moduleSafeName string, configVal return "", err } - h.Hook.Logger.Debugf("Prepared module %s hook config values:\n%s", moduleSafeName, configValues.DebugString()) + h.Hook.Logger.Debug("Prepared module hook config values", + slog.String("module", moduleSafeName), + slog.String("values", configValues.DebugString())) return path, nil } @@ -363,7 +368,9 @@ func (h *BatchHook) prepareValuesJsonFile(moduleSafeName string, values utils.Va return "", err } - h.Hook.Logger.Debugf("Prepared module %s hook values:\n%s", moduleSafeName, values.DebugString()) + h.Hook.Logger.Debug("Prepared module hook values", + slog.String("module", moduleSafeName), + slog.String("values", values.DebugString())) return path, nil } diff --git a/pkg/module_manager/models/hooks/kind/shellhook.go b/pkg/module_manager/models/hooks/kind/shellhook.go index d87758d1..626a15dc 100644 --- a/pkg/module_manager/models/hooks/kind/shellhook.go +++ b/pkg/module_manager/models/hooks/kind/shellhook.go @@ -2,6 +2,7 @@ package kind import ( "fmt" + "log/slog" "os" "path/filepath" "strings" @@ -111,7 +112,9 @@ func (sh *ShellHook) Execute(configVersion string, bContext []bindingcontext.Bin err := os.Remove(f) if err != nil { sh.Hook.Logger.With("hook", sh.GetName()). - Errorf("Remove tmp file '%s': %s", f, err) + Error("Remove tmp file", + slog.String("file", f), + log.Err(err)) } } }() @@ -184,15 +187,21 @@ func (sh *ShellHook) getConfig() (configOutput []byte, err error) { WithLogger(sh.Logger.Named("executor")). 
WithCMDStdout(nil) - sh.Hook.Logger.Debugf("Executing hook in: '%s'", strings.Join(args, " ")) + sh.Hook.Logger.Debug("Executing hook", + slog.String("args", strings.Join(args, " "))) output, err := cmd.Output() if err != nil { - sh.Hook.Logger.Debugf("Hook '%s' config failed: %v, output:\n%s", sh.Name, err, string(output)) + sh.Hook.Logger.Debug("Hook config failed", + slog.String("hook", sh.Name), + log.Err(err), + slog.String("output", string(output))) return nil, err } - sh.Hook.Logger.Debugf("Hook '%s' config output:\n%s", sh.Name, string(output)) + sh.Hook.Logger.Debug("Hook config output", + slog.String("hook", sh.Name), + slog.String("output", string(output))) return output, nil } @@ -489,7 +498,9 @@ func (sh *ShellHook) prepareConfigValuesJsonFile(moduleSafeName string, configVa return "", err } - sh.Hook.Logger.Debugf("Prepared module %s hook config values:\n%s", moduleSafeName, configValues.DebugString()) + sh.Hook.Logger.Debug("Prepared module hook config values", + slog.String("module", moduleSafeName), + slog.String("values", configValues.DebugString())) return path, nil } @@ -506,7 +517,9 @@ func (sh *ShellHook) prepareValuesJsonFile(moduleSafeName string, values utils.V return "", err } - sh.Hook.Logger.Debugf("Prepared module %s hook values:\n%s", moduleSafeName, values.DebugString()) + sh.Hook.Logger.Debug("Prepared module hook values", + slog.String("module", moduleSafeName), + slog.String("values", values.DebugString())) return path, nil } diff --git a/pkg/module_manager/models/modules/basic.go b/pkg/module_manager/models/modules/basic.go index 33321c55..03a57a18 100644 --- a/pkg/module_manager/models/modules/basic.go +++ b/pkg/module_manager/models/modules/basic.go @@ -195,11 +195,11 @@ func (bm *BasicModule) ResetState() { // RegisterHooks find and registers all module hooks from a filesystem or GoHook Registry func (bm *BasicModule) RegisterHooks(logger *log.Logger) ([]*hooks.ModuleHook, error) { if bm.hooks.registered { - logger.Debugf("Module hooks already registered") + logger.Debug("Module hooks already registered") return nil, nil } - logger.Debugf("Search and register hooks") + logger.Debug("Search and register hooks") hks, err := bm.searchAndRegisterHooks(logger) if err != nil { @@ -263,7 +263,8 @@ func (bm *BasicModule) searchModuleShellHooks() (hks []*kind.ShellHook, err erro // sort hooks by path sort.Strings(hooksRelativePaths) - bm.logger.Debugf("Hook paths: %+v", hooksRelativePaths) + bm.logger.Debug("Hook paths", + slog.Any("paths", hooksRelativePaths)) for _, hookPath := range hooksRelativePaths { hookName, err := filepath.Rel(filepath.Dir(bm.Path), hookPath) @@ -360,7 +361,6 @@ func RecursiveGetBatchHookExecutablePaths(dir string, logger *log.Logger) ([]str } var ( - ErrFileHasNotMetRequirements = errors.New("file has not met requirements") ErrFileHasWrongExtension = errors.New("file has wrong extension") ErrFileIsNotBatchHook = errors.New("file is not batch hook") ErrFileNoExecutablePermissions = errors.New("no executable permissions, chmod +x is required to run this hook") @@ -409,10 +409,12 @@ func (bm *BasicModule) searchAndRegisterHooks(logger *log.Logger) ([]*hooks.Modu return nil, fmt.Errorf("search module hooks failed: %w", err) } - logger.Debugf("Found %d hooks", len(hks)) + logger.Debug("Found hooks", slog.Int("count", len(hks))) if logger.GetLevel() == log.LevelDebug { for _, h := range hks { - logger.Debugf("ModuleHook: Name=%s, Path=%s", h.GetName(), h.GetPath()) + logger.Debug("ModuleHook", + slog.String("name", h.GetName()), + 
slog.String("path", h.GetPath())) } } @@ -445,7 +447,9 @@ func (bm *BasicModule) searchAndRegisterHooks(logger *log.Logger) ([]*hooks.Modu // register module hook in indexes bm.hooks.AddHook(moduleHook) - hookLogEntry.Debugf("Module hook from '%s'. Bindings: %s", moduleHook.GetPath(), moduleHook.GetConfigDescription()) + hookLogEntry.Debug("Module hook", + slog.String("path", moduleHook.GetPath()), + slog.String("bindings", moduleHook.GetConfigDescription())) } return hks, nil @@ -568,7 +572,9 @@ func (bm *BasicModule) RunEnabledScript(tmpDir string, precedingEnabledModules [ enabledScriptPath := filepath.Join(bm.Path, "enabled") configValuesPath, err := bm.prepareConfigValuesJsonFile(tmpDir) if err != nil { - logEntry.Errorf("Prepare CONFIG_VALUES_PATH file for '%s': %s", enabledScriptPath, err) + logEntry.Error("Prepare CONFIG_VALUES_PATH file", + slog.String("path", enabledScriptPath), + log.Err(err)) return false, err } defer func() { @@ -578,13 +584,17 @@ func (bm *BasicModule) RunEnabledScript(tmpDir string, precedingEnabledModules [ err := os.Remove(configValuesPath) if err != nil { bm.logger.With("module", bm.Name). - Errorf("Remove tmp file '%s': %s", configValuesPath, err) + Error("Remove tmp file", + slog.String("path", enabledScriptPath), + log.Err(err)) } }() valuesPath, err := bm.prepareValuesJsonFileForEnabledScript(tmpDir, precedingEnabledModules) if err != nil { - logEntry.Errorf("Prepare VALUES_PATH file for '%s': %s", enabledScriptPath, err) + logEntry.Error("Prepare VALUES_PATH file", + slog.String("path", enabledScriptPath), + log.Err(err)) return false, err } defer func() { @@ -594,13 +604,17 @@ func (bm *BasicModule) RunEnabledScript(tmpDir string, precedingEnabledModules [ err := os.Remove(valuesPath) if err != nil { bm.logger.With("module", bm.Name). - Errorf("Remove tmp file '%s': %s", configValuesPath, err) + Error("Remove tmp file", + slog.String("path", configValuesPath), + log.Err(err)) } }() enabledResultFilePath, err := bm.prepareModuleEnabledResultFile(tmpDir) if err != nil { - logEntry.Errorf("Prepare MODULE_ENABLED_RESULT file for '%s': %s", enabledScriptPath, err) + logEntry.Error("Prepare MODULE_ENABLED_RESULT file", + slog.String("path", enabledScriptPath), + log.Err(err)) return false, err } defer func() { @@ -610,11 +624,15 @@ func (bm *BasicModule) RunEnabledScript(tmpDir string, precedingEnabledModules [ err := os.Remove(enabledResultFilePath) if err != nil { bm.logger.With("module", bm.Name). - Errorf("Remove tmp file '%s': %s", configValuesPath, err) + Error("Remove tmp file", + slog.String("path", configValuesPath), + log.Err(err)) } }() - logEntry.Debugf("Execute enabled script '%s', preceding modules: %v", enabledScriptPath, precedingEnabledModules) + logEntry.Debug("Execute enabled script", + slog.String("path", enabledScriptPath), + slog.Any("modules", precedingEnabledModules)) envs := make([]string, 0) envs = append(envs, os.Environ()...) 
@@ -645,13 +663,17 @@ func (bm *BasicModule) RunEnabledScript(tmpDir string, precedingEnabledModules [ bm.dc.MetricStorage.GaugeSet("{PREFIX}module_hook_run_max_rss_bytes", float64(usage.MaxRss)*1024, metricLabels) } if err != nil { - logEntry.Errorf("Fail to run enabled script '%s': %s", enabledScriptPath, err) + logEntry.Error("Fail to run enabled script", + slog.String("path", enabledScriptPath), + log.Err(err)) return false, err } moduleEnabled, err := bm.readModuleEnabledResult(enabledResultFilePath) if err != nil { - logEntry.Errorf("Read enabled result from '%s': %s", enabledScriptPath, err) + logEntry.Error("Read enabled result", + slog.String("path", enabledScriptPath), + log.Err(err)) return false, fmt.Errorf("bad enabled result") } @@ -659,7 +681,9 @@ func (bm *BasicModule) RunEnabledScript(tmpDir string, precedingEnabledModules [ if moduleEnabled { result = "Enabled" } - logEntry.Infof("Enabled script run successful, result '%v', module '%s'", moduleEnabled, result) + logEntry.Info("Enabled script run successful", + slog.Bool("result", moduleEnabled), + slog.String("status", result)) bm.state.enabledScriptResult = &moduleEnabled return moduleEnabled, nil } @@ -707,7 +731,9 @@ func (bm *BasicModule) prepareValuesJsonFileWith(tmpdir string, values utils.Val return "", err } - bm.logger.Debugf("Prepared module %s hook values:\n%s", bm.Name, values.DebugString()) + bm.logger.Debug("Prepared module hook values", + slog.String("module", bm.Name), + slog.String("values", values.DebugString())) return path, nil } @@ -752,7 +778,9 @@ func (bm *BasicModule) prepareConfigValuesJsonFile(tmpDir string) (string, error return "", err } - bm.logger.Debugf("Prepared module %s hook config values:\n%s", bm.Name, v.DebugString()) + bm.logger.Debug("Prepared module hook config values", + slog.String("module", bm.Name), + slog.String("values", v.DebugString())) return path, nil } @@ -776,7 +804,8 @@ func (bm *BasicModule) executeHook(h *hooks.ModuleHook, bindingType sh_op_types. logEntry.Log(context.Background(), logStartLevel.Level(), "Module hook start", slog.String(bm.Name, h.GetName())) for _, info := range h.GetHookController().SnapshotsInfo() { - logEntry.Debugf("snapshot info: %s", info) + logEntry.Debug("snapshot info", + slog.String("value", info)) } prefixedConfigValues := bm.valuesStorage.GetConfigValues(true) @@ -846,7 +875,8 @@ func (bm *BasicModule) executeHook(h *hooks.ModuleHook, bindingType sh_op_types. } if configValuesPatchResult.ValuesChanged { - logEntry.Debugf("Module hook '%s': validate module config values before update", h.GetName()) + logEntry.Debug("Module hook: validate module config values before update", + slog.String("module", h.GetName())) // Validate merged static and new values. newValues, validationErr := bm.valuesStorage.GenerateNewConfigValues(configValuesPatchResult.Values, true) if validationErr != nil { @@ -858,13 +888,18 @@ func (bm *BasicModule) executeHook(h *hooks.ModuleHook, bindingType sh_op_types. 
err := bm.dc.KubeConfigManager.SaveConfigValues(bm.Name, configValuesPatchResult.Values) if err != nil { - logEntry.Debugf("Module hook '%s' kube module config values stay unchanged:\n%s", h.GetName(), bm.valuesStorage.GetConfigValues(false).DebugString()) + logEntry.Debug("Module hook kube module config values stay unchanged", + slog.String("module", h.GetName()), + slog.String("values", bm.valuesStorage.GetConfigValues(false).DebugString())) return fmt.Errorf("module hook '%s': set kube module config failed: %s", h.GetName(), err) } bm.valuesStorage.SaveConfigValues(newValues) - logEntry.Debugf("Module hook '%s': kube module '%s' config values updated:\n%s", h.GetName(), bm.Name, bm.valuesStorage.GetConfigValues(false).DebugString()) + logEntry.Debug("Module hook: kube module config values updated", + slog.String("hook", h.GetName()), + slog.String("module", bm.Name), + slog.String("values", bm.valuesStorage.GetConfigValues(false).DebugString())) } } @@ -876,7 +911,8 @@ func (bm *BasicModule) executeHook(h *hooks.ModuleHook, bindingType sh_op_types. return fmt.Errorf("module hook '%s': dynamic module values update error: %s", h.GetName(), err) } if valuesPatchResult.ValuesChanged { - logEntry.Debugf("Module hook '%s': validate module values before update", h.GetName()) + logEntry.Debug("Module hook: validate module values before update", + slog.String("module", h.GetName())) // Validate schema for updated module values validationErr := bm.valuesStorage.validateValues(valuesPatchResult.Values) @@ -894,11 +930,16 @@ func (bm *BasicModule) executeHook(h *hooks.ModuleHook, bindingType sh_op_types. return fmt.Errorf("error on commit values: %w", err) } - logEntry.Debugf("Module hook '%s': dynamic module '%s' values updated:\n%s", h.GetName(), bm.Name, bm.valuesStorage.GetValues(false).DebugString()) + logEntry.Debug("Module hook: dynamic module values updated", + slog.String("hook", h.GetName()), + slog.String("module", bm.Name), + slog.String("values", bm.valuesStorage.GetValues(false).DebugString())) } } - logEntry.Debugf("Module hook success %s/%s", bm.Name, h.GetName()) + logEntry.Debug("Module hook success", + slog.String("module", bm.Name), + slog.String("hook", h.GetName())) return nil } @@ -1061,7 +1102,9 @@ func (bm *BasicModule) Validate() error { valuesKey := utils.ModuleNameToValuesKey(bm.GetName()) restoredName := utils.ModuleNameFromValuesKey(valuesKey) - bm.logger.Infof("Validating module %q from %q", bm.GetName(), bm.GetPath()) + bm.logger.Info("Validating module", + slog.String("module", bm.GetName()), + slog.String("path", bm.GetPath())) if bm.GetName() != restoredName { return fmt.Errorf("'%s' name should be in kebab-case and be restorable from camelCase: consider renaming to '%s'", bm.GetName(), restoredName) diff --git a/pkg/module_manager/models/modules/global.go b/pkg/module_manager/models/modules/global.go index 606fcb7b..08676840 100644 --- a/pkg/module_manager/models/modules/global.go +++ b/pkg/module_manager/models/modules/global.go @@ -3,6 +3,7 @@ package modules import ( "encoding/json" "fmt" + "log/slog" "os" "path/filepath" "sort" @@ -83,7 +84,7 @@ func (gm *GlobalModule) WithLogger(logger *log.Logger) { // RegisterHooks finds and registers global hooks func (gm *GlobalModule) RegisterHooks() ([]*hooks.GlobalHook, error) { - gm.logger.Debugf("Search and register global hooks") + gm.logger.Debug("Search and register global hooks") hks, err := gm.searchAndRegisterHooks() if err != nil { @@ -171,7 +172,7 @@ func (gm *GlobalModule) executeHook(h *hooks.GlobalHook, 
bindingType sh_op_types logEntry := utils.EnrichLoggerWithLabels(gm.logger, logLabels) for _, info := range h.GetHookController().SnapshotsInfo() { - logEntry.Debugf("snapshot info: %s", info) + logEntry.Debug("snapshot info", slog.String("value", info)) } prefixedConfigValues := gm.valuesStorage.GetConfigValues(true) @@ -218,7 +219,8 @@ func (gm *GlobalModule) executeHook(h *hooks.GlobalHook, bindingType sh_op_types } if configValuesPatchResult != nil && configValuesPatchResult.ValuesChanged { - logEntry.Debugf("Global hook '%s': validate global config values before update", h.GetName()) + logEntry.Debug("Global hook: validate global config values before update", + slog.String("hook", h.GetName())) // Validate merged static and new values. // TODO: probably, we have to replace with with some transaction method on valuesStorage newValues, validationErr := gm.valuesStorage.GenerateNewConfigValues(configValuesPatchResult.Values, true) @@ -228,14 +230,17 @@ func (gm *GlobalModule) executeHook(h *hooks.GlobalHook, bindingType sh_op_types err := gm.dc.KubeConfigManager.SaveConfigValues(utils.GlobalValuesKey, configValuesPatchResult.Values) if err != nil { - logEntry.Debugf("Global hook '%s' kube config global values stay unchanged:\n%s", h.GetName(), gm.valuesStorage.GetConfigValues(false).DebugString()) + logEntry.Debug("Global hook kube config global values stay unchanged", + slog.String("hook", h.GetName()), + slog.String("value", gm.valuesStorage.GetConfigValues(false).DebugString())) return fmt.Errorf("global hook '%s': set kube config failed: %s", h.GetName(), err) } gm.valuesStorage.SaveConfigValues(newValues) - logEntry.Debugf("Global hook '%s': kube config global values updated", h.GetName()) - logEntry.Debugf("New kube config global values:\n%s\n", gm.valuesStorage.GetConfigValues(false).DebugString()) + logEntry.Debug("Global hook: kube config global values updated", slog.String("hook", h.GetName())) + logEntry.Debug("New kube config global values", + slog.String("values", gm.valuesStorage.GetConfigValues(false).DebugString())) } // Apply patches for *Enabled keys. @@ -256,7 +261,8 @@ func (gm *GlobalModule) executeHook(h *hooks.GlobalHook, bindingType sh_op_types // MemoryValuesPatch from global hook can contains patches for *Enabled keys // and no patches for 'global' section — valuesPatchResult will be nil in this case. if valuesPatchResult != nil && valuesPatchResult.ValuesChanged { - logEntry.Debugf("Global hook '%s': validate global values before update", h.GetName()) + logEntry.Debug("Global hook: validate global values before update", + slog.String("hook", h.GetName())) validationErr := gm.valuesStorage.validateValues(valuesPatchResult.Values) if validationErr != nil { return fmt.Errorf("cannot apply values patch for global values: %w", validationErr) @@ -268,8 +274,9 @@ func (gm *GlobalModule) executeHook(h *hooks.GlobalHook, bindingType sh_op_types return fmt.Errorf("error on commit values: %w", err) } - logEntry.Debugf("Global hook '%s': kube global values updated", h.GetName()) - logEntry.Debugf("New global values:\n%s", gm.valuesStorage.GetValues(false).DebugString()) + logEntry.Debug("Global hook: kube global values updated", slog.String("hook", h.GetName())) + logEntry.Debug("New global values", + slog.String("values", gm.valuesStorage.GetValues(false).DebugString())) } // Apply patches for *Enabled keys. 
@@ -416,10 +423,12 @@ func (gm *GlobalModule) searchAndRegisterHooks() ([]*hooks.GlobalHook, error) { return nil, fmt.Errorf("search module hooks failed: %w", err) } - gm.logger.Debugf("Found %d global hooks", len(hks)) + gm.logger.Debug("Found global hooks", slog.Int("count", len(hks))) if gm.logger.GetLevel() == log.LevelDebug { for _, h := range hks { - gm.logger.Debugf("GlobalHook: Name=%s, Path=%s", h.GetName(), h.GetPath()) + gm.logger.Debug("GlobalHook", + slog.String("hook", h.GetName()), + slog.String("path", h.GetPath())) } } @@ -452,7 +461,9 @@ func (gm *GlobalModule) searchAndRegisterHooks() ([]*hooks.GlobalHook, error) { gm.byBinding[binding] = append(gm.byBinding[binding], globalHook) } - hookLogEntry.Debugf("Module hook from '%s'. Bindings: %s", globalHook.GetPath(), globalHook.GetConfigDescription()) + hookLogEntry.Debug("Module hook from path", + slog.String("path", globalHook.GetPath()), + slog.String("bindings", globalHook.GetConfigDescription())) } return hks, nil @@ -461,7 +472,7 @@ func (gm *GlobalModule) searchAndRegisterHooks() ([]*hooks.GlobalHook, error) { // searchGlobalHooks recursively find all executables in hooksDir. Absent hooksDir is not an error. func (gm *GlobalModule) searchGlobalHooks() (hks []*hooks.GlobalHook, err error) { if gm.hooksDir == "" { - gm.logger.Warnf("Global hooks directory path is empty! No global hooks to load.") + gm.logger.Warn("Global hooks directory path is empty! No global hooks to load.") return nil, nil } @@ -521,7 +532,8 @@ func (gm *GlobalModule) searchGlobalShellHooks(hooksDir string) (hks []*kind.She // sort hooks by path sort.Strings(hooksRelativePaths) - gm.logger.Debugf("Hook paths: %+v", hooksRelativePaths) + gm.logger.Debug("Hook paths", + slog.Any("paths", hooksRelativePaths)) for _, hookPath := range hooksRelativePaths { hookName, err := filepath.Rel(hooksDir, hookPath) @@ -545,7 +557,9 @@ func (gm *GlobalModule) searchGlobalShellHooks(hooksDir string) (hks []*kind.She if len(hks) > 0 { count = strconv.Itoa(len(hks)) } - gm.logger.Infof("Found %s global shell hooks in '%s'", count, hooksDir) + gm.logger.Info("Found global shell hooks in dir", + slog.String("count", count), + slog.String("dir", hooksDir)) return } @@ -570,7 +584,8 @@ func (gm *GlobalModule) searchGlobalBatchHooks(hooksDir string) (hks []*kind.Bat // sort hooks by path sort.Strings(hooksRelativePaths) - gm.logger.Debugf("Hook paths: %+v", hooksRelativePaths) + gm.logger.Debug("Hook paths", + slog.Any("path", hooksRelativePaths)) for _, hookPath := range hooksRelativePaths { hookName, err := filepath.Rel(hooksDir, hookPath) @@ -596,7 +611,9 @@ func (gm *GlobalModule) searchGlobalBatchHooks(hooksDir string) (hks []*kind.Bat count = strconv.Itoa(len(hks)) } - gm.logger.Infof("Found %s global shell hooks in '%s'", count, hooksDir) + gm.logger.Info("Found global shell hooks in dir", + slog.String("count", count), + slog.String("dir", hooksDir)) return } @@ -609,7 +626,8 @@ func (gm *GlobalModule) searchGlobalGoHooks() ([]*kind.GoHook, error) { if len(goHooks) > 0 { count = strconv.Itoa(len(goHooks)) } - gm.logger.Infof("Found %s global Go hooks", count) + gm.logger.Info("Found global Go hooks", + slog.String("count", count)) return goHooks, nil } diff --git a/pkg/module_manager/models/modules/helm.go b/pkg/module_manager/models/modules/helm.go index 926fcd14..c62e89d7 100644 --- a/pkg/module_manager/models/modules/helm.go +++ b/pkg/module_manager/models/modules/helm.go @@ -3,6 +3,7 @@ package modules import ( "context" "fmt" + "log/slog" "os" "path/filepath" 
"runtime/trace" @@ -93,7 +94,8 @@ func NewHelmModule(bm *BasicModule, namespace string, tmpDir string, deps *HelmM } if !isHelm { - hm.logger.Infof("module %q has neither Chart.yaml nor templates/ dir, is't not a helm chart", bm.Name) + hm.logger.Info("module has neither Chart.yaml nor templates/ dir, is't not a helm chart", + slog.String("name", bm.Name)) return nil, nil } @@ -121,7 +123,7 @@ func (hm *HelmModule) isHelmChart() (bool, error) { if err == nil { return true, hm.createChartYaml(chartPath) } - if err != nil && os.IsNotExist(err) { + if os.IsNotExist(err) { // if templates not exists - it's not a helm module return false, nil } @@ -206,7 +208,7 @@ func (hm *HelmModule) RunHelmInstall(logLabels map[string]string) error { if err != nil { return err } - logEntry.Debugf("chart has %d resources", len(manifests)) + logEntry.Debug("chart has resources", slog.Int("count", len(manifests))) // Skip upgrades if nothing is changed var runUpgradeRelease bool @@ -275,7 +277,7 @@ func (hm *HelmModule) shouldRunHelmUpgrade(helmClient client.HelmClient, release revision, status, err := helmClient.LastReleaseStatus(releaseName) if revision == "0" { - logEntry.Debugf("helm release '%s' not exists: should run upgrade", releaseName) + logEntry.Debug("helm release not exists: should run upgrade", slog.String("release", releaseName)) return true, nil } @@ -285,21 +287,26 @@ func (hm *HelmModule) shouldRunHelmUpgrade(helmClient client.HelmClient, release // Run helm upgrade if last release isn't `deployed` if strings.ToLower(status) != "deployed" { - logEntry.Debugf("helm release '%s' has %s status: should run upgrade", releaseName, strings.ToLower(status)) + logEntry.Debug("helm release: should run upgrade", + slog.String("release", releaseName), + slog.String("status", strings.ToLower(status))) return true, nil } // Get values for a non-failed release. releaseValues, err := helmClient.GetReleaseValues(releaseName) if err != nil { - logEntry.Debugf("helm release '%s' get values error, no upgrade: %v", releaseName, err) + logEntry.Debug("helm release get values error, no upgrade", + slog.String("release", releaseName), + log.Err(err)) return false, err } // Run helm upgrade if there is no stored checksum recordedChecksum, hasKey := releaseValues["_addonOperatorModuleChecksum"] if !hasKey { - logEntry.Debugf("helm release '%s' has no saved checksum of values: should run upgrade", releaseName) + logEntry.Debug("helm release has no saved checksum of values: should run upgrade", + slog.String("release", releaseName)) return true, nil } @@ -307,7 +314,10 @@ func (hm *HelmModule) shouldRunHelmUpgrade(helmClient client.HelmClient, release // Run helm upgrade if checksum is changed. 
if recordedChecksumStr, ok := recordedChecksum.(string); ok { if recordedChecksumStr != checksum { - logEntry.Debugf("helm release '%s' checksum '%s' is changed to '%s': should run upgrade", releaseName, recordedChecksumStr, checksum) + logEntry.Debug("helm release checksum is changed: should run upgrade", + slog.String("release", releaseName), + slog.String("checksum", recordedChecksumStr), + slog.String("newChecksum", checksum)) return true, nil } } @@ -320,11 +330,14 @@ func (hm *HelmModule) shouldRunHelmUpgrade(helmClient client.HelmClient, release // Run helm upgrade if there are absent resources if len(absent) > 0 { - logEntry.Debugf("helm release '%s' has %d absent resources: should run upgrade", releaseName, len(absent)) + logEntry.Debug("helm release has absent resources: should run upgrade", + slog.String("release", releaseName), + slog.Int("count", len(absent))) return true, nil } - logEntry.Debugf("helm release '%s' is unchanged: skip release upgrade", releaseName) + logEntry.Debug("helm release is unchanged: skip release upgrade", + slog.String("release", releaseName)) return false, nil } @@ -340,7 +353,9 @@ func (hm *HelmModule) PrepareValuesYamlFile() (string, error) { return "", err } - log.Debugf("Prepared module %s helm values:\n%s", hm.name, hm.values.DebugString()) + log.Debug("Prepared module helm values info", + slog.String("moduleName", hm.name), + slog.String("values", hm.values.DebugString())) return path, nil } diff --git a/pkg/module_manager/models/modules/synchronization_state.go b/pkg/module_manager/models/modules/synchronization_state.go index b6300c10..64231617 100644 --- a/pkg/module_manager/models/modules/synchronization_state.go +++ b/pkg/module_manager/models/modules/synchronization_state.go @@ -2,6 +2,7 @@ package modules import ( "fmt" + "log/slog" "sync" "github.com/deckhouse/deckhouse/pkg/log" @@ -61,14 +62,16 @@ func (s *SynchronizationState) IsCompleted() bool { for _, state := range s.state { if !state.Done { done = false - log.Debugf("Synchronization isn't done for %s/%s", state.HookName, state.BindingName) + log.Debug("Synchronization isn't done", + slog.String("hook", state.HookName), + slog.String("binding", state.BindingName)) break } } return done } -// func (s *SynchronizationState) QueuedForBinding(metadata task.HookMetadata) { +// QueuedForBinding marks a Kubernetes binding as queued for synchronization func (s *SynchronizationState) QueuedForBinding(metadata TaskMetadata) { s.m.Lock() defer s.m.Unlock() @@ -93,7 +96,9 @@ func (s *SynchronizationState) DoneForBinding(id string) { state = &kubernetesBindingSynchronizationState{} s.state[id] = state } - log.Debugf("Synchronization done for %s/%s", state.HookName, state.BindingName) + log.Debug("Synchronization done", + slog.String("hook", state.HookName), + slog.String("binding", state.BindingName)) state.Done = true } @@ -101,6 +106,6 @@ func (s *SynchronizationState) DebugDumpState(logEntry *log.Logger) { s.m.RLock() defer s.m.RUnlock() for id, state := range s.state { - logEntry.Debugf("%s/%s: queued=%v done=%v id=%s", state.HookName, state.BindingName, state.Queued, state.Done, id) + logEntry.Debug(fmt.Sprintf("%s/%s: queued=%v done=%v id=%s", state.HookName, state.BindingName, state.Queued, state.Done, id)) } } diff --git a/pkg/module_manager/module_manager.go b/pkg/module_manager/module_manager.go index 2e323369..647cb8c0 100644 --- a/pkg/module_manager/module_manager.go +++ b/pkg/module_manager/module_manager.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "image" + "log/slog" 
"runtime/trace" "strings" "sync" @@ -190,7 +191,7 @@ func (mm *ModuleManager) GetGlobal() *modules.GlobalModule { func (mm *ModuleManager) ApplyNewKubeConfigValues(kubeConfig *config.KubeConfig, globalValuesChanged bool) error { if kubeConfig == nil { // have no idea, how it could be, just skip run - log.Warnf("No KubeConfig is set") + log.Warn("No KubeConfig is set") return nil } @@ -212,7 +213,8 @@ func (mm *ModuleManager) ApplyNewKubeConfigValues(kubeConfig *config.KubeConfig, newGlobalValues, ok := valuesMap[mm.global.GetName()] if ok { if globalValuesChanged { - log.Debugf("Applying global values: %v", newGlobalValues) + log.Debug("Applying global values", + slog.String("values", fmt.Sprintf("%v", newGlobalValues))) mm.global.SaveConfigValues(newGlobalValues) } delete(valuesMap, mm.global.GetName()) @@ -226,7 +228,10 @@ func (mm *ModuleManager) ApplyNewKubeConfigValues(kubeConfig *config.KubeConfig, } if mod.GetConfigValues(false).Checksum() != values.Checksum() { - log.Debugf("Applying values to %s module: new values: %v, previous ones: %v", moduleName, values, mod.GetConfigValues(false)) + log.Debug("Applying values to module", + slog.String("moduleName", moduleName), + slog.String("values", fmt.Sprintf("%v", values)), + slog.String("oldValues", fmt.Sprintf("%v", mod.GetConfigValues(false)))) mod.SaveConfigValues(values) } } @@ -293,7 +298,8 @@ func (mm *ModuleManager) warnAboutUnknownModules(kubeConfig *config.KubeConfig) } } if len(unknownNames) > 0 { - log.Warnf("KubeConfigManager has values for unknown modules: %+v", unknownNames) + log.Warn("KubeConfigManager has values for unknown modules", + slog.Any("modules", unknownNames)) } } @@ -398,7 +404,8 @@ func (mm *ModuleManager) RefreshStateFromHelmReleases(logLabels map[string]strin if err != nil { return nil, err } - log.Debugf("Following releases found: %v", releasedModules) + log.Debug("Following releases found", + slog.Any("modules", releasedModules)) return mm.stateFromHelmReleases(releasedModules), nil } @@ -416,7 +423,8 @@ func (mm *ModuleManager) stateFromHelmReleases(releases []string) *ModulesState purge = utils.SortReverse(purge) if len(purge) > 0 { - log.Infof("Modules to purge found: %v", purge) + log.Info("Modules to purge found", + slog.Any("modules", purge)) } return &ModulesState{ @@ -465,7 +473,8 @@ func (mm *ModuleManager) RefreshEnabledState(logLabels map[string]string) (*Modu return nil, err } - logEntry.Infof("Enabled modules: %+v", enabledModules) + logEntry.Info("Enabled modules", + slog.Any("modules", enabledModules)) once.Do(mm.modules.SetInited) var ( @@ -486,13 +495,10 @@ func (mm *ModuleManager) RefreshEnabledState(logLabels map[string]string) (*Modu modulesToDisable = utils.SortReverseByReference(modulesToDisable, mm.modules.NamesInOrder()) modulesToEnable = utils.SortByReference(modulesToEnable, mm.modules.NamesInOrder()) - logEntry.Debugf("Refresh state results:\n"+ - " enabledModules: %v\n"+ - " modulesToDisable: %v\n"+ - " modulesToEnable: %v\n", - enabledModules, - modulesToDisable, - modulesToEnable) + logEntry.Debug("Refresh state results", + slog.Any("enabledModules", enabledModules), + slog.Any("modulesToDisable", modulesToDisable), + slog.Any("modulesToEnable", modulesToEnable)) // We've to ignore enabledModules patch in case default moduleLoader is in use, otherwise it breaks applying global hooks patches with default moduleLoader switch mm.moduleLoader.(type) { @@ -605,9 +611,12 @@ func (mm *ModuleManager) DeleteModule(moduleName string, logLabels map[string]st releaseExists, err := 
mm.dependencies.Helm.NewClient(mm.logger, deleteLogLabels).IsReleaseExists(ml.GetName()) if !releaseExists { if err != nil { - logEntry.Warnf("Cannot find helm release '%s' for module '%s'. Helm error: %s", ml.GetName(), ml.GetName(), err) + logEntry.Warn("Cannot find helm release for module", + slog.String("module", ml.GetName()), + log.Err(err)) } else { - logEntry.Warnf("Cannot find helm release '%s' for module '%s'.", ml.GetName(), ml.GetName()) + logEntry.Warn("Cannot find helm release for module.", + slog.String("module", ml.GetName())) } } else { // Chart and release are existed, so run helm delete command @@ -860,12 +869,18 @@ func (mm *ModuleManager) applyEnabledPatch(enabledPatch utils.ValuesPatch, exten } switch op.Op { case "add": - log.Debugf("apply dynamic enable: module %s set to '%v'", modName, *v) + log.Debug("apply dynamic enable", + slog.String("module", modName), + slog.Bool("value", *v)) case "remove": - log.Debugf("apply dynamic enable: module %s removed from dynamic enable", modName) + log.Debug("apply dynamic enable: module removed from dynamic enable", + slog.String("module", modName)) } extender.UpdateStatus(modName, op.Op, *v) - log.Infof("dynamically enabled module status change: module %s, operation %s, state %v", modName, op.Op, *v) + log.Info("dynamically enabled module status change", + slog.String("module", modName), + slog.String("operation", op.Op), + slog.Bool("state", *v)) } return nil @@ -1022,7 +1037,7 @@ func (mm *ModuleManager) RunModuleWithNewOpenAPISchema(moduleName, moduleSource, // RegisterModule checks if a module already exists and reapplies(reloads) its configuration. // If it's a new module - converges all modules - EXPERIMENTAL func (mm *ModuleManager) RegisterModule(_, _ string) error { - return fmt.Errorf("Not implemented yet") + return fmt.Errorf("not implemented yet") } /* @@ -1234,12 +1249,12 @@ func queueHasPendingModuleDeleteTask(q *queue.TaskQueue, moduleName string) bool // registerModules load all available modules from modules directory. func (mm *ModuleManager) registerModules(scriptEnabledExtender *script_extender.Extender) error { if mm.ModulesDir == "" { - log.Warnf("Empty modules directory is passed! No modules to load.") + log.Warn("Empty modules directory is passed! No modules to load.") return nil } if mm.moduleLoader == nil { - log.Errorf("no module loader set") + log.Error("no module loader set") return fmt.Errorf("no module loader set") } @@ -1263,7 +1278,9 @@ func (mm *ModuleManager) registerModules(scriptEnabledExtender *script_extender. for _, mod := range mods { if set.Has(mod.GetName()) { - log.Warnf("module %q from path %q is not registered, because it has a duplicate", mod.GetName(), mod.GetPath()) + log.Warn("module is not registered, because it has a duplicate", + slog.String("module", mod.GetName()), + slog.String("path", mod.GetPath())) continue } @@ -1282,7 +1299,8 @@ func (mm *ModuleManager) registerModules(scriptEnabledExtender *script_extender. 
 		})
 	}

-	log.Debugf("Found modules: %v", set.NamesInOrder())
+	log.Debug("Found modules",
+		slog.Any("modules", set.NamesInOrder()))

 	mm.modules = set
diff --git a/pkg/module_manager/scheduler/extenders/dynamically_enabled/dynamic.go b/pkg/module_manager/scheduler/extenders/dynamically_enabled/dynamic.go
index c58ae691..1630fa2a 100644
--- a/pkg/module_manager/scheduler/extenders/dynamically_enabled/dynamic.go
+++ b/pkg/module_manager/scheduler/extenders/dynamically_enabled/dynamic.go
@@ -2,6 +2,7 @@ package dynamically_enabled

 import (
 	"context"
+	"log/slog"
 	"sync"

 	"github.com/deckhouse/deckhouse/pkg/log"
@@ -43,7 +44,8 @@ func (e *Extender) UpdateStatus(moduleName, operation string, value bool) {
 			e.sendNotify()
 		}
 	default:
-		log.Warnf("Unknown patch operation: %s", operation)
+		log.Warn("Unknown patch operation",
+			slog.String("operation", operation))
 	}
 	e.l.Unlock()
 }
diff --git a/pkg/module_manager/scheduler/extenders/script_enabled/script.go b/pkg/module_manager/scheduler/extenders/script_enabled/script.go
index d2d85f8f..19ec5ac5 100644
--- a/pkg/module_manager/scheduler/extenders/script_enabled/script.go
+++ b/pkg/module_manager/scheduler/extenders/script_enabled/script.go
@@ -3,6 +3,7 @@ package script_enabled
 import (
 	"errors"
 	"fmt"
+	"log/slog"
 	"os"
 	"path/filepath"
@@ -66,7 +67,8 @@ func (e *Extender) AddBasicModule(module node.ModuleInterface) {
 	if err != nil {
 		if os.IsNotExist(err) {
 			moduleD.scriptState = noEnabledScript
-			log.Debugf("MODULE '%s' is ENABLED. Enabled script doesn't exist!", module.GetName())
+			log.Debug("MODULE is ENABLED. Enabled script doesn't exist!",
+				slog.String("module", module.GetName()))
 		} else {
 			moduleD.scriptState = statError
 			moduleD.stateDescription = fmt.Sprintf("Cannot stat enabled script for '%s' module: %v", module.GetName(), err)
@@ -75,7 +77,8 @@ func (e *Extender) AddBasicModule(module node.ModuleInterface) {
 	} else {
 		if !utils_file.IsFileExecutable(f) {
 			moduleD.scriptState = nonExecutableScript
-			log.Warnf("Found non-executable enabled script for '%s' module - assuming enabled state", module.GetName())
+			log.Warn("Found non-executable enabled script for module - assuming enabled state",
+				slog.String("module", module.GetName()))
 		}
 	}
 	e.basicModuleDescriptors[module.GetName()] = moduleD
@@ -108,10 +111,12 @@ func (e *Extender) Filter(moduleName string, logLabels map[string]string) (*bool
 		err = errors.New(moduleDescriptor.stateDescription)
 	case nonExecutableScript:
-		log.Warnf("Found non-executable enabled script for '%s' module - assuming enabled state", moduleDescriptor.module.GetName())
+		log.Warn("Found non-executable enabled script for module - assuming enabled state",
+			slog.String("module", moduleDescriptor.module.GetName()))
 	case noEnabledScript:
-		log.Debugf("MODULE '%s' is ENABLED. Enabled script doesn't exist!", moduleDescriptor.module.GetName())
+		log.Debug("MODULE is ENABLED. Enabled script doesn't exist!",
+			slog.String("module", moduleDescriptor.module.GetName()))
 	}

 	if err != nil {
diff --git a/pkg/module_manager/scheduler/extenders/static/static.go b/pkg/module_manager/scheduler/extenders/static/static.go
index d4fbb974..f275e490 100644
--- a/pkg/module_manager/scheduler/extenders/static/static.go
+++ b/pkg/module_manager/scheduler/extenders/static/static.go
@@ -1,6 +1,7 @@
 package static

 import (
+	"log/slog"
 	"os"
 	"path/filepath"
 	"strings"
@@ -28,19 +29,22 @@ func NewExtender(staticValuesFilePaths string) (*Extender, error) {
 		valuesFile := filepath.Join(dir, "values.yaml")
 		fileInfo, err := os.Stat(valuesFile)
 		if err != nil {
-			log.Errorf("Couldn't stat %s", valuesFile)
+			log.Error("Couldn't stat file",
+				slog.String("file", valuesFile))
 			continue
 		}

 		if fileInfo.IsDir() {
-			log.Errorf("File %s is a directory", valuesFile)
+			log.Error("File is a directory",
+				slog.String("file", valuesFile))
 			continue
 		}

 		f, err := os.Open(valuesFile)
 		if err != nil {
 			if os.IsNotExist(err) {
-				log.Debugf("File %s doesn't exist", valuesFile)
+				log.Debug("File doesn't exist",
+					slog.String("file", valuesFile))
 				continue
 			}
 			return nil, err
diff --git a/pkg/module_manager/scheduler/scheduler.go b/pkg/module_manager/scheduler/scheduler.go
index b856cd01..49e2ef4d 100644
--- a/pkg/module_manager/scheduler/scheduler.go
+++ b/pkg/module_manager/scheduler/scheduler.go
@@ -7,6 +7,7 @@ import (
 	"errors"
 	"fmt"
 	"image"
+	"log/slog"
 	"slices"
 	"strings"
 	"sync"
@@ -294,7 +295,8 @@ func (s *Scheduler) addWeightVertex(vertex *node.Node) error {
 func (s *Scheduler) ApplyExtenders(extendersEnv string) error {
 	appliedExtenders := []extenders.ExtenderName{}
 	if len(extendersEnv) == 0 {
-		log.Warnf("ADDON_OPERATOR_APPLIED_MODULE_EXTENDERS variable isn't set - default list of %s will be applied", defaultAppliedExtenders)
+		log.Warn("ADDON_OPERATOR_APPLIED_MODULE_EXTENDERS variable isn't set - default list will be applied",
+			slog.Any("values", defaultAppliedExtenders))
 		appliedExtenders = defaultAppliedExtenders
 	} else {
 		availableExtenders := make(map[extenders.ExtenderName]bool, len(s.extenders))
@@ -350,8 +352,8 @@ func (s *Scheduler) ApplyExtenders(extendersEnv string) error {
 		finalList = append(finalList, e.ext.Name())
 	}

-	log.Infof("The list of applied module extenders: %s", finalList)
-
+	log.Info("The list of applied module extenders",
+		slog.Any("finalList", finalList))
 	return nil
 }
@@ -647,12 +649,13 @@ func (s *Scheduler) GetGraphState(logLabels map[string]string) ( /*enabled modul
 	// graph hasn't been initialized yet
 	if s.enabledModules == nil {
-		logEntry.Infof("Module Scheduler: graph hasn't been calculated yet")
+		logEntry.Info("Module Scheduler: graph hasn't been calculated yet")
 		recalculateGraph = true
 	}

 	if s.err != nil {
-		logEntry.Warnf("Module Scheduler: graph in a faulty state and will be recalculated: %s", s.err.Error())
+		logEntry.Warn("Module Scheduler: graph in a faulty state and will be recalculated",
+			slog.String("error", s.err.Error()))
 		recalculateGraph = true
 	}
@@ -782,7 +785,7 @@ outerCycle:
 	if graphErr != nil {
 		s.err = graphErr
-		logEntry.Warnf("Module Scheduler: Graph converge failed: %s", s.err.Error())
+		logEntry.Warn("Module Scheduler: Graph converge failed", log.Err(s.err))
 		return true, nil
 	}
@@ -830,7 +833,8 @@ outerCycle:
 	s.enabledModules = &enabledModules
 	// reset any previous errors
 	s.err = nil
-	logEntry.Debugf("Graph was successfully updated, diff: [%v]", s.diff)
+	logEntry.Debug("Graph was successfully updated",
+		slog.String("diff", fmt.Sprintf("%v", s.diff)))
 	metaDiffSlice := make([]string, 0, len(metaDiff))
 	for moduleName := range metaDiff {
diff --git a/pkg/task/hook_metadata.go b/pkg/task/hook_metadata.go
index bcfbb569..6d7a1a0b 100644
--- a/pkg/task/hook_metadata.go
+++ b/pkg/task/hook_metadata.go
@@ -3,6 +3,7 @@ package task
 import (
 	"context"
 	"fmt"
+	"log/slog"
 	"sort"
 	"strings"
 	"sync"
@@ -104,12 +105,14 @@ var (
 func HookMetadataAccessor(t task.Task) (meta HookMetadata) {
 	taskMeta := t.GetMetadata()
 	if taskMeta == nil {
-		log.Errorf("Possible Bug! task metadata is nil")
+		log.Error("Possible Bug! task metadata is nil")
 		return
 	}
 	meta, ok := taskMeta.(HookMetadata)
 	if !ok {
-		log.Errorf("Possible Bug! task '%s' metadata is not of type ModuleHookMetadata: got %T", t.GetType(), meta)
+		log.Error("Possible Bug! task metadata is not of type ModuleHookMetadata",
+			slog.String("type", string(t.GetType())),
+			slog.String("got", fmt.Sprintf("%T", meta)))
 		return
 	}
 	return
diff --git a/pkg/utils/loader.go b/pkg/utils/loader.go
index 15f5f4ea..0b4d7854 100644
--- a/pkg/utils/loader.go
+++ b/pkg/utils/loader.go
@@ -2,6 +2,7 @@ package utils

 import (
 	"fmt"
+	"log/slog"
 	"os"
 	"path/filepath"
 	"strings"
@@ -33,7 +34,9 @@ func LoadValuesFileFromDir(dir string, strictModeEnabled bool) (Values, error) {
 	valuesFilePath := filepath.Join(dir, ValuesFileName)
 	valuesYaml, err := os.ReadFile(valuesFilePath)
 	if err != nil && os.IsNotExist(err) && !strictModeEnabled {
-		log.Debugf("No static values file '%s': %v", valuesFilePath, err)
+		log.Debug("No static values file",
+			slog.String("path", valuesFilePath),
+			log.Err(err))
 		return nil, nil
 	}
 	if err != nil {
diff --git a/pkg/utils/values.go b/pkg/utils/values.go
index 7e7430df..13760e91 100644
--- a/pkg/utils/values.go
+++ b/pkg/utils/values.go
@@ -3,6 +3,7 @@ package utils
 import (
 	"encoding/json"
 	"fmt"
+	"log/slog"
 	"reflect"

 	"github.com/davecgh/go-spew/spew"
@@ -127,7 +128,7 @@ func (v Values) Global() Values {
 	data := map[string]interface{}{GlobalValuesKey: globalValues}
 	newV, err := NewValues(data)
 	if err != nil {
-		log.Errorf("get global Values: %s", err)
+		log.Error("get global Values", log.Err(err))
 	}
 	return newV
 }
@@ -141,7 +142,9 @@ func (v Values) SectionByKey(key string) Values {
 	data := map[string]interface{}{key: sectionValues}
 	newV, err := NewValues(data)
 	if err != nil {
-		log.Errorf("get section '%s' Values: %s", key, err)
+		log.Error("get section Values",
+			slog.String("key", key),
+			log.Err(err))
 	}
 	return newV
 }
diff --git a/pkg/values/validation/schemas.go b/pkg/values/validation/schemas.go
index badf7bae..fa596f76 100644
--- a/pkg/values/validation/schemas.go
+++ b/pkg/values/validation/schemas.go
@@ -3,6 +3,7 @@ package validation
 import (
 	"encoding/json"
 	"fmt"
+	"log/slog"
 	"reflect"
 	"strings"
@@ -86,7 +87,9 @@ func (st *SchemaStorage) ValidateValues(moduleName string, values utils.Values)
 func (st *SchemaStorage) Validate(valuesType SchemaType, moduleName string, values utils.Values) error {
 	schema := st.Schemas[valuesType]
 	if schema == nil {
-		log.Warnf("schema (%s) for '%s' values is not found", moduleName, valuesType)
+		log.Warn("schema is not found",
+			slog.String("module", moduleName),
+			slog.String("valuesType", string(valuesType)))
 		return nil
 	}
@@ -97,9 +100,12 @@ func (st *SchemaStorage) Validate(valuesType SchemaType, moduleName string, valu
 	validationErr := validateObject(obj, schema, moduleName)

 	if validationErr == nil {
-		log.Debugf("'%s' values are valid", valuesType)
+		log.Debug("values are valid",
+			slog.String("valuesType", string(valuesType)))
 	} else {
-		log.Debugf("'%s' values are NOT valid: %v", valuesType, validationErr)
+		log.Debug("values are NOT valid",
+			slog.String("valuesType", string(valuesType)),
+			log.Err(validationErr))
 	}
 	return validationErr
 }
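
Editor's note: the hunks above converge on one logging convention — a constant message plus typed attributes instead of printf-style interpolation. Below is a minimal standalone sketch of that convention, assuming only the calls already visible in this diff (package-level Info/Warn/Debug and Err from github.com/deckhouse/deckhouse/pkg/log, attribute constructors from the standard log/slog package); the module name and file path are hypothetical values used purely for illustration.

package main

import (
	"fmt"
	"log/slog"
	"os"

	"github.com/deckhouse/deckhouse/pkg/log"
)

func main() {
	moduleName := "example-module" // hypothetical value, for illustration only

	// Before: log.Infof("module %s enabled", moduleName)
	// After: constant message plus a typed slog attribute.
	log.Info("module enabled", slog.String("module", moduleName))

	// Errors are attached with log.Err instead of %s/%v verbs.
	if _, err := os.Stat("/nonexistent/values.yaml"); err != nil {
		log.Warn("cannot stat values file",
			slog.String("file", "/nonexistent/values.yaml"),
			log.Err(err))
	}

	// Values without a dedicated slog helper are rendered explicitly,
	// mirroring the fmt.Sprintf("%+v", ...) pattern used in the hunks above.
	diff := map[string]bool{"module-a": true}
	log.Debug("graph updated", slog.String("diff", fmt.Sprintf("%v", diff)))
}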