diff --git a/cmd/thanos/sidecar.go b/cmd/thanos/sidecar.go
index 50967b7fd0..2a79022669 100644
--- a/cmd/thanos/sidecar.go
+++ b/cmd/thanos/sidecar.go
@@ -54,7 +54,7 @@ func registerSidecar(app *extkingpin.App) {
 			ReloadURL:     reloader.ReloadURLFromBase(conf.prometheus.url),
 			CfgFile:       conf.reloader.confFile,
 			CfgOutputFile: conf.reloader.envVarConfFile,
-			RuleDirs:      conf.reloader.ruleDirectories,
+			WatchedDirs:   conf.reloader.ruleDirectories,
 			WatchInterval: conf.reloader.watchInterval,
 			RetryInterval: conf.reloader.retryInterval,
 		})
diff --git a/pkg/reloader/example_test.go b/pkg/reloader/example_test.go
index dfc69a69c8..0ee46e8b30 100644
--- a/pkg/reloader/example_test.go
+++ b/pkg/reloader/example_test.go
@@ -24,7 +24,7 @@ func ExampleReloader() {
 		ReloadURL:     reloader.ReloadURLFromBase(u),
 		CfgFile:       "/path/to/cfg",
 		CfgOutputFile: "/path/to/cfg.out",
-		RuleDirs:      []string{"/path/to/dirs"},
+		WatchedDirs:   []string{"/path/to/dirs"},
 		WatchInterval: 3 * time.Minute,
 		RetryInterval: 5 * time.Second,
 	})
diff --git a/pkg/reloader/reloader.go b/pkg/reloader/reloader.go
index 37d45cb837..997cd6b3a1 100644
--- a/pkg/reloader/reloader.go
+++ b/pkg/reloader/reloader.go
@@ -9,9 +9,9 @@
 // * Watch on changes against certain file e.g (`cfgFile`).
 // * Optionally, specify different different output file for watched `cfgFile` (`cfgOutputFile`).
 // This will also try decompress the `cfgFile` if needed and substitute ALL the envvars using Kubernetes substitution format: (`$(var)`)
-// * Watch on changes against certain directories (`ruleDires`).
+// * Watch on changes against certain directories (`watchedDirs`).
 //
-// Once any of those two changes Prometheus on given `reloadURL` will be notified, causing Prometheus to reload configuration and rules.
+// Once any of those two changes, Prometheus on given `reloadURL` will be notified, causing Prometheus to reload configuration and rules.
 //
 // This and below for reloader:
 //
@@ -20,7 +20,7 @@
 //		ReloadURL:     reloader.ReloadURLFromBase(u),
 //		CfgFile:       "/path/to/cfg",
 //		CfgOutputFile: "/path/to/cfg.out",
-//		RuleDirs:      []string{"/path/to/dirs"},
+//		WatchedDirs:   []string{"/path/to/dirs"},
 //		WatchInterval: 3 * time.Minute,
 //		RetryInterval: 5 * time.Second,
 //	})
@@ -86,12 +86,12 @@ type Reloader struct {
 	reloadURL     *url.URL
 	cfgFile       string
 	cfgOutputFile string
-	ruleDirs      []string
+	watchedDirs   []string
 	watchInterval time.Duration
 	retryInterval time.Duration
 
-	lastCfgHash  []byte
-	lastRuleHash []byte
+	lastCfgHash         []byte
+	lastWatchedDirsHash []byte
 
 	reloads      prometheus.Counter
 	reloadErrors prometheus.Counter
@@ -112,9 +112,9 @@ type Options struct {
 	// will be substituted and the output written into the given path. Prometheus should then use
 	// cfgOutputFile as its config file path.
 	CfgOutputFile string
-	// RuleDirs is a collection of paths for this reloader to watch over.
-	RuleDirs []string
-	// WatchInterval controls how often reloader re-reads config and rules.
+	// WatchedDirs is a collection of paths for this reloader to watch over.
+	WatchedDirs []string
+	// WatchInterval controls how often reloader re-reads config and directories.
 	WatchInterval time.Duration
 	// RetryInterval controls how often reloader retries config reload in case of error.
 	RetryInterval time.Duration
@@ -122,7 +122,7 @@
 
 var firstGzipBytes = []byte{0x1f, 0x8b, 0x08}
 
-// New creates a new reloader that watches the given config file and rule directory
+// New creates a new reloader that watches the given config file and directories
 // and triggers a Prometheus reload upon changes.
 func New(logger log.Logger, reg prometheus.Registerer, o *Options) *Reloader {
 	if logger == nil {
@@ -133,7 +133,7 @@ func New(logger log.Logger, reg prometheus.Registerer, o *Options) *Reloader {
 		reloadURL:     o.ReloadURL,
 		cfgFile:       o.CfgFile,
 		cfgOutputFile: o.CfgOutputFile,
-		ruleDirs:      o.RuleDirs,
+		watchedDirs:   o.WatchedDirs,
 		watchInterval: o.WatchInterval,
 		retryInterval: o.RetryInterval,
 
@@ -182,12 +182,12 @@ func (r *Reloader) WithWatchInterval(duration time.Duration) {
 	r.watchInterval = duration
 }
 
-// Watch starts to watch periodically the config file and rules and process them until the context
+// Watch starts to watch the config file and directories periodically, and processes them until the context
 // gets canceled. Config file gets env expanded if cfgOutputFile is specified and reload is trigger if
-// config or rules changed.
+// config or directories changed.
 // Watch watchers periodically based on r.watchInterval.
 // For config file it watches it directly as well via fsnotify.
-// It watches rule dirs as well, but lot's of edge cases are missing, so rely on interval mostly.
+// It watches directories as well, but lots of edge cases are missing, so rely mostly on the interval.
 func (r *Reloader) Watch(ctx context.Context) error {
 	watcher, err := fsnotify.NewWatcher()
 	if err != nil {
@@ -207,11 +207,11 @@ func (r *Reloader) Watch(ctx context.Context) error {
 		}
 	}
 
-	// Watch rule dirs in best effort manner.
-	for _, ruleDir := range r.ruleDirs {
-		watchables[filepath.Dir(ruleDir)] = struct{}{}
-		if err := watcher.Add(ruleDir); err != nil {
-			return errors.Wrapf(err, "add rule dir %s to watcher", ruleDir)
+	// Watch directories in a best-effort manner.
+	for _, dir := range r.watchedDirs {
+		watchables[filepath.Dir(dir)] = struct{}{}
+		if err := watcher.Add(dir); err != nil {
+			return errors.Wrapf(err, "add dir %s to watcher", dir)
 		}
 	}
 
@@ -220,10 +220,10 @@ func (r *Reloader) Watch(ctx context.Context) error {
 	r.watches.Set(float64(len(watchables)))
 
 	level.Info(r.logger).Log(
-		"msg", "started watching config file and recursively rule dirs for changes",
+		"msg", "started watching config file and directories for changes",
 		"cfg", r.cfgFile,
 		"out", r.cfgOutputFile,
-		"dirs", strings.Join(r.ruleDirs, ","))
+		"dirs", strings.Join(r.watchedDirs, ","))
 
 	for {
 		select {
@@ -253,8 +253,8 @@ func (r *Reloader) Watch(ctx context.Context) error {
 // Reload is retried in retryInterval until watchInterval.
 func (r *Reloader) apply(ctx context.Context) error {
 	var (
-		cfgHash  []byte
-		ruleHash []byte
+		cfgHash         []byte
+		watchedDirsHash []byte
 	)
 	if r.cfgFile != "" {
 		h := sha256.New()
@@ -301,10 +301,10 @@ func (r *Reloader) apply(ctx context.Context) error {
 	}
 
 	h := sha256.New()
-	for _, ruleDir := range r.ruleDirs {
-		walkDir, err := filepath.EvalSymlinks(ruleDir)
+	for _, dir := range r.watchedDirs {
+		walkDir, err := filepath.EvalSymlinks(dir)
 		if err != nil {
-			return errors.Wrap(err, "ruleDir symlink eval")
+			return errors.Wrap(err, "dir symlink eval")
 		}
 		err = filepath.Walk(walkDir, func(path string, f os.FileInfo, err error) error {
 			if err != nil {
@@ -332,11 +332,11 @@ func (r *Reloader) apply(ctx context.Context) error {
 			return errors.Wrap(err, "build hash")
 		}
 	}
-	if len(r.ruleDirs) > 0 {
-		ruleHash = h.Sum(nil)
+	if len(r.watchedDirs) > 0 {
+		watchedDirsHash = h.Sum(nil)
 	}
 
-	if bytes.Equal(r.lastCfgHash, cfgHash) && bytes.Equal(r.lastRuleHash, ruleHash) {
+	if bytes.Equal(r.lastCfgHash, cfgHash) && bytes.Equal(r.lastWatchedDirsHash, watchedDirsHash) {
 		// Nothing to do.
 		return nil
 	}
@@ -353,12 +353,12 @@ func (r *Reloader) apply(ctx context.Context) error {
 		}
 
 		r.lastCfgHash = cfgHash
-		r.lastRuleHash = ruleHash
+		r.lastWatchedDirsHash = watchedDirsHash
 		level.Info(r.logger).Log(
 			"msg", "Prometheus reload triggered",
 			"cfg_in", r.cfgFile,
 			"cfg_out", r.cfgOutputFile,
-			"rule_dirs", strings.Join(r.ruleDirs, ", "))
+			"watched_dirs", strings.Join(r.watchedDirs, ", "))
 		return nil
 	}); err != nil {
 		level.Error(r.logger).Log("msg", "Failed to trigger reload. Retrying.", "err", err)
diff --git a/pkg/reloader/reloader_test.go b/pkg/reloader/reloader_test.go
index 8a6071c2b3..624f40757f 100644
--- a/pkg/reloader/reloader_test.go
+++ b/pkg/reloader/reloader_test.go
@@ -72,7 +72,7 @@ func TestReloader_ConfigApply(t *testing.T) {
 		ReloadURL:     reloadURL,
 		CfgFile:       input,
 		CfgOutputFile: output,
-		RuleDirs:      nil,
+		WatchedDirs:   nil,
 		WatchInterval: 9999 * time.Hour, // Disable interval to test watch logic only.
 		RetryInterval: 100 * time.Millisecond,
 	})
@@ -205,7 +205,7 @@ func TestReloader_RuleApply(t *testing.T) {
 		ReloadURL:     reloadURL,
 		CfgFile:       "",
 		CfgOutputFile: "",
-		RuleDirs:      []string{dir, path.Join(dir, "rule-dir")},
+		WatchedDirs:   []string{dir, path.Join(dir, "rule-dir")},
 		WatchInterval: 100 * time.Millisecond,
 		RetryInterval: 100 * time.Millisecond,
 	})
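
For reference, and not part of the patch itself: a minimal sketch of how a caller constructs the reloader after this rename, mirroring pkg/reloader/example_test.go above. The import path is assumed to be the standard Thanos module path, and the URL and file paths are placeholders.

package main

import (
	"context"
	"net/url"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/thanos-io/thanos/pkg/reloader"
)

func main() {
	// Prometheus base URL; ReloadURLFromBase derives the /-/reload endpoint from it.
	u, err := url.Parse("http://localhost:9090")
	if err != nil {
		panic(err)
	}

	// A nil logger is tolerated by New (see the nil check in the diff above).
	rl := reloader.New(nil, prometheus.NewRegistry(), &reloader.Options{
		ReloadURL:     reloader.ReloadURLFromBase(u),
		CfgFile:       "/path/to/cfg",
		CfgOutputFile: "/path/to/cfg.out",
		WatchedDirs:   []string{"/path/to/dirs"}, // formerly RuleDirs
		WatchInterval: 3 * time.Minute,
		RetryInterval: 5 * time.Second,
	})

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Watch blocks, re-reading the config file and watched directories until ctx is canceled.
	if err := rl.Watch(ctx); err != nil {
		panic(err)
	}
}

Only the option, field, and log-key names change here; ReloadURLFromBase, Watch, and the hashing logic are untouched, so existing callers only need to rename RuleDirs to WatchedDirs.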