config: Validate timeout and interval of the scrape configuration
In general we want to stick to the configured interval and not
overrun it because of slow or timed-out scrapes.

Go's /debug/pprof/profile endpoint starts profiling when it is
invoked, has no support for concurrent profiling, and its profiling
duration can only be configured in whole seconds. The existing code
alternated between using "seconds" and "Delta"+ScrapeInterval, but
neither works correctly once timeout <= interval is enforced.

Make sure that "seconds" < ScrapeTimeout <= ScrapeInterval and adjust
the default configuration to honor this. In the long run we should
consider treating this endpoint entirely differently from the others
(e.g. jitter the scrapes instead of sticking to the interval, or
profile for a longer duration).

Signed-off-by: Holger Hans Peter Freyther <holger@moiji-mobile.com>
zecke committed Oct 10, 2021
1 parent 2caea71 commit 9f7ed78
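To make the new invariant concrete, here is a small standalone sketch (an illustration only, not code from this commit) using the new defaults introduced below: the timeout is left at zero, inherits the scrape interval during validation, and the requested profiling duration has to stay strictly below it.

package main

import (
	"fmt"
	"time"
)

func main() {
	// New defaults (see DefaultScrapeConfig in pkg/config/config.go below):
	// a 10s interval and a zero timeout that falls back to the interval.
	interval := 10 * time.Second
	timeout := time.Duration(0)
	if timeout == 0 {
		timeout = interval
	}

	// The derived pprof duration stays strictly below the timeout:
	// "seconds" < ScrapeTimeout <= ScrapeInterval.
	seconds := int(timeout/time.Second) - 1
	fmt.Printf("seconds=%d timeout=%s interval=%s\n", seconds, timeout, interval)
	// seconds=9 timeout=10s interval=10s
}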
Showing 3 changed files with 24 additions and 4 deletions.
3 changes: 2 additions & 1 deletion parca.yaml
@@ -10,6 +10,7 @@ debug_info:

 scrape_configs:
   - job_name: "default"
-    scrape_interval: "1s"
+    scrape_interval: "3s"
+    scrape_timeout: "2s"
     static_configs:
       - targets: [ '127.0.0.1:7070' ]
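The example moves from a 1s interval to a 3s interval with a 2s timeout, presumably because the new validation in pkg/config/config.go below requires at least a 2s timeout for process_cpu profiling and the timeout may not exceed the interval. A quick check of these values (an illustration, not part of the commit):

package main

import (
	"fmt"
	"time"
)

func main() {
	interval := 3 * time.Second // scrape_interval above
	timeout := 2 * time.Second  // scrape_timeout above

	fmt.Println(timeout <= interval)      // true: the scrape has to finish within its interval
	fmt.Println(timeout >= 2*time.Second) // true: the minimum the process_cpu profile needs
}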
23 changes: 21 additions & 2 deletions pkg/config/config.go
@@ -31,6 +31,10 @@ import (
 	"gopkg.in/yaml.v2"
 )
 
+const (
+	pprofProcessCpu string = "process_cpu"
+)
+
 // Config holds all the configuration information for Parca
 type Config struct {
 	DebugInfo *debuginfo.Config `yaml:"debug_info"`
@@ -45,7 +49,7 @@ func trueValue() *bool {
 func DefaultScrapeConfig() ScrapeConfig {
 	return ScrapeConfig{
 		ScrapeInterval:  model.Duration(time.Second * 10),
-		ScrapeTimeout:   model.Duration(time.Second * 11),
+		ScrapeTimeout:   model.Duration(time.Second * 0),
 		Scheme:          "http",
 		ProfilingConfig: &ProfilingConfig{
 			PprofConfig: PprofConfig{
@@ -65,7 +69,7 @@ func DefaultScrapeConfig() ScrapeConfig {
 					Enabled: trueValue(),
 					Path:    "/debug/pprof/mutex",
 				},
-				"process_cpu": &PprofProfilingConfig{
+				pprofProcessCpu: &PprofProfilingConfig{
 					Enabled: trueValue(),
 					Delta:   true,
 					Path:    "/debug/pprof/profile",
@@ -217,6 +221,21 @@ func (c *ScrapeConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 		}
 	}
 
+	// Validate the scrape interval and timeout configuration. When /debug/pprof/profile scraping
+	// is enabled we need to make sure there is enough time to complete the scrape.
+	if c.ScrapeTimeout > c.ScrapeInterval {
+		return fmt.Errorf("scrape timeout must be smaller or equal to interval for: %v", c.JobName)
+	}
+
+	if c.ScrapeTimeout == 0 {
+		c.ScrapeTimeout = c.ScrapeInterval
+	}
+	if cfg, ok := c.ProfilingConfig.PprofConfig[pprofProcessCpu]; ok {
+		if *cfg.Enabled && c.ScrapeTimeout < model.Duration(time.Second*2) {
+			return fmt.Errorf("%v scrape_timeout must be at least 2 seconds in %v", pprofProcessCpu, c.JobName)
+		}
+	}
+
 	return nil
 }

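A minimal sketch of how the new check surfaces to a user, assuming the pkg/config import path shown here and that the custom UnmarshalYAML above is reached through a plain yaml.Unmarshal (both assumptions, not verified against the rest of the repository):

package main

import (
	"fmt"

	"github.com/parca-dev/parca/pkg/config"
	"gopkg.in/yaml.v2"
)

func main() {
	// A job whose timeout exceeds its interval should now be rejected.
	raw := `
job_name: "bad"
scrape_interval: "3s"
scrape_timeout: "5s"
`
	var sc config.ScrapeConfig
	if err := yaml.Unmarshal([]byte(raw), &sc); err != nil {
		fmt.Println(err) // scrape timeout must be smaller or equal to interval for: bad
	}
}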
2 changes: 1 addition & 1 deletion pkg/scrape/target.go
@@ -370,7 +370,7 @@ func targetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig) ([]*Targe
 		}
 
 		if pcfg, found := cfg.ProfilingConfig.PprofConfig[profType]; found && pcfg.Delta {
-			params.Add("seconds", strconv.Itoa(int(time.Duration(cfg.ScrapeInterval)/time.Second)))
+			params.Add("seconds", strconv.Itoa(int(time.Duration(cfg.ScrapeTimeout)/time.Second)-1))
 		}
 
 		targets = append(targets, NewTarget(lbls, origLabels, params))
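For completeness, a sketch of what the changed line produces on the wire, building the query parameters the same way targetsFromGroup does (the 2s timeout is taken from the parca.yaml example above; an illustration, not part of the commit):

package main

import (
	"fmt"
	"net/url"
	"strconv"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	scrapeTimeout := model.Duration(2 * time.Second) // scrape_timeout from parca.yaml above

	params := url.Values{}
	params.Add("seconds", strconv.Itoa(int(time.Duration(scrapeTimeout)/time.Second)-1))

	// The delta profile is requested as /debug/pprof/profile?seconds=1,
	// leaving one second of headroom before the 2s scrape timeout fires.
	fmt.Println(params.Encode()) // seconds=1
}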
