Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Implement per-user rate limits #251

Merged
merged 2 commits into from
Jan 31, 2017
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
43 changes: 37 additions & 6 deletions distributor/distributor.go
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
package distributor

import (
"errors"
"flag"
"fmt"
"hash/fnv"
Expand All @@ -12,6 +13,7 @@ import (
"github.com/mwitkow/go-grpc-middleware"
"github.com/opentracing/opentracing-go"
"golang.org/x/net/context"
"golang.org/x/time/rate"
"google.golang.org/grpc"

"github.com/prometheus/client_golang/prometheus"
Expand All @@ -28,6 +30,8 @@ import (
"github.com/weaveworks/cortex/util"
)

var errIngestionRateLimitExceeded = errors.New("ingestion rate limit exceeded")

var (
numClientsDesc = prometheus.NewDesc(
"cortex_distributor_ingester_clients",
Expand All @@ -46,6 +50,10 @@ type Distributor struct {
quit chan struct{}
done chan struct{}

// Per-user rate limiters.
ingestLimitersMtx sync.Mutex
ingestLimiters map[string]*rate.Limiter

queryDuration *prometheus.HistogramVec
receivedSamples prometheus.Counter
sendDuration *prometheus.HistogramVec
Expand Down Expand Up @@ -77,6 +85,8 @@ type Config struct {
HeartbeatTimeout time.Duration
RemoteTimeout time.Duration
ClientCleanupPeriod time.Duration
IngestionRateLimit float64
IngestionBurstSize int
}

// RegisterFlags adds the flags required to config this to the given FlagSet
Expand All @@ -86,6 +96,8 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
flag.DurationVar(&cfg.HeartbeatTimeout, "distributor.heartbeat-timeout", time.Minute, "The heartbeat timeout after which ingesters are skipped for reads/writes.")
flag.DurationVar(&cfg.RemoteTimeout, "distributor.remote-timeout", 2*time.Second, "Timeout for downstream ingesters.")
flag.DurationVar(&cfg.ClientCleanupPeriod, "distributor.client-cleanup-period", 15*time.Second, "How frequently to clean up clients for ingesters that have gone away.")
flag.Float64Var(&cfg.IngestionRateLimit, "distributor.ingestion-rate-limit", 25000, "Per-user ingestion rate limit in samples per second.")
flag.IntVar(&cfg.IngestionBurstSize, "distributor.ingestion-burst-size", 50000, "Per-user allowed ingestion burst size (in number of samples).")
}

// New constructs a new Distributor
Expand All @@ -97,11 +109,12 @@ func New(cfg Config, ring ReadRing) (*Distributor, error) {
return nil, fmt.Errorf("MinReadSuccesses > ReplicationFactor: %d > %d", cfg.MinReadSuccesses, cfg.ReplicationFactor)
}
d := &Distributor{
cfg: cfg,
ring: ring,
clients: map[string]ingesterClient{},
quit: make(chan struct{}),
done: make(chan struct{}),
cfg: cfg,
ring: ring,
clients: map[string]ingesterClient{},
quit: make(chan struct{}),
done: make(chan struct{}),
ingestLimiters: map[string]*rate.Limiter{},
queryDuration: prometheus.NewHistogramVec(prometheus.HistogramOpts{
Namespace: "cortex",
Name: "distributor_query_duration_seconds",
Expand Down Expand Up @@ -255,6 +268,11 @@ func (d *Distributor) Push(ctx context.Context, req *remote.WriteRequest) (*cort
samples := util.FromWriteRequest(req)
d.receivedSamples.Add(float64(len(samples)))

limiter := d.getOrCreateIngestLimiter(userID)
if !limiter.AllowN(time.Now(), len(samples)) {
return nil, errIngestionRateLimitExceeded
}

keys := make([]uint32, len(samples), len(samples))
for i, sample := range samples {
keys[i] = tokenForMetric(userID, sample.Metric)
Expand Down Expand Up @@ -286,7 +304,7 @@ func (d *Distributor) Push(ctx context.Context, req *remote.WriteRequest) (*cort
}

// This is just a shortcut - if there are not minSuccess available ingesters,
// after filtering out dead ones, don't even both trying.
// after filtering out dead ones, don't even bother trying.
if len(liveIngesters) < sampleTrackers[i].minSuccess {
return nil, fmt.Errorf("wanted at least %d live ingesters to process write, had %d",
sampleTrackers[i].minSuccess, len(liveIngesters))
Expand Down Expand Up @@ -320,6 +338,19 @@ func (d *Distributor) Push(ctx context.Context, req *remote.WriteRequest) (*cort
return &cortex.WriteResponse{}, nil
}

func (d *Distributor) getOrCreateIngestLimiter(userID string) *rate.Limiter {
d.ingestLimitersMtx.Lock()
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Note I'm intentionally just going for a normal mutex here because I figured an RWLock didn't really matter in this path (access once per HTTP request).

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'd have probably gone with an RWMutex — I'm expecting each ingester to be able to handle ~1 million samples/s, or 10k HTTP requests a second. Right now they're only doing ~100 req/s, so it's not a big deal. We can change later.

defer d.ingestLimitersMtx.Unlock()

if limiter, ok := d.ingestLimiters[userID]; ok {
return limiter
}

limiter := rate.NewLimiter(rate.Limit(d.cfg.IngestionRateLimit), d.cfg.IngestionBurstSize)
d.ingestLimiters[userID] = limiter
return limiter
}

func (d *Distributor) sendSamples(ctx context.Context, ingester *ring.IngesterDesc, sampleTrackers []*sampleTracker) error {
client, err := d.getClientFor(ingester)
if err != nil {
Expand Down
7 changes: 6 additions & 1 deletion distributor/http_server.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,8 +21,13 @@ func (d *Distributor) PushHandler(w http.ResponseWriter, r *http.Request) {

_, err := d.Push(ctx, &req)
if err != nil {
switch err {
case errIngestionRateLimitExceeded:
http.Error(w, err.Error(), http.StatusTooManyRequests)
default:
http.Error(w, err.Error(), http.StatusInternalServerError)
}
log.Errorf("append err: %v", err)
http.Error(w, err.Error(), http.StatusInternalServerError)
}
}

Expand Down
27 changes: 27 additions & 0 deletions vendor/golang.org/x/time/rate/LICENSE

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Loading