From 12493417fc4fe63daef857688f94e769e8daf598 Mon Sep 17 00:00:00 2001
From: Marco Pracucci
Date: Thu, 19 Dec 2019 11:07:02 +0100
Subject: [PATCH] Fixed typos in comments

Signed-off-by: Marco Pracucci
---
 pkg/cacheutil/memcached_client.go      | 11 ++++++-----
 pkg/cacheutil/memcached_client_test.go | 14 +++++++-------
 2 files changed, 13 insertions(+), 12 deletions(-)

diff --git a/pkg/cacheutil/memcached_client.go b/pkg/cacheutil/memcached_client.go
index 1aea34c5c81..203bd321ca8 100644
--- a/pkg/cacheutil/memcached_client.go
+++ b/pkg/cacheutil/memcached_client.go
@@ -44,6 +44,8 @@ type MemcachedClient interface {
 	GetMulti(ctx context.Context, keys []string) map[string][]byte
 
 	// SetAsync enqueues an asynchronous operation to store a key into memcached.
+	// Returns an error if it fails to enqueue the operation. If the underlying
+	// async operation fails, the error will be tracked/logged.
 	SetAsync(ctx context.Context, key string, value []byte, ttl time.Duration) error
 
 	// Stop client and release underlying resources.
@@ -261,7 +263,7 @@ func (c *memcachedClient) SetAsync(ctx context.Context, key string, value []byte
 		span.Finish()
 		if err != nil {
 			c.failures.WithLabelValues(opSet).Inc()
-			level.Warn(c.logger).Log("msg", "failed to store item memcached", "key", key, "err", err)
+			level.Warn(c.logger).Log("msg", "failed to store item to memcached", "key", key, "err", err)
 			return
 		}
 
@@ -272,7 +274,7 @@ func (c *memcachedClient) SetAsync(ctx context.Context, key string, value []byte
 func (c *memcachedClient) GetMulti(ctx context.Context, keys []string) map[string][]byte {
 	batches, err := c.getMultiBatched(ctx, keys)
 	if err != nil {
-		level.Warn(c.logger).Log("msg", "failed to fetch keys from memcached", "err", err)
+		level.Warn(c.logger).Log("msg", "failed to fetch items from memcached", "err", err)
 
 		// In case we have both results and an error, it means some batch requests
 		// failed and other succeeded. In this case we prefer to log it and move on,
@@ -311,12 +313,11 @@ func (c *memcachedClient) getMultiBatched(ctx context.Context, keys []string) ([
 		numResults++
 	}
 
-	// Split input keys into batches and schedule a job for it.
+	// Spawn a goroutine for each batch request. The max concurrency will be
+	// enforced by getMultiSingle().
 	results := make(chan *memcachedGetMultiResult, numResults)
 	defer close(results)
 
-	// Spawn a goroutine for each batch request. The max concurrency will be
-	// enforced by getMultiSingle().
 	for batchStart := 0; batchStart < len(keys); batchStart += batchSize {
 		batchEnd := batchStart + batchSize
 		if batchEnd > len(keys) {
diff --git a/pkg/cacheutil/memcached_client_test.go b/pkg/cacheutil/memcached_client_test.go
index 41f40a82184..f73f1a27cee 100644
--- a/pkg/cacheutil/memcached_client_test.go
+++ b/pkg/cacheutil/memcached_client_test.go
@@ -148,7 +148,7 @@ func TestMemcachedClient_GetMulti(t *testing.T) {
 			},
 			expectedGetMultiCount: 1,
 		},
-		"should fetch keys in a multiple batches if the input keys is > the max batch size": {
+		"should fetch keys in multiple batches if the input keys is > the max batch size": {
 			maxBatchSize: 2,
 			maxConcurrency: 5,
 			initialItems: []memcache.Item{
@@ -164,7 +164,7 @@ func TestMemcachedClient_GetMulti(t *testing.T) {
 			},
 			expectedGetMultiCount: 2,
 		},
-		"should fetch keys in a multiple batches on input keys exact multiple of batch size": {
+		"should fetch keys in multiple batches on input keys exact multiple of batch size": {
 			maxBatchSize: 2,
 			maxConcurrency: 5,
 			initialItems: []memcache.Item{
@@ -182,7 +182,7 @@ func TestMemcachedClient_GetMulti(t *testing.T) {
 			},
 			expectedGetMultiCount: 2,
 		},
-		"should fetch keys in a multiple batches on input keys exact multiple of batch size with max concurrency disabled (0)": {
+		"should fetch keys in multiple batches on input keys exact multiple of batch size with max concurrency disabled (0)": {
 			maxBatchSize: 2,
 			maxConcurrency: 0,
 			initialItems: []memcache.Item{
@@ -200,7 +200,7 @@ func TestMemcachedClient_GetMulti(t *testing.T) {
 			},
 			expectedGetMultiCount: 2,
 		},
-		"should fetch keys in a multiple batches on input keys exact multiple of batch size with max concurrency lower than the batches": {
+		"should fetch keys in multiple batches on input keys exact multiple of batch size with max concurrency lower than the batches": {
 			maxBatchSize: 1,
 			maxConcurrency: 1,
 			initialItems: []memcache.Item{
@@ -254,7 +254,7 @@ func TestMemcachedClient_GetMulti(t *testing.T) {
 			},
 			expectedGetMultiCount: 1,
 		},
-		"should no hits on all keys missing": {
+		"should return no hits on all keys missing": {
 			maxBatchSize: 2,
 			maxConcurrency: 5,
 			initialItems: []memcache.Item{
@@ -268,7 +268,7 @@ func TestMemcachedClient_GetMulti(t *testing.T) {
 			},
 			expectedGetMultiCount: 2,
 		},
-		"should no hits on partial errors while fetching batches and no items found": {
+		"should return no hits on partial errors while fetching batches and no items found": {
 			maxBatchSize: 2,
 			maxConcurrency: 5,
 			mockedGetMultiErrors: 1,
@@ -281,7 +281,7 @@ func TestMemcachedClient_GetMulti(t *testing.T) {
 			expectedHits: map[string][]byte{},
 			expectedGetMultiCount: 2,
 		},
-		"should no hits on all errors while fetching batches": {
+		"should return no hits on all errors while fetching batches": {
 			maxBatchSize: 2,
 			maxConcurrency: 5,
 			mockedGetMultiErrors: 2,
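
The comment relocated by this patch describes the pattern at the heart of getMultiBatched(): split the input keys into fixed-size batches, spawn one goroutine per batch, and merge the per-batch results from a buffered channel. The following is a minimal, self-contained Go sketch of that pattern, not the actual Thanos implementation: fetchBatch is a hypothetical stand-in for a single memcached GetMulti round trip, and the max-concurrency gate that the real client enforces inside getMultiSingle() is omitted.

package main

import "fmt"

// fetchBatch is a hypothetical stand-in for one memcached GetMulti call.
func fetchBatch(keys []string) map[string][]byte {
	hits := make(map[string][]byte, len(keys))
	for _, k := range keys {
		hits[k] = []byte("value-" + k)
	}
	return hits
}

func getMultiBatched(keys []string, batchSize int) map[string][]byte {
	// Compute the number of batches up front so the results channel can be
	// buffered and every goroutine can send without blocking.
	numBatches := (len(keys) + batchSize - 1) / batchSize
	results := make(chan map[string][]byte, numBatches)

	// Spawn a goroutine for each batch request (the real client bounds the
	// concurrency further down, inside getMultiSingle()).
	for batchStart := 0; batchStart < len(keys); batchStart += batchSize {
		batchEnd := batchStart + batchSize
		if batchEnd > len(keys) {
			batchEnd = len(keys)
		}
		go func(batch []string) {
			results <- fetchBatch(batch)
		}(keys[batchStart:batchEnd])
	}

	// Merge the per-batch results into a single hits map.
	hits := map[string][]byte{}
	for i := 0; i < numBatches; i++ {
		for k, v := range <-results {
			hits[k] = v
		}
	}
	return hits
}

func main() {
	// 5 keys with a batch size of 2 yield 3 batches; the last batch is
	// smaller, which is why batchEnd is clamped to len(keys).
	fmt.Println(getMultiBatched([]string{"a", "b", "c", "d", "e"}, 2))
}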