Skip to content
This repository was archived by the owner on Aug 23, 2023. It is now read-only.

Commit 63f6f04

Browse files
committed
typos and cleanups
1 parent f6bdc15 commit 63f6f04

File tree

8 files changed

+15
-17
lines changed

8 files changed

+15
-17
lines changed

idx/bigtable/bigtable.go

+5-5
Original file line numberDiff line numberDiff line change
@@ -217,7 +217,7 @@ func (b *BigtableIdx) Update(point schema.MetricPoint, partition int32) (idx.Arc
217217

218218
if inMemory {
219219
// bigtable uses partition ID in the key prefix, so an "update" that changes the partition for
220-
// an existing metricDef will just create a new row in the table and wont remove the old row.
220+
// an existing metricDef will just create a new row in the table and won't remove the old row.
221221
// So we need to explicitly delete the old entry.
222222
if oldPartition != partition {
223223
go func() {
@@ -255,7 +255,7 @@ func (b *BigtableIdx) AddOrUpdate(mkey schema.MKey, data *schema.MetricData, par
255255

256256
if inMemory {
257257
// bigtable uses partition ID in the key prefix, so an "update" that changes the partition for
258-
// an existing metricDef will just create a new row in the table and wont remove the old row.
258+
// an existing metricDef will just create a new row in the table and won't remove the old row.
259259
// So we need to explicitly delete the old entry.
260260
if oldPartition != partition {
261261
go func() {
@@ -289,7 +289,7 @@ func (b *BigtableIdx) updateBigtable(now uint32, inMemory bool, archive idx.Arch
289289
b.MemoryIdx.UpdateArchive(archive)
290290
} else {
291291
// perform a non-blocking write to the writeQueue. If the queue is full, then
292-
// this will fail and we wont update the LastSave timestamp. The next time
292+
// this will fail and we won't update the LastSave timestamp. The next time
293293
// the metric is seen, the previous lastSave timestamp will still be in place and so
294294
// we will try and save again. This will continue until we are successful or the
295295
// lastSave timestamp become more then 1.5 x UpdateInterval, in which case we will
@@ -341,7 +341,7 @@ func (b *BigtableIdx) LoadPartition(partition int32, defs []schema.MetricDefinit
341341
return true
342342
}, bigtable.RowFilter(bigtable.FamilyFilter(COLUMN_FAMILY)))
343343
if err != nil {
344-
log.Fatalf("bigtable-idx: failed to load defs form Bigtable. %s", err)
344+
log.Fatalf("bigtable-idx: failed to load defs from Bigtable. %s", err)
345345
}
346346
if marshalErr != nil {
347347
log.Fatalf("bigtable-idx: failed to marshal row to metricDef. %s", marshalErr)
@@ -390,7 +390,7 @@ func (b *BigtableIdx) processWriteQueue() {
390390
errs, err := b.tbl.ApplyBulk(context.Background(), rowKeys, mutations)
391391
if err != nil {
392392
statQueryInsertFail.Add(len(rowKeys))
393-
log.Errorf("bigtable-idx: Failed to write %d defs to bigtable. they wont be retried. %s", len(rowKeys), err)
393+
log.Errorf("bigtable-idx: Failed to write %d defs to bigtable. they won't be retried. %s", len(rowKeys), err)
394394
complete = true
395395
} else if len(errs) > 0 {
396396
var failedRowKeys []string

idx/bigtable/config.go

-1
Original file line numberDiff line numberDiff line change
@@ -76,7 +76,6 @@ func ConfigSetup() {
7676
btIdx.BoolVar(&CliConfig.CreateCF, "create-cf", CliConfig.CreateCF, "enable the creation of the table and column families")
7777

7878
globalconf.Register("bigtable-idx", btIdx)
79-
return
8079
}
8180

8281
func ConfigProcess() {

idx/cassandra/cassandra.go

+3-3
Original file line numberDiff line numberDiff line change
@@ -275,7 +275,7 @@ func (c *CasIdx) Update(point schema.MetricPoint, partition int32) (idx.Archive,
275275

276276
if inMemory {
277277
// Cassandra uses partition id as the partitioning key, so an "update" that changes the partition for
278-
// an existing metricDef will just create a new row in the table and wont remove the old row.
278+
// an existing metricDef will just create a new row in the table and won't remove the old row.
279279
// So we need to explicitly delete the old entry.
280280
if oldPartition != partition {
281281
c.deleteDefAsync(point.MKey, oldPartition)
@@ -309,7 +309,7 @@ func (c *CasIdx) AddOrUpdate(mkey schema.MKey, data *schema.MetricData, partitio
309309

310310
if inMemory {
311311
// Cassandra uses partition id as the partitioning key, so an "update" that changes the partition for
312-
// an existing metricDef will just create a new row in the table and wont remove the old row.
312+
// an existing metricDef will just create a new row in the table and won't remove the old row.
313313
// So we need to explicitly delete the old entry.
314314
if oldPartition != partition {
315315
c.deleteDefAsync(mkey, oldPartition)
@@ -338,7 +338,7 @@ func (c *CasIdx) updateCassandra(now uint32, inMemory bool, archive idx.Archive,
338338
c.MemoryIdx.UpdateArchive(archive)
339339
} else {
340340
// perform a non-blocking write to the writeQueue. If the queue is full, then
341-
// this will fail and we wont update the LastSave timestamp. The next time
341+
// this will fail and we won't update the LastSave timestamp. The next time
342342
// the metric is seen, the previous lastSave timestamp will still be in place and so
343343
// we will try and save again. This will continue until we are successful or the
344344
// lastSave timestamp become more then 1.5 x UpdateInterval, in which case we will

idx/cassandra/cassandra_test.go

+1-1
Original file line numberDiff line numberDiff line change
@@ -405,7 +405,7 @@ func TestFind(t *testing.T) {
405405
So(nodes, ShouldHaveLength, 0)
406406
})
407407

408-
Convey("When searching nodes that dont exist", t, func() {
408+
Convey("When searching nodes that don't exist", t, func() {
409409
nodes, err := ix.Find(1, "foo.demo.blah.*", 0)
410410
So(err, ShouldBeNil)
411411
So(nodes, ShouldHaveLength, 0)

idx/memory/memory.go

+1-1
Original file line numberDiff line numberDiff line change
@@ -380,7 +380,7 @@ func (m *MemoryIdx) Load(defs []schema.MetricDefinition) int {
380380
}
381381

382382
// as we are loading the metricDefs from a persistent store, set the lastSave
383-
// to the lastUpdate timestamp. This wont exactly match the true lastSave Timstamp,
383+
// to the lastUpdate timestamp. This won't exactly match the true lastSave Timstamp,
384384
// but it will be close enough and it will always be true that the lastSave was at
385385
// or after this time. For metrics that are sent at or close to real time (the typical
386386
// use case), then the value will be within a couple of seconds of the true lastSave.

idx/memory/memory_test.go

+1-1
Original file line numberDiff line numberDiff line change
@@ -266,7 +266,7 @@ func testFind(t *testing.T) {
266266
So(nodes, ShouldHaveLength, 0)
267267
})
268268

269-
Convey("When searching nodes that dont exist", t, func() {
269+
Convey("When searching nodes that don't exist", t, func() {
270270
nodes, err := ix.Find(1, "foo.demo.blah.*", 0)
271271
So(err, ShouldBeNil)
272272
So(nodes, ShouldHaveLength, 0)

store/bigtable/bigtable.go

+3-4
Original file line numberDiff line numberDiff line change
@@ -191,8 +191,8 @@ func NewStore(cfg *StoreConfig, ttls []uint32, schemaMaxChunkSpan uint32) (*Stor
191191
readLimiter: util.NewLimiter(cfg.ReadConcurrency),
192192
cfg: cfg,
193193
}
194+
s.wg.Add(cfg.WriteConcurrency)
194195
for i := 0; i < cfg.WriteConcurrency; i++ {
195-
s.wg.Add(1)
196196
// Each processWriteQueue thread uses a channel and a buffer for queuing unwritten chunks.
197197
// In total, each processWriteQueue thread should not have more then "write-queue-size" chunks
198198
// that are queued. To ensure this, set the channel size to "write-queue-size" - "write-max-flush-size"
@@ -398,7 +398,7 @@ func (s *Store) Search(ctx context.Context, key schema.AMKey, ttl, start, end ui
398398
agg = key.Archive.String()
399399
}
400400
// filter the results to just the agg method (Eg raw, min_60, max_1800, etc..) and the timerange we want.
401-
// we fetch all columnFamilies (which are the different TTLS). Typically there will be only one columnFamily
401+
// we fetch all columnFamilies (which are the different TTLs). Typically there will be only one columnFamily
402402
// that has data, unless the TTL of the agg has changed. In which case we want all columnFamilies anyway.
403403
filter := bigtable.ChainFilters(
404404
bigtable.ColumnFilter(agg),
@@ -431,8 +431,7 @@ func (s *Store) Search(ctx context.Context, key schema.AMKey, ttl, start, end ui
431431
}
432432
chunks++
433433

434-
// This function is called serially so we dont need synchronization around adding to
435-
// itgens.
434+
// This function is called serially so we don't need synchronization here
436435
itgens = append(itgens, *itgen)
437436
}
438437
}

store/bigtable/config.go

+1-1
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@ type StoreConfig struct {
2626
}
2727

2828
func (cfg *StoreConfig) Validate(schemaMaxChunkSpan uint32) error {
29-
// If we dont have any write threads, then WriteMaxFlushSize and WriteQueueSize
29+
// If we don't have any write threads, then WriteMaxFlushSize and WriteQueueSize
3030
// are not used. If we do have write threads, then we need to make sure that
3131
// the the writeMaxFlushSize is not larger then the bigtable hardcoded limit of 100k
3232
// and that the writeQueue size is larger then the maxFlush.

0 commit comments

Comments
 (0)