/*
* Copyright 2016-2018 Dgraph Labs, Inc. and Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package worker
import (
"bytes"
"context"
"encoding/hex"
"fmt"
"sort"
"sync"
"sync/atomic"
"time"
humanize "github.com/dustin/go-humanize"
"go.etcd.io/etcd/raft"
"go.etcd.io/etcd/raft/raftpb"
ostats "go.opencensus.io/stats"
"go.opencensus.io/tag"
otrace "go.opencensus.io/trace"
"github.com/dgraph-io/badger/v2"
bpb "github.com/dgraph-io/badger/v2/pb"
"github.com/dgraph-io/badger/v2/y"
"github.com/dgraph-io/dgraph/conn"
"github.com/dgraph-io/dgraph/dgraph/cmd/zero"
"github.com/dgraph-io/dgraph/posting"
"github.com/dgraph-io/dgraph/protos/pb"
"github.com/dgraph-io/dgraph/raftwal"
"github.com/dgraph-io/dgraph/schema"
"github.com/dgraph-io/dgraph/types"
"github.com/dgraph-io/dgraph/x"
"github.com/pkg/errors"
"github.com/golang/glog"
"golang.org/x/net/trace"
)
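// node wraps conn.Node with the state needed to run Raft for a single Alpha group:
// the channel of committed proposals to apply, the posting list rollup channel, and
// bookkeeping for the size of pending proposals.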
type node struct {
*conn.Node
// Fields which are never changed after init.
applyCh chan []*pb.Proposal
rollupCh chan uint64 // Channel to run posting list rollups.
ctx context.Context
gid uint32
closer *y.Closer
streaming int32 // Used to avoid calculating snapshot
canCampaign bool
elog trace.EventLog
pendingSize int64
ex *Executor
}
// Now that we apply txn updates via Raft, waiting based on Txn timestamps is
// sufficient. We don't need to wait for proposals to be applied.
func newNode(store *raftwal.DiskStorage, gid uint32, id uint64, myAddr string) *node {
glog.Infof("Node ID: %#x with GroupID: %d\n", id, gid)
rc := &pb.RaftContext{
Addr: myAddr,
Group: gid,
Id: id,
}
m := conn.NewNode(rc, store)
n := &node{
Node: m,
ctx: context.Background(),
gid: gid,
// We need a generous size for applyCh, because raft.Tick happens every
// 10ms. If we restrict the size here, then Raft goes into a loop trying
// to maintain quorum health.
applyCh: make(chan []*pb.Proposal, 1000),
rollupCh: make(chan uint64, 3),
elog: trace.NewEventLog("Dgraph", "ApplyCh"),
closer: y.NewCloser(3), // Matches CLOSER:1
}
if x.WorkerConfig.LudicrousMode {
n.ex = newExecutor()
}
return n
}
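// Ctx returns the context attached to the proposal with the given key, or a
// background context if the proposal is not being tracked.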
func (n *node) Ctx(key string) context.Context {
if pctx := n.Proposals.Get(key); pctx != nil {
return pctx.Ctx
}
return context.Background()
}
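// applyConfChange applies a Raft membership change entry: it removes the peer on
// ConfChangeRemoveNode, connects to the peer otherwise, and records the resulting ConfState.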
func (n *node) applyConfChange(e raftpb.Entry) {
var cc raftpb.ConfChange
if err := cc.Unmarshal(e.Data); err != nil {
glog.Errorf("While unmarshalling confchange: %+v", err)
}
if cc.Type == raftpb.ConfChangeRemoveNode {
n.DeletePeer(cc.NodeID)
} else if len(cc.Context) > 0 {
var rc pb.RaftContext
x.Check(rc.Unmarshal(cc.Context))
n.Connect(rc.Id, rc.Addr)
}
cs := n.Raft().ApplyConfChange(cc)
n.SetConfState(cs)
n.DoneConfChange(cc.ID, nil)
}
var errHasPendingTxns = errors.New("Pending transactions found. Please retry operation")
// We must not wait here. Previously, we used to block until we had aborted the
// transactions. We're now applying all updates serially, so blocking for one
// operation is not an option.
func detectPendingTxns(attr string) error {
tctxs := posting.Oracle().IterateTxns(func(key []byte) bool {
pk, err := x.Parse(key)
if err != nil {
return false
}
return pk.Attr == attr
})
if len(tctxs) == 0 {
return nil
}
go tryAbortTransactions(tctxs)
return errHasPendingTxns
}
// We don't support schema mutations across nodes in a transaction.
// We wait for all pending transactions to either abort or complete, and all write
// transactions involving the predicate are aborted until the schema mutations are done.
func (n *node) applyMutations(ctx context.Context, proposal *pb.Proposal) (rerr error) {
span := otrace.FromContext(ctx)
if proposal.Mutations.DropOp == pb.Mutations_DATA {
// Ensures nothing gets written to disk due to commit proposals.
posting.Oracle().ResetTxns()
return posting.DeleteData()
}
if proposal.Mutations.DropOp == pb.Mutations_ALL {
// Ensures nothing gets written to disk due to commit proposals.
posting.Oracle().ResetTxns()
schema.State().DeleteAll()
if err := posting.DeleteAll(); err != nil {
return err
}
if groups().groupId() == 1 {
initialSchema := schema.InitialSchema()
for _, s := range initialSchema {
if err := updateSchema(s); err != nil {
return err
}
if servesTablet, err := groups().ServesTablet(s.Predicate); err != nil {
return err
} else if !servesTablet {
return errors.Errorf("group 1 should always serve reserved predicate %s",
s.Predicate)
}
}
}
return nil
}
if proposal.Mutations.DropOp == pb.Mutations_TYPE {
return schema.State().DeleteType(proposal.Mutations.DropValue)
}
if proposal.Mutations.StartTs == 0 {
return errors.New("StartTs must be provided")
}
startTs := proposal.Mutations.StartTs
if len(proposal.Mutations.Schema) > 0 || len(proposal.Mutations.Types) > 0 {
span.Annotatef(nil, "Applying schema and types")
for _, supdate := range proposal.Mutations.Schema {
// We should not need to check for predicate move here.
if err := detectPendingTxns(supdate.Predicate); err != nil {
return err
}
if err := runSchemaMutation(ctx, supdate, startTs); err != nil {
return err
}
}
for _, tupdate := range proposal.Mutations.Types {
if err := runTypeMutation(ctx, tupdate); err != nil {
return err
}
}
return nil
}
// The scheduler tracks tasks at the subject, predicate level, so doing the
// schema work here simplifies the design and we needn't worry about
// serializing the mutations per predicate or the schema mutations.
// We derive the schema here if it's not present.
// Since Raft committed logs are serialized, we can derive the
// schema here without any locking.
// schemaMap stores, for each predicate, the type of the first mutation observed for it.
schemaMap := make(map[string]types.TypeID)
for _, edge := range proposal.Mutations.Edges {
if edge.Entity == 0 && bytes.Equal(edge.Value, []byte(x.Star)) {
// We should only drop the predicate if there is no pending
// transaction.
if err := detectPendingTxns(edge.Attr); err != nil {
span.Annotatef(nil, "Found pending transactions. Retry later.")
return err
}
span.Annotatef(nil, "Deleting predicate: %s", edge.Attr)
return posting.DeletePredicate(ctx, edge.Attr)
}
// Don't derive schema when doing deletion.
if edge.Op == pb.DirectedEdge_DEL {
continue
}
if _, ok := schemaMap[edge.Attr]; !ok {
schemaMap[edge.Attr] = posting.TypeID(edge)
}
}
total := len(proposal.Mutations.Edges)
// TODO: Active mutations values can go up or down, but OpenCensus stats
// bucket boundaries start from 0, hence recording both negative and
// positive values skews up the values.
ostats.Record(ctx, x.ActiveMutations.M(int64(total)))
defer func() {
ostats.Record(ctx, x.ActiveMutations.M(int64(-total)))
}()
// Go through all the predicates and their first observed schema type. If we are unable to find
// these predicates in the current schema state, add them to the schema state. Note that the
// schema deduction is done by RDF/JSON chunker.
for attr, storageType := range schemaMap {
if _, err := schema.State().TypeOf(attr); err != nil {
hint := pb.Metadata_DEFAULT
if mutHint, ok := proposal.Mutations.Metadata.PredHints[attr]; ok {
hint = mutHint
}
if err := createSchema(attr, storageType, hint); err != nil {
return err
}
}
}
m := proposal.Mutations
txn := posting.Oracle().RegisterStartTs(m.StartTs)
if txn.ShouldAbort() {
span.Annotatef(nil, "Txn %d should abort.", m.StartTs)
return zero.ErrConflict
}
// Discard the posting lists from cache to release memory at the end.
if !x.WorkerConfig.LudicrousMode {
defer txn.Update()
}
// It is possible that the user gives us multiple versions of the same edge, one with no facets
// and another with facets. In that case, use stable sort to maintain the ordering given to us
// by the user.
// TODO: Do this in a way where we don't break multiple updates for the same Edge across
// different goroutines.
sort.SliceStable(m.Edges, func(i, j int) bool {
ei := m.Edges[i]
ej := m.Edges[j]
if ei.GetAttr() != ej.GetAttr() {
return ei.GetAttr() < ej.GetAttr()
}
return ei.GetEntity() < ej.GetEntity()
})
if x.WorkerConfig.LudicrousMode {
n.ex.addEdges(ctx, m.StartTs, m.Edges)
return nil
}
process := func(edges []*pb.DirectedEdge) error {
var retries int
for _, edge := range edges {
for {
err := runMutation(ctx, edge, txn)
if err == nil {
break
}
if err != posting.ErrRetry {
return err
}
retries++
}
}
if retries > 0 {
span.Annotatef(nil, "retries=true num=%d", retries)
}
return nil
}
numGo, width := x.DivideAndRule(len(m.Edges))
span.Annotatef(nil, "To apply: %d edges. NumGo: %d. Width: %d", len(m.Edges), numGo, width)
if numGo == 1 {
return process(m.Edges)
}
errCh := make(chan error, numGo)
for i := 0; i < numGo; i++ {
start := i * width
end := start + width
if end > len(m.Edges) {
end = len(m.Edges)
}
go func(start, end int) {
errCh <- process(m.Edges[start:end])
}(start, end)
}
for i := 0; i < numGo; i++ {
if err := <-errCh; err != nil {
return err
}
}
return nil
}
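// applyCommitted applies a single committed proposal to the state machine: mutations,
// key-value writes, membership state, predicate cleanup, oracle deltas, or snapshot creation.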
func (n *node) applyCommitted(proposal *pb.Proposal) error {
ctx := n.Ctx(proposal.Key)
span := otrace.FromContext(ctx)
span.Annotatef(nil, "node.applyCommitted Node id: %d. Group id: %d. Got proposal key: %s",
n.Id, n.gid, proposal.Key)
if proposal.Mutations != nil {
// syncmarks for this shouldn't be marked done until it's committed.
span.Annotate(nil, "Applying mutations")
if err := n.applyMutations(ctx, proposal); err != nil {
span.Annotatef(nil, "While applying mutations: %v", err)
return err
}
span.Annotate(nil, "Done")
return nil
}
switch {
case len(proposal.Kv) > 0:
return populateKeyValues(ctx, proposal.Kv)
case proposal.State != nil:
n.elog.Printf("Applying state for key: %s", proposal.Key)
// This state needn't be snapshotted in this group; on restart we would fetch
// a state which is the latest or equal to this one.
groups().applyState(proposal.State)
return nil
case len(proposal.CleanPredicate) > 0:
n.elog.Printf("Cleaning predicate: %s", proposal.CleanPredicate)
end := time.Now().Add(10 * time.Second)
for proposal.ExpectedChecksum > 0 && time.Now().Before(end) {
cur := atomic.LoadUint64(&groups().membershipChecksum)
if proposal.ExpectedChecksum == cur {
break
}
time.Sleep(100 * time.Millisecond)
glog.Infof("Waiting for checksums to match. Expected: %d. Current: %d\n",
proposal.ExpectedChecksum, cur)
}
if time.Now().After(end) {
glog.Warningf(
"Giving up on predicate deletion: %q due to timeout. Wanted checksum: %d.",
proposal.CleanPredicate, proposal.ExpectedChecksum)
return nil
}
return posting.DeletePredicate(ctx, proposal.CleanPredicate)
case proposal.Delta != nil:
n.elog.Printf("Applying Oracle Delta for key: %s", proposal.Key)
return n.commitOrAbort(proposal.Key, proposal.Delta)
case proposal.Snapshot != nil:
existing, err := n.Store.Snapshot()
if err != nil {
return err
}
snap := proposal.Snapshot
if existing.Metadata.Index >= snap.Index {
log := fmt.Sprintf("Skipping snapshot at %d, because found one at %d",
snap.Index, existing.Metadata.Index)
n.elog.Printf(log)
glog.Info(log)
return nil
}
n.elog.Printf("Creating snapshot: %+v", snap)
glog.Infof("Creating snapshot at index: %d. ReadTs: %d.\n", snap.Index, snap.ReadTs)
data, err := snap.Marshal()
x.Check(err)
for {
// We should never let CreateSnapshot have an error.
err := n.Store.CreateSnapshot(snap.Index, n.ConfState(), data)
if err == nil {
break
}
glog.Warningf("Error while calling CreateSnapshot: %v. Retrying...", err)
}
// Roll up all posting lists as a best-effort operation.
n.rollupCh <- snap.ReadTs
return nil
}
x.Fatalf("Unknown proposal: %+v", proposal)
return nil
}
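// processRollups runs in its own goroutine and periodically rolls up posting lists
// at the latest read timestamp received on rollupCh.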
func (n *node) processRollups() {
defer n.closer.Done() // CLOSER:1
tick := time.NewTicker(5 * time.Minute) // Rolling up once every 5 minutes seems alright.
defer tick.Stop()
var readTs, last uint64
for {
select {
case <-n.closer.HasBeenClosed():
return
case readTs = <-n.rollupCh:
case <-tick.C:
glog.V(3).Infof("Evaluating rollup readTs:%d last:%d rollup:%v", readTs, last, readTs > last)
if readTs <= last {
break // Break out of the select case.
}
if err := n.rollupLists(readTs); err != nil {
// If we encounter an error here, we don't need to do anything about
// it. Just let the user know.
glog.Errorf("Error while rolling up lists at %d: %v\n", readTs, err)
} else {
last = readTs // Update last only if we succeeded.
glog.Infof("List rollup at Ts %d: OK.\n", readTs)
}
}
}
}
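// processApplyCh serially applies the batches of proposals coming over applyCh,
// skipping proposals that were already applied with the same key and size, and
// marks each proposal and its Raft index as done.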
func (n *node) processApplyCh() {
defer n.closer.Done() // CLOSER:1
type P struct {
err error
size int
seen time.Time
}
previous := make(map[string]*P)
// This function must be run serially.
handle := func(proposals []*pb.Proposal) {
var totalSize int64
for _, proposal := range proposals {
// We use the size as a double check to ensure that we're
// working with the same proposal as before.
psz := proposal.Size()
totalSize += int64(psz)
var perr error
p, ok := previous[proposal.Key]
if ok && p.err == nil && p.size == psz {
n.elog.Printf("Proposal with key: %s already applied. Skipping index: %d.\n",
proposal.Key, proposal.Index)
previous[proposal.Key].seen = time.Now() // Update the ts.
// Don't break here. We still need to call the Done below.
} else {
start := time.Now()
perr = n.applyCommitted(proposal)
if len(proposal.Key) > 0 {
p := &P{err: perr, size: psz, seen: time.Now()}
previous[proposal.Key] = p
}
if perr != nil {
glog.Errorf("Applying proposal. Error: %v. Proposal: %q.", perr, proposal)
}
n.elog.Printf("Applied proposal with key: %s, index: %d. Err: %v",
proposal.Key, proposal.Index, perr)
var tags []tag.Mutator
switch {
case proposal.Mutations != nil:
tags = append(tags, tag.Upsert(x.KeyMethod, "apply.Mutations"))
case proposal.Delta != nil:
tags = append(tags, tag.Upsert(x.KeyMethod, "apply.Delta"))
}
ms := x.SinceMs(start)
_ = ostats.RecordWithTags(context.Background(), tags, x.LatencyMs.M(ms))
}
n.Proposals.Done(proposal.Key, perr)
n.Applied.Done(proposal.Index)
ostats.Record(context.Background(), x.RaftAppliedIndex.M(int64(n.Applied.DoneUntil())))
}
if sz := atomic.AddInt64(&n.pendingSize, -totalSize); sz < 0 {
glog.Warningf("Pending size should remain above zero: %d", sz)
}
}
maxAge := 10 * time.Minute
tick := time.NewTicker(maxAge / 2)
defer tick.Stop()
for {
select {
case entries, ok := <-n.applyCh:
if !ok {
return
}
handle(entries)
case <-tick.C:
// We use this ticker to clear out the previous map.
now := time.Now()
for key, p := range previous {
if now.Sub(p.seen) > maxAge {
delete(previous, key)
}
}
n.elog.Printf("Size of previous map: %d", len(previous))
}
}
}
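// commitOrAbort writes the commit or abort status of every transaction in the delta
// to disk, and then advances the Oracle so that waiting reads can be served.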
func (n *node) commitOrAbort(pkey string, delta *pb.OracleDelta) error {
// First let's commit all mutations to disk.
writer := posting.NewTxnWriter(pstore)
toDisk := func(start, commit uint64) {
txn := posting.Oracle().GetTxn(start)
if txn == nil {
return
}
txn.Update()
err := x.RetryUntilSuccess(x.WorkerConfig.MaxRetries, 10*time.Millisecond, func() error {
return txn.CommitToDisk(writer, commit)
})
if err != nil {
glog.Errorf("Error while applying txn status to disk (%d -> %d): %v",
start, commit, err)
}
}
for _, status := range delta.Txns {
toDisk(status.StartTs, status.CommitTs)
}
if x.WorkerConfig.LudicrousMode {
if err := writer.Wait(); err != nil {
glog.Errorf("Error while waiting to commit: +%v", err)
}
} else {
if err := writer.Flush(); err != nil {
return errors.Wrapf(err, "while flushing to disk")
}
}
g := groups()
if delta.GroupChecksums != nil && delta.GroupChecksums[g.groupId()] > 0 {
atomic.StoreUint64(&g.deltaChecksum, delta.GroupChecksums[g.groupId()])
}
// Now advance Oracle(), so we can service waiting reads.
posting.Oracle().ProcessDelta(delta)
return nil
}
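// leaderBlocking returns a connection pool to the group's leader. If no leader is
// known yet, it refreshes the membership state from Zero and returns an error.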
func (n *node) leaderBlocking() (*conn.Pool, error) {
pool := groups().Leader(groups().groupId())
if pool == nil {
// Functions like retrieveSnapshot and joinPeers block at initial start, and
// leader election for a group might not have happened yet when they are called. If we
// can't find a leader, get the latest state from Zero.
if err := UpdateMembershipState(context.Background()); err != nil {
return nil, errors.Errorf("Error while trying to update membership state: %+v", err)
}
return nil, errors.Errorf("Unable to reach leader in group %d", n.gid)
}
return pool, nil
}
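// Snapshot returns the latest snapshot metadata persisted in the Raft store.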
func (n *node) Snapshot() (*pb.Snapshot, error) {
if n == nil || n.Store == nil {
return nil, conn.ErrNoNode
}
snap, err := n.Store.Snapshot()
if err != nil {
return nil, err
}
res := &pb.Snapshot{}
if err := res.Unmarshal(snap.Data); err != nil {
return nil, err
}
return res, nil
}
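// retrieveSnapshot streams the snapshot data from the leader (or from the peer named
// in the snapshot's RaftContext), stores it locally, and reloads the schema from the DB.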
func (n *node) retrieveSnapshot(snap pb.Snapshot) error {
// In some edge cases, the Zero leader might not have been able to update
// the status of the Alpha leader. So, instead of blocking forever waiting
// for Zero to send us the updated info about the leader, we can just use
// the Snapshot RaftContext, which contains the address of the leader.
var pool *conn.Pool
addr := snap.Context.GetAddr()
glog.V(2).Infof("Snapshot.RaftContext.Addr: %q", addr)
if len(addr) > 0 {
p, err := conn.GetPools().Get(addr)
if err != nil {
glog.V(2).Infof("conn.Get(%q) Error: %v", addr, err)
} else {
pool = p
glog.V(2).Infof("Leader connection picked from RaftContext")
}
}
if pool == nil {
glog.V(2).Infof("No leader conn from RaftContext. Using membership state.")
p, err := n.leaderBlocking()
if err != nil {
return err
}
pool = p
}
// We need to clear the posting lists stored in memory when retrieving a snapshot whose
// index is greater than this node's last index.
// Ideally, we should invalidate/remove only the posting lists belonging to this group.
//
// We can safely evict posting lists from memory, because all the updates corresponding to txn
// commits up until then have already been written to pstore. And the way we take snapshots, we
// keep all the pre-writes for a pending transaction, so they will come back to memory as the Raft
// logs are replayed.
if _, err := n.populateSnapshot(snap, pool); err != nil {
return errors.Wrapf(err, "cannot retrieve snapshot from peer")
}
// Populating the shard stores the streamed data directly into the DB, so we need to
// refresh the schema for the current group id.
if err := schema.LoadFromDb(); err != nil {
return errors.Wrapf(err, "while initializing schema")
}
groups().triggerMembershipSync()
return nil
}
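// proposeSnapshot calculates a snapshot that would discard at least discardN entries
// and, if one is found, proposes it through Raft.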
func (n *node) proposeSnapshot(discardN int) error {
snap, err := n.calculateSnapshot(0, discardN)
if err != nil {
return err
}
if snap == nil {
return nil
}
proposal := &pb.Proposal{
Snapshot: snap,
}
n.elog.Printf("Proposing snapshot: %+v\n", snap)
data, err := proposal.Marshal()
x.Check(err)
return n.Raft().Propose(n.ctx, data)
}
const maxPendingSize int64 = 64 << 20 // in bytes.
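// rampMeter blocks while the total size of pending proposals exceeds maxPendingSize,
// providing back-pressure before pushing more work onto applyCh.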
func (n *node) rampMeter() {
start := time.Now()
defer func() {
if dur := time.Since(start); dur > time.Second {
glog.Infof("Blocked pushing to applyCh for %v", dur.Round(time.Millisecond))
}
}()
for {
if atomic.LoadInt64(&n.pendingSize) <= maxPendingSize {
return
}
time.Sleep(3 * time.Millisecond)
}
}
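// updateRaftProgress advances the stored checkpoint to the latest applied index, so
// that already-applied log entries can be skipped on restart.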
func (n *node) updateRaftProgress() error {
// Both leader and followers can independently update their Raft progress. We don't store
// this in Raft WAL. Instead, this is used to just skip over log records that this Alpha
// has already applied, to speed up things on a restart.
//
// Let's check what we already have. And only update if the new snap.Index is ahead of the last
// stored applied.
applied, err := n.Store.Checkpoint()
if err != nil {
return err
}
snap, err := n.calculateSnapshot(applied, 3) // 3 is a randomly chosen small number.
if err != nil || snap == nil || snap.Index <= applied {
return err
}
if err := n.Store.UpdateCheckpoint(snap); err != nil {
return err
}
glog.V(2).Infof("[%#x] Set Raft progress to index: %d.", n.Id, snap.Index)
return nil
}
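// checkpointAndClose runs on a slow ticker: it updates the Raft checkpoint, proposes
// snapshots and aborts old transactions on the leader, and shuts the Raft node down
// when the closer is signalled.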
func (n *node) checkpointAndClose(done chan struct{}) {
slowTicker := time.NewTicker(time.Minute)
defer slowTicker.Stop()
for {
select {
case <-slowTicker.C:
// Do these operations asynchronously away from the main Run loop to allow heartbeats to
// be sent on time. Otherwise, followers would just keep running elections.
n.elog.Printf("Size of applyCh: %d", len(n.applyCh))
if err := n.updateRaftProgress(); err != nil {
glog.Errorf("While updating Raft progress: %v", err)
}
if n.AmLeader() {
var calculate bool
if chk, err := n.Store.Checkpoint(); err == nil {
if first, err := n.Store.FirstIndex(); err == nil {
// Save some cycles by only calculating snapshot if the checkpoint has gone
// quite a bit further than the first index.
calculate = chk >= first+uint64(x.WorkerConfig.SnapshotAfter)
glog.V(3).Infof("Evaluating snapshot first:%d chk:%d (chk-first:%d) "+
"snapshotAfter:%d snap:%v", first, chk, chk-first,
x.WorkerConfig.SnapshotAfter, calculate)
}
}
// We keep track of the applied index in the p directory. Even if we don't take
// snapshot for a while and let the Raft logs grow and restart, we would not have to
// run all the log entries, because we can tell Raft.Config to set Applied to that
// index.
// This applied index tracking also covers the case when we have a big index
// rebuild. The rebuild would be tracked just like others and would not need to be
// replayed after a restart, because the Applied config would let us skip right
// through it.
// We use disk based storage for Raft. So, we're not too concerned about
// snapshotting. We just need to do enough, so that we don't have a huge backlog of
// entries to process on a restart.
if calculate {
if err := n.proposeSnapshot(x.WorkerConfig.SnapshotAfter); err != nil {
glog.Errorf("While calculating and proposing snapshot: %v", err)
}
}
go n.abortOldTransactions()
}
case <-n.closer.HasBeenClosed():
glog.Infof("Stopping node.Run")
if peerId, has := groups().MyPeer(); has && n.AmLeader() {
n.Raft().TransferLeadership(n.ctx, x.WorkerConfig.RaftId, peerId)
time.Sleep(time.Second) // Let transfer happen.
}
n.Raft().Stop()
close(done)
return
}
}
}
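// drainApplyChan empties applyCh, marking every queued proposal and its index as done
// without applying it.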
func (n *node) drainApplyChan() {
for {
select {
case proposals := <-n.applyCh:
glog.Infof("Draining %d proposals\n", len(proposals))
for _, proposal := range proposals {
n.Proposals.Done(proposal.Key, nil)
n.Applied.Done(proposal.Index)
}
default:
return
}
}
}
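// Run is the main Raft loop. It ticks the Raft node, persists hard state, entries and
// snapshots, retrieves snapshots from the leader when required, and forwards committed
// proposals to applyCh for application.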
func (n *node) Run() {
defer n.closer.Done() // CLOSER:1
firstRun := true
var leader bool
// See also our configuration of HeartbeatTick and ElectionTick.
// Previously we used 20ms ticks, but they would overload the Raft tick channel, causing
// "tick missed to fire" logs. Etcd uses 100ms and they haven't seen those issues.
// Additionally, using 100ms ticks does not cause proposals to slow down, because they get
// sent out ASAP and don't rely on ticks. So, setting this to 100ms instead of 20ms is a NOOP.
ticker := time.NewTicker(100 * time.Millisecond)
defer ticker.Stop()
done := make(chan struct{})
go n.checkpointAndClose(done)
go n.ReportRaftComms()
if x.WorkerConfig.LudicrousMode {
closer := y.NewCloser(2)
defer closer.SignalAndWait()
go x.StoreSync(n.Store, closer)
go x.StoreSync(pstore, closer)
}
applied, err := n.Store.Checkpoint()
if err != nil {
glog.Errorf("While trying to find raft progress: %v", err)
} else {
glog.Infof("Found Raft progress: %d", applied)
}
var timer x.Timer
for {
select {
case <-done:
// We use the done channel here instead of closer.HasBeenClosed so that we can transfer
// leadership in a goroutine. The push to n.applyCh happens in this loop, so the close
// should happen here too. Otherwise, a race condition between push and close would occur.
close(n.applyCh)
glog.Infoln("Raft node done.")
return
// Slow ticker can't be placed here because figuring out checkpoints and snapshots takes
// time and if the leader does not send heartbeats out during this time, the followers
// start an election process. And that election process would just continue to happen
// indefinitely because checkpoints and snapshots are being calculated indefinitely.
case <-ticker.C:
n.Raft().Tick()
case rd := <-n.Raft().Ready():
timer.Start()
_, span := otrace.StartSpan(n.ctx, "Alpha.RunLoop",
otrace.WithSampler(otrace.ProbabilitySampler(0.001)))
if rd.SoftState != nil {
groups().triggerMembershipSync()
leader = rd.RaftState == raft.StateLeader
}
if leader {
// Leader can send messages in parallel with writing to disk.
for i := range rd.Messages {
// NOTE: We can do some optimizations here to drop messages.
n.Send(&rd.Messages[i])
}
}
if span != nil {
span.Annotate(nil, "Handled ReadStates and SoftState.")
}
// We move the retrieval of snapshot before we store the rd.Snapshot, so that in case
// this node fails to get the snapshot, the Raft state would reflect that by not having
// the snapshot on a future probe. This is different from the recommended order in Raft
// docs where they assume that the Snapshot contains the full data, so even on a crash
// between n.SaveToStorage and n.retrieveSnapshot, that Snapshot can be applied by the
// node on a restart. In our case, we don't store the full data in snapshot, only the
// metadata. So, we should only store the snapshot received in Raft, iff we actually
// were able to update the state.
if !raft.IsEmptySnap(rd.Snapshot) {
// We don't send snapshots to other nodes. But, if we get one, that means
// either the leader is trying to bring us up to state; or this is the
// snapshot that I created. Only the former case should be handled.
var snap pb.Snapshot
x.Check(snap.Unmarshal(rd.Snapshot.Data))
rc := snap.GetContext()
x.AssertTrue(rc.GetGroup() == n.gid)
if rc.Id != n.Id {
// Set node to unhealthy state here while it applies the snapshot.
x.UpdateHealthStatus(false)
// We are getting a new snapshot from leader. We need to wait for the applyCh to
// finish applying the updates, otherwise, we'll end up overwriting the data
// from the new snapshot that we retrieved.
// Drain the apply channel. Snapshot will be retrieved next.
maxIndex := n.Applied.LastIndex()
glog.Infof("Drain applyCh by reaching %d before"+
" retrieving snapshot\n", maxIndex)
n.drainApplyChan()
if err := n.Applied.WaitForMark(context.Background(), maxIndex); err != nil {
glog.Errorf("Error waiting for mark for index %d: %+v", maxIndex, err)
}
if currSnap, err := n.Snapshot(); err != nil {
// Retrieve entire snapshot from leader if node does not have
// a current snapshot.
glog.Errorf("Could not retrieve previous snapshot. Setting SinceTs to 0.")
snap.SinceTs = 0
} else {
snap.SinceTs = currSnap.ReadTs
}
// It's ok to block ticks while retrieving snapshot, since it's a follower.
glog.Infof("---> SNAPSHOT: %+v. Group %d from node id %#x\n",
snap, n.gid, rc.Id)
for {
err := n.retrieveSnapshot(snap)
if err == nil {
glog.Infoln("---> Retrieve snapshot: OK.")
break
}
glog.Errorf("While retrieving snapshot, error: %v. Retrying...", err)
time.Sleep(100 * time.Millisecond) // Wait for a bit.
}
glog.Infof("---> SNAPSHOT: %+v. Group %d. DONE.\n", snap, n.gid)
// Set node to healthy state here.
x.UpdateHealthStatus(true)
} else {
glog.Infof("---> SNAPSHOT: %+v. Group %d from node id %#x [SELF]. Ignoring.\n",
snap, n.gid, rc.Id)
}
if span != nil {
span.Annotate(nil, "Applied or retrieved snapshot.")
}
}
// Store the hardstate and entries. Note that these are not CommittedEntries.
n.SaveToStorage(&rd.HardState, rd.Entries, &rd.Snapshot)
timer.Record("disk")
if span != nil {
span.Annotatef(nil, "Saved %d entries. Snapshot, HardState empty? (%v, %v)",
len(rd.Entries),
raft.IsEmptySnap(rd.Snapshot),
raft.IsEmptyHardState(rd.HardState))
}
if !x.WorkerConfig.LudicrousMode && rd.MustSync {
if err := n.Store.Sync(); err != nil {
glog.Errorf("Error while calling Store.Sync: %+v", err)
}
timer.Record("sync")
}
// Now schedule or apply committed entries.
var proposals []*pb.Proposal
for _, entry := range rd.CommittedEntries {
// We need applied watermarks for schema mutations as well, for read linearizability.
// Applied watermarks need to be emitted as soon as possible, sequentially.
// If we emit Mark{4, false} and Mark{4, true} before emitting Mark{3, false},
// then doneUntil would be set to 4 as soon as Mark{4, true} is done and before
// Mark{3, false} is emitted. So it's safer to emit watermarks sequentially, as
// soon as possible.
n.Applied.Begin(entry.Index)
switch {
case entry.Type == raftpb.EntryConfChange:
n.applyConfChange(entry)
// Not present in proposal map.
n.Applied.Done(entry.Index)
groups().triggerMembershipSync()
case len(entry.Data) == 0:
n.elog.Printf("Found empty data at index: %d", entry.Index)
n.Applied.Done(entry.Index)
case entry.Index < applied:
n.elog.Printf("Skipping over already applied entry: %d", entry.Index)
n.Applied.Done(entry.Index)
default:
proposal := &pb.Proposal{}
if err := proposal.Unmarshal(entry.Data); err != nil {
x.Fatalf("Unable to unmarshal proposal: %v %q\n", err, entry.Data)
}
if pctx := n.Proposals.Get(proposal.Key); pctx != nil {
atomic.AddUint32(&pctx.Found, 1)
if span := otrace.FromContext(pctx.Ctx); span != nil {
span.Annotate(nil, "Proposal found in CommittedEntries")
}
if x.WorkerConfig.LudicrousMode {
// Assuming that there will be no error while proposing.
n.Proposals.Done(proposal.Key, nil)
}
}
proposal.Index = entry.Index
proposals = append(proposals, proposal)
}
}
// Send the whole lot to applyCh in one go, instead of sending proposals one by one.
if len(proposals) > 0 {
// Apply the ramp meter before adding the size to pendingSize, so that even a
// crazy big proposal can still be pushed to applyCh. If we did this after adding its
// size to pendingSize, we could block forever in rampMeter.
n.rampMeter()
var pendingSize int64
for _, p := range proposals {
pendingSize += int64(p.Size())