@@ -1431,6 +1431,66 @@ func (r *Replica) tick(
 	return true, nil
 }
 
+// processMsgApps sends MsgApp to all peers whose send stream is ready to send.
+//
+// FIXME: find the right placement in RACv2 code. Potentially this just needs to
+// be inlined into the Ready handler.
+func (r *Replica) processMsgApps(_ context.Context) error {
+	r.raftMu.Lock()
+	defer r.raftMu.Unlock()
+
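+	// NB: Replica.raftMu is ordered before Replica.mu, so it is safe to briefly
+	// acquire Replica.mu below while holding raftMu.
+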
+	// We are the leader at the given term.
+	var term uint64 // FIXME: we should know it
+
+	// Grab the snapshot of the log, if we are still the leader of the term. This
+	// only locks Replica.mu for reads, and returns quickly. No IO is performed.
+	var logSnap raft.LogSnapshot
+	if !func() bool {
+		r.mu.RLock()
+		defer r.mu.RUnlock()
+		rg := r.mu.internalRaftGroup
+		// We need to be the leader of the given term to be able to send MsgApps.
+		if rg.Term() != term || rg.Lead() != raftpb.PeerID(r.replicaID) {
+			return false
+		}
+		logSnap = rg.LogSnapshot()
+		return true
+	}() {
+		return nil
+	}
+
+	// We are still holding raftMu, so it is safe to use the log snapshot for
+	// constructing MsgApps. The log will not be mutated in storage. This will
+	// potentially incur storage reads.
+	//
+	// FIXME: iterate over all peers to whom we should send a MsgApp.
+	slices := make(map[roachpb.ReplicaID]raft.LogSlice, 5)
+	for peer := roachpb.ReplicaID(0); peer < 1; peer++ {
+		// FIXME: should know the parameters, as instructed by the send streams.
+		var after, last, maxSize uint64
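+		// Presumably, after/last delimit the log entries to send to this peer
+		// (e.g. starting right after its Next-1 index), and maxSize caps the
+		// MsgApp size based on the tokens available in the send stream.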
+		slices[peer] = logSnap.LogSlice(after, last, maxSize)
+	}
+	if len(slices) == 0 { // nothing to send
+		return nil
+	}
+
+	// Now grab the Replica.mu again (for writes), and send the MsgApp messages.
+	// No IO happens here. The messages are stashed in the RawNode message queue,
+	// and will be dispatched with the next Ready handling. Make sure to do all
+	// this right before the raft scheduler runs the Ready handler, to minimize
+	// latency.
+	return r.withRaftGroup(func(rn *raft.RawNode) (unquiesceAndWakeLeader bool, _ error) {
+		for peer, slice := range slices {
+			// NB: the message sending can fail here, if we lost leadership in the
+			// meantime, or the Next index is misaligned with the passed-in slice.
+			//
+			// Potentially need to update the send stream accordingly from here.
+			_ = rn.SendMsgApp(raftpb.PeerID(peer), slice)
+		}
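+		// Presumably, returning true (unquiesceAndWakeLeader) ensures the replica
+		// is unquiesced, so that the next Ready handling dispatches the stashed
+		// messages.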
+		return true, nil
+	})
+}
+
 
 func (r *Replica) processRACv2PiggybackedAdmitted(ctx context.Context) {
 	r.raftMu.Lock()
 	defer r.raftMu.Unlock()