/* Asynchronous replication implementation.
*
* Copyright (c) 2009-Present, Redis Ltd.
* All rights reserved.
*
* Copyright (c) 2024-present, Valkey contributors.
* All rights reserved.
*
* Licensed under your choice of the Redis Source Available License 2.0
* (RSALv2) or the Server Side Public License v1 (SSPLv1).
*
* Portions of this file are available under BSD3 terms; see REDISCONTRIBUTIONS for more information.
*/
/*
* replication.c - Replication Management
*
* This file contains the implementation of Redis's replication logic, which
* enables data synchronization between master and replica instances.
* It handles:
* - Master-to-replica synchronization
* - Full and partial resynchronizations
* - Replication backlog management
* - State machines for replica operations
* - RDB Channel for Full Sync (lookup "rdb channel for full sync")
*/
#include "server.h"
#include "cluster.h"
#include "bio.h"
#include "functions.h"
#include "connection.h"
#include <memory.h>
#include <sys/time.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/socket.h>
#include <sys/stat.h>
void replicationDiscardCachedMaster(void);
void replicationResurrectCachedMaster(connection *conn);
void replicationSendAck(void);
int replicaPutOnline(client *slave);
void replicaStartCommandStream(client *slave);
int cancelReplicationHandshake(int reconnect);
static void rdbChannelFullSyncWithMaster(connection *conn);
static int rdbChannelAbortRdbTransfer(void);
static void rdbChannelBufferReplData(connection *conn);
static void rdbChannelReplDataBufInit(void);
static void rdbChannelSuccess(void);
/* We take a global flag to remember if this instance generated an RDB
* because of replication, so that we can remove the RDB file in case
* the instance is configured to have no persistence. */
int RDBGeneratedByReplication = 0;
/* A reference to the diskless loading rio, so that we can abort it
 * asynchronously. It's needed for rdbchannel replication: while loading from
 * the rdbchannel connection we may yield back to the event loop, and if the
 * main channel connection detects a network problem we want to abort loading.
 * In that case it calls rioAbort(), so the next rioRead() from the rdbchannel
 * connection will return an error and cancel loading safely. */
static rio *disklessLoadingRio = NULL;
/* --------------------------- Utility functions ---------------------------- */
/* Return 1 if the replica is an rdbchannel connection and there is an
 * associated main channel replica for it. */
int replicationCheckHasMainChannel(client *replica) {
if (!(replica->flags & CLIENT_REPL_RDB_CHANNEL) ||
!replica->main_ch_client_id ||
lookupClientByID(replica->main_ch_client_id) == NULL)
{
return 0;
}
return 1;
}
/* During rdb channel replication, the replica opens two connections. From the
 * master's POV, these connections are distinct replicas in server.slaves. This
 * function counts each associated pair as one and returns the logical replica
 * count. */
unsigned long replicationLogicalReplicaCount(void) {
unsigned long count = 0;
listNode *ln;
listIter li;
listRewind(server.slaves,&li);
while ((ln = listNext(&li))) {
client *replica = listNodeValue(ln);
if (!replicationCheckHasMainChannel(replica))
count++;
}
return count;
}
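/* Return the connection type to use for replication links: TLS when
 * tls-replication is enabled, plain TCP otherwise. */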
static ConnectionType *connTypeOfReplication(void) {
if (server.tls_replication) {
return connectionTypeTls();
}
return connectionTypeTcp();
}
/* Return the pointer to a string representing the slave ip:listening_port
 * pair. Mostly useful for logging, since we want to log a slave using its
 * IP address and its listening port, which is clearer for the user, for
 * example: "Closing connection with replica 10.1.2.3:6380". */
char *replicationGetSlaveName(client *c) {
static char buf[NET_HOST_PORT_STR_LEN];
char ip[NET_IP_STR_LEN];
ip[0] = '\0';
buf[0] = '\0';
if (c->slave_addr ||
connAddrPeerName(c->conn,ip,sizeof(ip),NULL) != -1)
{
char *addr = c->slave_addr ? c->slave_addr : ip;
if (c->slave_listening_port)
formatAddr(buf,sizeof(buf),addr,c->slave_listening_port);
else
snprintf(buf,sizeof(buf),"%s:<unknown-replica-port>",addr);
} else {
snprintf(buf,sizeof(buf),"client id #%llu",
(unsigned long long) c->id);
}
return buf;
}
/* Plain unlink() can block for quite some time in order to actually apply
* the file deletion to the filesystem. This call removes the file in a
* background thread instead. We actually just do close() in the thread,
* by using the fact that if there is another instance of the same file open,
* the foreground unlink() will only remove the fs name, and deleting the
* file's storage space will only happen once the last reference is lost. */
int bg_unlink(const char *filename) {
int fd = open(filename,O_RDONLY|O_NONBLOCK);
if (fd == -1) {
/* Can't open the file? Fall back to unlinking in the main thread. */
return unlink(filename);
} else {
/* The following unlink() removes the name but doesn't free the
* file contents because a process still has it open. */
int retval = unlink(filename);
if (retval == -1) {
/* If we got an unlink error, we just return it, closing the
* new reference we have to the file. */
int old_errno = errno;
close(fd); /* This would overwrite our errno. So we saved it. */
errno = old_errno;
return -1;
}
bioCreateCloseJob(fd, 0, 0);
return 0; /* Success. */
}
}
/* ---------------------------------- MASTER -------------------------------- */
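/* Allocate and initialize an empty replication backlog. */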
void createReplicationBacklog(void) {
serverAssert(server.repl_backlog == NULL);
server.repl_backlog = zmalloc(sizeof(replBacklog));
server.repl_backlog->ref_repl_buf_node = NULL;
server.repl_backlog->unindexed_count = 0;
server.repl_backlog->blocks_index = raxNew();
server.repl_backlog->histlen = 0;
/* We don't have any data inside our buffer, but virtually the first
* byte we have is the next byte that will be generated for the
* replication stream. */
server.repl_backlog->offset = server.master_repl_offset+1;
}
/* This function is called when the user modifies the replication backlog
 * size at runtime. It is up to the function to resize the buffer and set it
 * up so that it contains the same data as the previous one (possibly less
 * data, but the most recent bytes, or the same data and more free space in
 * case the buffer is enlarged). */
void resizeReplicationBacklog(void) {
if (server.repl_backlog_size < CONFIG_REPL_BACKLOG_MIN_SIZE)
server.repl_backlog_size = CONFIG_REPL_BACKLOG_MIN_SIZE;
if (server.repl_backlog)
incrementalTrimReplicationBacklog(REPL_BACKLOG_TRIM_BLOCKS_PER_CALL);
}
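/* Release the replication backlog. This is only called when there are no
 * connected replicas, since the backlog keeps the last reference to the
 * shared replication buffer blocks. */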
void freeReplicationBacklog(void) {
serverAssert(listLength(server.slaves) == 0);
if (server.repl_backlog == NULL) return;
/* Decrease the start buffer node reference count. */
if (server.repl_backlog->ref_repl_buf_node) {
replBufBlock *o = listNodeValue(
server.repl_backlog->ref_repl_buf_node);
serverAssert(o->refcount == 1); /* Last reference. */
o->refcount--;
}
/* Replication buffer blocks are completely released when we free the
* backlog, since the backlog is released only when there are no replicas
* and the backlog keeps the last reference of all blocks. */
freeReplicationBacklogRefMemAsync(server.repl_buffer_blocks,
server.repl_backlog->blocks_index);
resetReplicationBuffer();
zfree(server.repl_backlog);
server.repl_backlog = NULL;
}
/* To quickly locate an offset within the replication buffer blocks when
 * replicas ask for a partial resynchronization, we create one index entry
 * every REPL_BACKLOG_INDEX_PER_BLOCKS blocks. */
void createReplicationBacklogIndex(listNode *ln) {
server.repl_backlog->unindexed_count++;
if (server.repl_backlog->unindexed_count >= REPL_BACKLOG_INDEX_PER_BLOCKS) {
replBufBlock *o = listNodeValue(ln);
uint64_t encoded_offset = htonu64(o->repl_offset);
raxInsert(server.repl_backlog->blocks_index,
(unsigned char*)&encoded_offset, sizeof(uint64_t),
ln, NULL);
server.repl_backlog->unindexed_count = 0;
}
}
/* Rebase the replication buffer blocks' offsets, since the initial offset
 * starts again from 0 when the master restarts. */
void rebaseReplicationBuffer(long long base_repl_offset) {
raxFree(server.repl_backlog->blocks_index);
server.repl_backlog->blocks_index = raxNew();
server.repl_backlog->unindexed_count = 0;
listIter li;
listNode *ln;
listRewind(server.repl_buffer_blocks, &li);
while ((ln = listNext(&li))) {
replBufBlock *o = listNodeValue(ln);
o->repl_offset += base_repl_offset;
createReplicationBacklogIndex(ln);
}
}
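/* Reset the global replication buffer accounting and start from a fresh,
 * empty block list. */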
void resetReplicationBuffer(void) {
server.repl_buffer_mem = 0;
server.repl_buffer_blocks = listCreate();
listSetFreeMethod(server.repl_buffer_blocks, zfree);
}
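/* Return 1 if the given replica should receive data appended to the global
 * replication buffer, 0 if it must be skipped. */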
int canFeedReplicaReplBuffer(client *replica) {
/* Don't feed replicas that only want the RDB. */
if (replica->flags & CLIENT_REPL_RDBONLY) return 0;
/* Don't feed replicas that are still waiting for BGSAVE to start. */
if (replica->replstate == SLAVE_STATE_WAIT_BGSAVE_START ||
replica->replstate == SLAVE_STATE_WAIT_RDB_CHANNEL) return 0;
/* Don't feed replicas that are going to be closed ASAP. */
if (replica->flags & CLIENT_CLOSE_ASAP) return 0;
return 1;
}
/* Create the replication backlog if needed. */
void createReplicationBacklogIfNeeded(void) {
if (listLength(server.slaves) == 1 && server.repl_backlog == NULL) {
/* When we create the backlog from scratch, we always use a new
* replication ID and clear the ID2, since there is no valid
* past history. */
changeReplicationId();
clearReplicationId2();
createReplicationBacklog();
serverLog(LL_NOTICE,"Replication backlog created, my new "
"replication IDs are '%s' and '%s'",
server.replid, server.replid2);
}
}
/* Similar to 'prepareClientToWrite'. Note that we must call this function
 * before feeding the replication stream into the global replication buffer,
 * since clientHasPendingReplies() in prepareClientToWrite() accesses the
 * global replication buffer to make its decision. */
int prepareReplicasToWrite(void) {
listIter li;
listNode *ln;
int prepared = 0;
listRewind(server.slaves,&li);
while((ln = listNext(&li))) {
client *slave = ln->value;
if (!canFeedReplicaReplBuffer(slave)) continue;
if (prepareClientToWrite(slave) == C_ERR) continue;
prepared++;
}
return prepared;
}
/* Wrapper for feedReplicationBuffer() that takes Redis string objects
* as input. */
void feedReplicationBufferWithObject(robj *o) {
char llstr[LONG_STR_SIZE];
void *p;
size_t len;
if (o->encoding == OBJ_ENCODING_INT) {
len = ll2string(llstr,sizeof(llstr),(long)o->ptr);
p = llstr;
} else {
len = sdslen(o->ptr);
p = o->ptr;
}
feedReplicationBuffer(p,len);
}
/* Generally, we only have one replication buffer block to trim when the
 * replication backlog size exceeds our setting and no replica references it.
 * But when replica clients disconnect, we may need to free many referenced
 * replication buffer blocks at once. Freeing a lot of blocks would take a long
 * time and freeze the server, so we trim the replication backlog
 * incrementally. */
void incrementalTrimReplicationBacklog(size_t max_blocks) {
serverAssert(server.repl_backlog != NULL);
size_t trimmed_blocks = 0;
while (server.repl_backlog->histlen > server.repl_backlog_size &&
trimmed_blocks < max_blocks)
{
/* We never trim backlog to less than one block. */
if (listLength(server.repl_buffer_blocks) <= 1) break;
        /* Replicas increment the refcount of the first replication buffer block
         * they refer to; in that case, we don't trim the backlog even if
         * backlog_histlen exceeds backlog_size. This implicitly makes the backlog
         * bigger than our setting, but lets the master accept partial resyncs as
         * much as possible. In other words, the backlog must hold the last
         * reference of a replication buffer block before we trim it. */
listNode *first = listFirst(server.repl_buffer_blocks);
serverAssert(first == server.repl_backlog->ref_repl_buf_node);
replBufBlock *fo = listNodeValue(first);
if (fo->refcount != 1) break;
        /* Don't trim the backlog if releasing the first repl buffer block would
         * shrink the valid backlog size to or below the configured backlog size. */
if (server.repl_backlog->histlen - (long long)fo->size <=
server.repl_backlog_size) break;
/* Decr refcount and release the first block later. */
fo->refcount--;
trimmed_blocks++;
server.repl_backlog->histlen -= fo->size;
/* Go to use next replication buffer block node. */
listNode *next = listNextNode(first);
server.repl_backlog->ref_repl_buf_node = next;
serverAssert(server.repl_backlog->ref_repl_buf_node != NULL);
/* Incr reference count to keep the new head node. */
((replBufBlock *)listNodeValue(next))->refcount++;
/* Remove the node in recorded blocks. */
uint64_t encoded_offset = htonu64(fo->repl_offset);
raxRemove(server.repl_backlog->blocks_index,
(unsigned char*)&encoded_offset, sizeof(uint64_t), NULL);
/* Delete the first node from global replication buffer. */
serverAssert(fo->refcount == 0 && fo->used == fo->size);
server.repl_buffer_mem -= (fo->size +
sizeof(listNode) + sizeof(replBufBlock));
listDelNode(server.repl_buffer_blocks, first);
}
/* Set the offset of the first byte we have in the backlog. */
server.repl_backlog->offset = server.master_repl_offset -
server.repl_backlog->histlen + 1;
}
/* Free replication buffer blocks that are referenced by this client. */
void freeReplicaReferencedReplBuffer(client *replica) {
if (replica->ref_repl_buf_node != NULL) {
/* Decrease the start buffer node reference count. */
replBufBlock *o = listNodeValue(replica->ref_repl_buf_node);
serverAssert(o->refcount > 0);
o->refcount--;
incrementalTrimReplicationBacklog(REPL_BACKLOG_TRIM_BLOCKS_PER_CALL);
}
replica->ref_repl_buf_node = NULL;
replica->ref_block_pos = 0;
}
/* Append bytes to the global replication buffer block list. The replication
 * backlog and all replica clients use these replication buffers collectively,
 * so this function replaces 'addReply*' and 'feedReplicationBacklog' for
 * replicas and the replication backlog. First we append the data to the global
 * replication buffer block list, then we update the referenced node and block
 * position for each replica and for the replication backlog. */
void feedReplicationBuffer(char *s, size_t len) {
static long long repl_block_id = 0;
if (server.repl_backlog == NULL) return;
while(len > 0) {
size_t start_pos = 0; /* The position of referenced block to start sending. */
listNode *start_node = NULL; /* Replica/backlog starts referenced node. */
int add_new_block = 0; /* Create new block if current block is total used. */
listNode *ln = listLast(server.repl_buffer_blocks);
replBufBlock *tail = ln ? listNodeValue(ln) : NULL;
/* Append to tail string when possible. */
if (tail && tail->size > tail->used) {
start_node = listLast(server.repl_buffer_blocks);
start_pos = tail->used;
/* Copy the part we can fit into the tail, and leave the rest for a
* new node */
size_t avail = tail->size - tail->used;
size_t copy = (avail >= len) ? len : avail;
memcpy(tail->buf + tail->used, s, copy);
tail->used += copy;
s += copy;
len -= copy;
server.master_repl_offset += copy;
server.repl_backlog->histlen += copy;
}
if (len) {
/* Create a new node, make sure it is allocated to at
* least PROTO_REPLY_CHUNK_BYTES */
size_t usable_size;
/* Avoid creating nodes smaller than PROTO_REPLY_CHUNK_BYTES, so that we can append more data into them,
* and also avoid creating nodes bigger than repl_backlog_size / 16, so that we won't have huge nodes that can't
* trim when we only still need to hold a small portion from them. */
size_t limit = max((size_t)server.repl_backlog_size / 16, (size_t)PROTO_REPLY_CHUNK_BYTES);
size_t size = min(max(len, (size_t)PROTO_REPLY_CHUNK_BYTES), limit);
tail = zmalloc_usable(size + sizeof(replBufBlock), &usable_size);
/* Take over the allocation's internal fragmentation */
tail->size = usable_size - sizeof(replBufBlock);
size_t copy = (tail->size >= len) ? len : tail->size;
tail->used = copy;
tail->refcount = 0;
tail->repl_offset = server.master_repl_offset + 1;
tail->id = repl_block_id++;
memcpy(tail->buf, s, copy);
listAddNodeTail(server.repl_buffer_blocks, tail);
/* We also count the list node memory into replication buffer memory. */
server.repl_buffer_mem += (usable_size + sizeof(listNode));
add_new_block = 1;
if (start_node == NULL) {
start_node = listLast(server.repl_buffer_blocks);
start_pos = 0;
}
s += copy;
len -= copy;
server.master_repl_offset += copy;
server.repl_backlog->histlen += copy;
}
/* For output buffer of replicas. */
listIter li;
listRewind(server.slaves,&li);
while((ln = listNext(&li))) {
client *slave = ln->value;
if (!canFeedReplicaReplBuffer(slave)) continue;
/* Update shared replication buffer start position. */
if (slave->ref_repl_buf_node == NULL) {
slave->ref_repl_buf_node = start_node;
slave->ref_block_pos = start_pos;
/* Only increase the start block reference count. */
((replBufBlock *)listNodeValue(start_node))->refcount++;
}
            /* Check the output buffer limit only when adding a new block. */
if (add_new_block) closeClientOnOutputBufferLimitReached(slave, 1);
}
/* For replication backlog */
if (server.repl_backlog->ref_repl_buf_node == NULL) {
server.repl_backlog->ref_repl_buf_node = start_node;
/* Only increase the start block reference count. */
((replBufBlock *)listNodeValue(start_node))->refcount++;
/* Replication buffer must be empty before adding replication stream
* into replication backlog. */
serverAssert(add_new_block == 1 && start_pos == 0);
}
if (add_new_block) {
createReplicationBacklogIndex(listLast(server.repl_buffer_blocks));
/* It is important to trim after adding replication data to keep the backlog size close to
* repl_backlog_size in the common case. We wait until we add a new block to avoid repeated
* unnecessary trimming attempts when small amounts of data are added. See comments in
* freeMemoryGetNotCountedMemory() for details on replication backlog memory tracking. */
incrementalTrimReplicationBacklog(REPL_BACKLOG_TRIM_BLOCKS_PER_CALL);
}
}
}
/* Propagate write commands to replication stream.
*
* This function is used if the instance is a master: we use the commands
* received by our clients in order to create the replication stream.
* Instead if the instance is a replica and has sub-replicas attached, we use
* replicationFeedStreamFromMasterStream() */
void replicationFeedSlaves(list *slaves, int dictid, robj **argv, int argc) {
int j, len;
char llstr[LONG_STR_SIZE];
    /* In case we propagate a command that doesn't touch keys (PING, REPLCONF) we
     * pass dbid=-1 to indicate there is no need to replicate the `select` command. */
serverAssert(dictid == -1 || (dictid >= 0 && dictid < server.dbnum));
/* If the instance is not a top level master, return ASAP: we'll just proxy
* the stream of data we receive from our master instead, in order to
* propagate *identical* replication stream. In this way this slave can
* advertise the same replication ID as the master (since it shares the
* master replication history and has the same backlog and offsets). */
if (server.masterhost != NULL) return;
/* If there aren't slaves, and there is no backlog buffer to populate,
* we can return ASAP. */
if (server.repl_backlog == NULL && listLength(slaves) == 0) {
/* We increment the repl_offset anyway, since we use that for tracking AOF fsyncs
* even when there's no replication active. This code will not be reached if AOF
* is also disabled. */
server.master_repl_offset += 1;
return;
}
/* We can't have slaves attached and no backlog. */
serverAssert(!(listLength(slaves) != 0 && server.repl_backlog == NULL));
/* Update the time of sending replication stream to replicas. */
server.repl_stream_lastio = server.unixtime;
/* Must install write handler for all replicas first before feeding
* replication stream. */
prepareReplicasToWrite();
/* Send SELECT command to every slave if needed. */
if (dictid != -1 && server.slaveseldb != dictid) {
robj *selectcmd;
/* For a few DBs we have pre-computed SELECT command. */
if (dictid >= 0 && dictid < PROTO_SHARED_SELECT_CMDS) {
selectcmd = shared.select[dictid];
} else {
int dictid_len;
dictid_len = ll2string(llstr,sizeof(llstr),dictid);
selectcmd = createObject(OBJ_STRING,
sdscatprintf(sdsempty(),
"*2\r\n$6\r\nSELECT\r\n$%d\r\n%s\r\n",
dictid_len, llstr));
}
feedReplicationBufferWithObject(selectcmd);
if (dictid < 0 || dictid >= PROTO_SHARED_SELECT_CMDS)
decrRefCount(selectcmd);
server.slaveseldb = dictid;
}
/* Write the command to the replication buffer if any. */
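    /* As an example, a replicated "SET foo bar" ends up on the wire as the
     * multi bulk: *3\r\n$3\r\nSET\r\n$3\r\nfoo\r\n$3\r\nbar\r\n */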
char aux[LONG_STR_SIZE+3];
/* Add the multi bulk reply length. */
aux[0] = '*';
len = ll2string(aux+1,sizeof(aux)-1,argc);
aux[len+1] = '\r';
aux[len+2] = '\n';
feedReplicationBuffer(aux,len+3);
for (j = 0; j < argc; j++) {
long objlen = stringObjectLen(argv[j]);
/* We need to feed the buffer with the object as a bulk reply
* not just as a plain string, so create the $..CRLF payload len
* and add the final CRLF */
aux[0] = '$';
len = ll2string(aux+1,sizeof(aux)-1,objlen);
aux[len+1] = '\r';
aux[len+2] = '\n';
feedReplicationBuffer(aux,len+3);
feedReplicationBufferWithObject(argv[j]);
feedReplicationBuffer(aux+len+1,2);
}
}
/* This is a debugging function that gets called when we detect something
 * wrong with the replication protocol: the goal is to peek into the
 * replication backlog and show a few final bytes to make it simpler to
 * guess what kind of bug it could be. */
void showLatestBacklog(void) {
if (server.repl_backlog == NULL) return;
if (listLength(server.repl_buffer_blocks) == 0) return;
if (server.hide_user_data_from_log) {
serverLog(LL_NOTICE,"hide-user-data-from-log is on, skip logging backlog content to avoid spilling PII.");
return;
}
size_t dumplen = 256;
if (server.repl_backlog->histlen < (long long)dumplen)
dumplen = server.repl_backlog->histlen;
sds dump = sdsempty();
listNode *node = listLast(server.repl_buffer_blocks);
while(dumplen) {
if (node == NULL) break;
replBufBlock *o = listNodeValue(node);
size_t thislen = o->used >= dumplen ? dumplen : o->used;
sds head = sdscatrepr(sdsempty(), o->buf+o->used-thislen, thislen);
sds tmp = sdscatsds(head, dump);
sdsfree(dump);
dump = tmp;
dumplen -= thislen;
node = listPrevNode(node);
}
/* Finally log such bytes: this is vital debugging info to
* understand what happened. */
serverLog(LL_NOTICE,"Latest backlog is: '%s'", dump);
sdsfree(dump);
}
/* This function is used in order to proxy what we receive from our master
* to our sub-slaves. */
#include <ctype.h>
void replicationFeedStreamFromMasterStream(char *buf, size_t buflen) {
    /* There must be a replication backlog if we have attached slaves. */
if (listLength(server.slaves)) serverAssert(server.repl_backlog != NULL);
if (server.repl_backlog) {
/* Must install write handler for all replicas first before feeding
* replication stream. */
prepareReplicasToWrite();
feedReplicationBuffer(buf,buflen);
}
}
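/* Feed the clients in the 'monitors' list with a human readable rendering of
 * the executed command, prefixed by a timestamp and by the client's database
 * id and origin (Lua script, Unix socket, or peer address). */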
void replicationFeedMonitors(client *c, list *monitors, int dictid, robj **argv, int argc) {
/* Fast path to return if the monitors list is empty or the server is in loading. */
if (monitors == NULL || listLength(monitors) == 0 || server.loading) return;
listNode *ln;
listIter li;
int j;
sds cmdrepr = sdsnew("+");
robj *cmdobj;
struct timeval tv;
gettimeofday(&tv,NULL);
cmdrepr = sdscatprintf(cmdrepr,"%ld.%06ld ",(long)tv.tv_sec,(long)tv.tv_usec);
if (c->flags & CLIENT_SCRIPT) {
cmdrepr = sdscatprintf(cmdrepr,"[%d lua] ",dictid);
} else if (c->flags & CLIENT_UNIX_SOCKET) {
cmdrepr = sdscatprintf(cmdrepr,"[%d unix:%s] ",dictid,server.unixsocket);
} else {
cmdrepr = sdscatprintf(cmdrepr,"[%d %s] ",dictid,getClientPeerId(c));
}
for (j = 0; j < argc; j++) {
if (argv[j]->encoding == OBJ_ENCODING_INT) {
cmdrepr = sdscatprintf(cmdrepr, "\"%ld\"", (long)argv[j]->ptr);
} else {
cmdrepr = sdscatrepr(cmdrepr,(char*)argv[j]->ptr,
sdslen(argv[j]->ptr));
}
if (j != argc-1)
cmdrepr = sdscatlen(cmdrepr," ",1);
}
cmdrepr = sdscatlen(cmdrepr,"\r\n",2);
cmdobj = createObject(OBJ_STRING,cmdrepr);
listRewind(monitors,&li);
while((ln = listNext(&li))) {
client *monitor = ln->value;
/* Do not show internal commands to non-internal clients. */
if (c->realcmd && (c->realcmd->flags & CMD_INTERNAL) && !(monitor->flags & CLIENT_INTERNAL)) {
continue;
}
addReply(monitor,cmdobj);
updateClientMemUsageAndBucket(monitor);
}
decrRefCount(cmdobj);
}
/* Feed the slave 'c' with the replication backlog starting from the
* specified 'offset' up to the end of the backlog. */
long long addReplyReplicationBacklog(client *c, long long offset) {
long long skip;
serverLog(LL_DEBUG, "[PSYNC] Replica request offset: %lld", offset);
if (server.repl_backlog->histlen == 0) {
serverLog(LL_DEBUG, "[PSYNC] Backlog history len is zero");
return 0;
}
serverLog(LL_DEBUG, "[PSYNC] Backlog size: %lld",
server.repl_backlog_size);
serverLog(LL_DEBUG, "[PSYNC] First byte: %lld",
server.repl_backlog->offset);
serverLog(LL_DEBUG, "[PSYNC] History len: %lld",
server.repl_backlog->histlen);
/* Compute the amount of bytes we need to discard. */
skip = offset - server.repl_backlog->offset;
serverLog(LL_DEBUG, "[PSYNC] Skipping: %lld", skip);
/* Iterate recorded blocks, quickly search the approximate node. */
listNode *node = NULL;
if (raxSize(server.repl_backlog->blocks_index) > 0) {
uint64_t encoded_offset = htonu64(offset);
raxIterator ri;
raxStart(&ri, server.repl_backlog->blocks_index);
raxSeek(&ri, ">", (unsigned char*)&encoded_offset, sizeof(uint64_t));
if (raxEOF(&ri)) {
            /* Not found, so search from the last recorded node. */
raxSeek(&ri, "$", NULL, 0);
raxPrev(&ri);
node = (listNode *)ri.data;
} else {
raxPrev(&ri); /* Skip the sought node. */
            /* We should search from the previous node, since the offset of the
             * node we sought exceeds the requested offset. */
if (raxPrev(&ri))
node = (listNode *)ri.data;
else
node = server.repl_backlog->ref_repl_buf_node;
}
raxStop(&ri);
} else {
        /* No recorded blocks, so just search from the start node. */
node = server.repl_backlog->ref_repl_buf_node;
}
/* Search the exact node. */
while (node != NULL) {
replBufBlock *o = listNodeValue(node);
if (o->repl_offset + (long long)o->used >= offset) break;
node = listNextNode(node);
}
serverAssert(node != NULL);
    /* Install a write handler first. */
    prepareClientToWrite(c);
    /* Set up the output buffer of the replica. */
replBufBlock *o = listNodeValue(node);
o->refcount++;
c->ref_repl_buf_node = node;
c->ref_block_pos = offset - o->repl_offset;
return server.repl_backlog->histlen - skip;
}
/* Return the offset to provide as reply to the PSYNC command received
* from the slave. The returned value is only valid immediately after
* the BGSAVE process started and before executing any other command
* from clients. */
long long getPsyncInitialOffset(void) {
return server.master_repl_offset;
}
/* Send a FULLRESYNC reply in the specific case of a full resynchronization,
 * and as a side effect set up the slave for a full sync in different ways:
 *
 * 1) Remember, in the slave client structure, the replication offset
 *    we sent here, so that if new slaves later attach to the same
 *    background RDB saving process (by duplicating this client output
 *    buffer), we can get the right offset from this slave.
 * 2) Set the replication state of the slave to WAIT_BGSAVE_END so that
 *    we start accumulating differences from this point.
 * 3) Force the replication stream to re-emit a SELECT statement so that
 *    the new slave's incremental differences will start selecting the
 *    right database number.
 *
 * Normally this function should be called immediately after a successful
 * BGSAVE for replication was started, or when there is one already in
 * progress that we attached our slave to. */
int replicationSetupSlaveForFullResync(client *slave, long long offset) {
char buf[128];
int buflen;
slave->psync_initial_offset = offset;
slave->replstate = SLAVE_STATE_WAIT_BGSAVE_END;
/* We are going to accumulate the incremental changes for this
* slave as well. Set slaveseldb to -1 in order to force to re-emit
* a SELECT statement in the replication stream. */
server.slaveseldb = -1;
/* Don't send this reply to slaves that approached us with
* the old SYNC command. */
if (!(slave->flags & CLIENT_PRE_PSYNC)) {
if (slave->slave_req & SLAVE_REQ_RDB_CHANNEL) {
/* This slave is rdbchannel. Find its associated main channel and
* change its state so we can deliver replication stream from now
* on, in parallel to rdb. */
uint64_t id = slave->main_ch_client_id;
client *c = lookupClientByID(id);
if (c && c->replstate == SLAVE_STATE_WAIT_RDB_CHANNEL) {
c->replstate = SLAVE_STATE_SEND_BULK_AND_STREAM;
serverLog(LL_NOTICE, "Starting to deliver RDB and replication stream to replica: %s",
replicationGetSlaveName(c));
} else {
serverLog(LL_WARNING, "Starting to deliver RDB to replica %s"
" but it has no associated main channel",
replicationGetSlaveName(slave));
}
}
buflen = snprintf(buf,sizeof(buf),"+FULLRESYNC %s %lld\r\n",
server.replid,offset);
if (connWrite(slave->conn,buf,buflen) != buflen) {
freeClientAsync(slave);
return C_ERR;
}
}
return C_OK;
}
/* This function handles the PSYNC command from the point of view of a
* master receiving a request for partial resynchronization.
*
* On success return C_OK, otherwise C_ERR is returned and we proceed
* with the usual full resync. */
int masterTryPartialResynchronization(client *c, long long psync_offset) {
long long psync_len;
char *master_replid = c->argv[1]->ptr;
char buf[128];
int buflen;
/* Is the replication ID of this master the same advertised by the wannabe
* slave via PSYNC? If the replication ID changed this master has a
* different replication history, and there is no way to continue.
*
* Note that there are two potentially valid replication IDs: the ID1
* and the ID2. The ID2 however is only valid up to a specific offset. */
if (strcasecmp(master_replid, server.replid) &&
(strcasecmp(master_replid, server.replid2) ||
psync_offset > server.second_replid_offset))
{
/* Replid "?" is used by slaves that want to force a full resync. */
if (master_replid[0] != '?') {
if (strcasecmp(master_replid, server.replid) &&
strcasecmp(master_replid, server.replid2))
{
serverLog(LL_NOTICE,"Partial resynchronization not accepted: "
"Replication ID mismatch (Replica asked for '%s', my "
"replication IDs are '%s' and '%s')",
master_replid, server.replid, server.replid2);
} else {
serverLog(LL_NOTICE,"Partial resynchronization not accepted: "
"Requested offset for second ID was %lld, but I can reply "
"up to %lld", psync_offset, server.second_replid_offset);
}
} else {
serverLog(LL_NOTICE,"Full resync requested by replica %s %s",
replicationGetSlaveName(c),
c->flags & CLIENT_REPL_RDB_CHANNEL ? "(rdb-channel)" : "");
}
goto need_full_resync;
}
    /* Do we still have the data our slave is asking for? */
if (!server.repl_backlog ||
psync_offset < server.repl_backlog->offset ||
psync_offset > (server.repl_backlog->offset + server.repl_backlog->histlen))
{
serverLog(LL_NOTICE,
"Unable to partial resync with replica %s for lack of backlog (Replica request was: %lld).", replicationGetSlaveName(c), psync_offset);
if (psync_offset > server.master_repl_offset) {
serverLog(LL_WARNING,
"Warning: replica %s tried to PSYNC with an offset that is greater than the master replication offset.", replicationGetSlaveName(c));
}
goto need_full_resync;
}
/* If we reached this point, we are able to perform a partial resync:
* 1) Set client state to make it a slave.
* 2) Inform the client we can continue with +CONTINUE
* 3) Send the backlog data (from the offset to the end) to the slave. */
c->flags |= CLIENT_SLAVE;
c->replstate = SLAVE_STATE_ONLINE;
c->repl_ack_time = server.unixtime;
c->repl_start_cmd_stream_on_ack = 0;
listAddNodeTail(server.slaves,c);
/* We can't use the connection buffers since they are used to accumulate
* new commands at this stage. But we are sure the socket send buffer is
* empty so this write will never fail actually. */
if (c->slave_capa & SLAVE_CAPA_PSYNC2) {
buflen = snprintf(buf,sizeof(buf),"+CONTINUE %s\r\n", server.replid);
} else {
buflen = snprintf(buf,sizeof(buf),"+CONTINUE\r\n");
}
if (connWrite(c->conn,buf,buflen) != buflen) {
freeClientAsync(c);
return C_OK;
}
psync_len = addReplyReplicationBacklog(c,psync_offset);
serverLog(LL_NOTICE,
"Partial resynchronization request from %s accepted. Sending %lld bytes of backlog starting from offset %lld.",
replicationGetSlaveName(c),
psync_len, psync_offset);
/* Note that we don't need to set the selected DB at server.slaveseldb
* to -1 to force the master to emit SELECT, since the slave already
* has this state from the previous connection with the master. */
refreshGoodSlavesCount();
/* Fire the replica change modules event. */
moduleFireServerEvent(REDISMODULE_EVENT_REPLICA_CHANGE,
REDISMODULE_SUBEVENT_REPLICA_CHANGE_ONLINE,
NULL);
return C_OK; /* The caller can return, no full resync needed. */
need_full_resync:
/* We need a full resync for some reason... Note that we can't
* reply to PSYNC right now if a full SYNC is needed. The reply
* must include the master offset at the time the RDB file we transfer
* is generated, so we need to delay the reply to that moment. */
return C_ERR;
}
/* Start a BGSAVE for replication purposes, that is, selecting the disk or
 * socket target depending on the configuration, and making sure that
 * the script cache is flushed before starting.
 *
 * The mincapa argument is the bitwise AND of the capabilities of all the
 * slaves waiting for this BGSAVE, so it represents the capabilities that
 * all those slaves support. It can be tested via the SLAVE_CAPA_* macros.
*
* Side effects, other than starting a BGSAVE:
*
* 1) Handle the slaves in WAIT_START state, by preparing them for a full
* sync if the BGSAVE was successfully started, or sending them an error
* and dropping them from the list of slaves.
*
* 2) Flush the Lua scripting script cache if the BGSAVE was actually
* started.
*
* Returns C_OK on success or C_ERR otherwise. */
int startBgsaveForReplication(int mincapa, int req) {
int retval;
int socket_target = 0;
listIter li;
listNode *ln;
/* We use a socket target if slave can handle the EOF marker and we're configured to do diskless syncs.
* Note that in case we're creating a "filtered" RDB (functions-only, for example) we also force socket replication
* to avoid overwriting the snapshot RDB file with filtered data. */
socket_target = (server.repl_diskless_sync || req & SLAVE_REQ_RDB_MASK) && (mincapa & SLAVE_CAPA_EOF);
/* `SYNC` should have failed with error if we don't support socket and require a filter, assert this here */
serverAssert(socket_target || !(req & SLAVE_REQ_RDB_MASK));
serverLog(LL_NOTICE,"Starting BGSAVE for SYNC with target: %s%s",
socket_target ? "replicas sockets" : "disk",
(req & SLAVE_REQ_RDB_CHANNEL) ? " (rdb-channel)" : "");
rdbSaveInfo rsi, *rsiptr;
rsiptr = rdbPopulateSaveInfo(&rsi);
/* Only do rdbSave* when rsiptr is not NULL,
* otherwise slave will miss repl-stream-db. */
if (rsiptr) {
if (socket_target)
retval = rdbSaveToSlavesSockets(req,rsiptr);
else {
/* Keep the page cache since it'll get used soon */
retval = rdbSaveBackground(req, server.rdb_filename, rsiptr, RDBFLAGS_REPLICATION | RDBFLAGS_KEEP_CACHE);
}
if (server.repl_debug_pause & REPL_DEBUG_AFTER_FORK)
debugPauseProcess();
} else {
serverLog(LL_WARNING,"BGSAVE for replication: replication information not available, can't generate the RDB file right now. Try later.");
retval = C_ERR;
}
/* If we succeeded to start a BGSAVE with disk target, let's remember
* this fact, so that we can later delete the file if needed. Note
* that we don't set the flag to 1 if the feature is disabled, otherwise
* it would never be cleared: the file is not deleted. This way if
* the user enables it later with CONFIG SET, we are fine. */
if (retval == C_OK && !socket_target && server.rdb_del_sync_files)
RDBGeneratedByReplication = 1;
/* If we failed to BGSAVE, remove the slaves waiting for a full
* resynchronization from the list of slaves, inform them with
* an error about what happened, close the connection ASAP. */
if (retval == C_ERR) {
serverLog(LL_WARNING,"BGSAVE for replication failed");
listRewind(server.slaves,&li);
while((ln = listNext(&li))) {
client *slave = ln->value;
if (slave->replstate == SLAVE_STATE_WAIT_BGSAVE_START) {
slave->replstate = REPL_STATE_NONE;
slave->flags &= ~CLIENT_SLAVE;
listDelNode(server.slaves,ln);
addReplyError(slave,
"BGSAVE failed, replication can't continue");
slave->flags |= CLIENT_CLOSE_AFTER_REPLY;
}
}
return retval;
}
    /* If the target is a socket, rdbSaveToSlavesSockets() already set up
     * the slaves for a full resync. Otherwise, for a disk target, do it now. */
if (!socket_target) {
listRewind(server.slaves,&li);
while((ln = listNext(&li))) {
client *slave = ln->value;