autoid.go · 1371 lines (1264 loc) · 44.9 KB
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package autoid
import (
"bytes"
"context"
"fmt"
"math"
"strconv"
"sync"
"sync/atomic"
"time"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/kvproto/pkg/autoid"
"github.com/pingcap/tidb/pkg/kv"
"github.com/pingcap/tidb/pkg/meta"
"github.com/pingcap/tidb/pkg/meta/model"
"github.com/pingcap/tidb/pkg/metrics"
"github.com/pingcap/tidb/pkg/parser/mysql"
"github.com/pingcap/tidb/pkg/types"
"github.com/pingcap/tidb/pkg/util/dbterror"
"github.com/pingcap/tidb/pkg/util/execdetails"
"github.com/pingcap/tidb/pkg/util/logutil"
"github.com/pingcap/tidb/pkg/util/tracing"
"github.com/tikv/client-go/v2/txnkv/txnsnapshot"
tikvutil "github.com/tikv/client-go/v2/util"
"go.uber.org/zap"
)
// Attention:
// For reading cluster TiDB memory tables, the system schema/table IDs should be the same across TiDB servers.
// Once a system schema/table ID has been allocated, it can't be changed any more.
// Changing a system schema/table ID may cause compatibility problems.
const (
// SystemSchemaIDFlag is the system schema/table ID flag; it uses the highest positive bit position as the system schema ID flag. It's exported for test.
SystemSchemaIDFlag = 1 << 62
// InformationSchemaDBID is the information_schema schema ID; it's exported for test.
InformationSchemaDBID int64 = SystemSchemaIDFlag | 1
// PerformanceSchemaDBID is the performance_schema schema ID; it's exported for test.
PerformanceSchemaDBID int64 = SystemSchemaIDFlag | 10000
// MetricSchemaDBID is the metrics_schema schema id, it's exported for test.
MetricSchemaDBID int64 = SystemSchemaIDFlag | 20000
)
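// Illustrative sketch (not part of the original file): a hypothetical helper showing how
// the flag above marks an ID as belonging to a system schema/table.
func isSystemSchemaID(id int64) bool {
	return id&SystemSchemaIDFlag != 0
}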
const (
minStep = 30000
maxStep = 2000000
defaultConsumeTime = 10 * time.Second
minIncrement = 1
maxIncrement = 65535
)
// RowIDBitLength is the bit number of a row id in TiDB.
const RowIDBitLength = 64
const (
// AutoRandomShardBitsDefault is the default number of shard bits.
AutoRandomShardBitsDefault = 5
// AutoRandomRangeBitsDefault is the default number of range bits.
AutoRandomRangeBitsDefault = 64
// AutoRandomShardBitsMax is the max number of shard bits.
AutoRandomShardBitsMax = 15
// AutoRandomRangeBitsMax is the max number of range bits.
AutoRandomRangeBitsMax = 64
// AutoRandomRangeBitsMin is the min number of range bits.
AutoRandomRangeBitsMin = 32
// AutoRandomIncBitsMin is the min number of auto random incremental bits.
AutoRandomIncBitsMin = 27
)
// AutoRandomShardBitsNormalize normalizes the auto random shard bits.
func AutoRandomShardBitsNormalize(shard int, colName string) (ret uint64, err error) {
if shard == types.UnspecifiedLength {
return AutoRandomShardBitsDefault, nil
}
if shard <= 0 {
return 0, dbterror.ErrInvalidAutoRandom.FastGenByArgs(AutoRandomNonPositive)
}
if shard > AutoRandomShardBitsMax {
errMsg := fmt.Sprintf(AutoRandomOverflowErrMsg, AutoRandomShardBitsMax, shard, colName)
return 0, dbterror.ErrInvalidAutoRandom.FastGenByArgs(errMsg)
}
return uint64(shard), nil
}
// AutoRandomRangeBitsNormalize normalizes the auto random range bits.
func AutoRandomRangeBitsNormalize(rangeBits int) (ret uint64, err error) {
if rangeBits == types.UnspecifiedLength {
return AutoRandomRangeBitsDefault, nil
}
if rangeBits < AutoRandomRangeBitsMin || rangeBits > AutoRandomRangeBitsMax {
errMsg := fmt.Sprintf(AutoRandomInvalidRangeBits, AutoRandomRangeBitsMin, AutoRandomRangeBitsMax, rangeBits)
return 0, dbterror.ErrInvalidAutoRandom.FastGenByArgs(errMsg)
}
return uint64(rangeBits), nil
}
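// Illustrative sketch (not part of the original file): how the two normalize helpers above
// might be combined when resolving an AUTO_RANDOM(shard, range) column definition. The
// function and variable names are hypothetical.
func normalizeAutoRandomBits(shard, rangeBits int, colName string) (shardBits, rBits uint64, err error) {
	shardBits, err = AutoRandomShardBitsNormalize(shard, colName)
	if err != nil {
		return 0, 0, err
	}
	rBits, err = AutoRandomRangeBitsNormalize(rangeBits)
	if err != nil {
		return 0, 0, err
	}
	return shardBits, rBits, nil
}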
// AllocatorType is the type of allocator for generating auto-id. Different types of allocators use different key-value pairs.
type AllocatorType uint8
const (
// RowIDAllocType indicates the allocator is used to allocate row id.
RowIDAllocType AllocatorType = iota
// AutoIncrementType indicates the allocator is used to allocate auto increment value.
AutoIncrementType
// AutoRandomType indicates the allocator is used to allocate auto-shard id.
AutoRandomType
// SequenceType indicates the allocator is used to allocate sequence value.
SequenceType
)
func (a AllocatorType) String() string {
switch a {
case RowIDAllocType:
return "_tidb_rowid"
case AutoIncrementType:
return "auto_increment"
case AutoRandomType:
return "auto_random"
case SequenceType:
return "sequence"
}
return "unknown"
}
// CustomAutoIncCacheOption is one kind of AllocOption to customize the allocator step length.
type CustomAutoIncCacheOption int64
// ApplyOn implements the AllocOption interface.
func (step CustomAutoIncCacheOption) ApplyOn(alloc *allocator) {
if step == 0 {
return
}
alloc.step = int64(step)
alloc.customStep = true
}
// AllocOptionTableInfoVersion is used to pass the TableInfo.Version to the allocator.
type AllocOptionTableInfoVersion uint16
// ApplyOn implements the AllocOption interface.
func (v AllocOptionTableInfoVersion) ApplyOn(alloc *allocator) {
alloc.tbVersion = uint16(v)
}
// AllocOption is an interface for defining custom allocator options that may be added in the future.
type AllocOption interface {
ApplyOn(*allocator)
}
// Allocator is an auto increment ID generator.
// In fact it only guarantees that the generated IDs are unique.
type Allocator interface {
// Alloc allocates N consecutive autoIDs for the table with tableID, returning the (min, max] range of the allocated autoID batch.
// It gets a batch of autoIDs at a time, so it does not need to access storage for each call.
// The consecutive property is used when inserting multiple rows in one statement.
// increment & offset are used to validate the start position (the allocator's base is not always the last allocated ID).
// The returned range is (min, max]:
// case increment=1 & offset=1: the IDs can be derived as min+1, min+2 ... max.
// case increment=x & offset=y: the caller first needs to seek to firstID via `SeekToFirstAutoIDXXX`, then derive the IDs as firstID, firstID+increment, firstID+increment*2 ... (see the illustrative sketch after this interface).
Alloc(ctx context.Context, n uint64, increment, offset int64) (int64, int64, error)
// AllocSeqCache allocates a sequence batch value cached at the table level (rather than in the allocator); the returned range covers
// the size of the sequence cache with its increment. The returned round indicates how many cycles the sequence has gone
// through if it has the cycle option.
AllocSeqCache() (minv, maxv, round int64, err error)
// Rebase rebases the autoID base for table with tableID and the new base value.
// If allocIDs is true, it will allocate some IDs and save to the cache.
// If allocIDs is false, it will not allocate IDs.
Rebase(ctx context.Context, newBase int64, allocIDs bool) error
// ForceRebase sets the next global auto ID to newBase.
ForceRebase(newBase int64) error
// RebaseSeq rebases the sequence value on the number axis for the table with tableID and the new base value.
RebaseSeq(newBase int64) (int64, bool, error)
// Transfer transfers the ownership of this allocator to another table.
Transfer(databaseID, tableID int64) error
// Base returns the current base of the Allocator.
Base() int64
// End is only used for test.
End() int64
// NextGlobalAutoID returns the next global autoID.
NextGlobalAutoID() (int64, error)
GetType() AllocatorType
}
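// Illustrative sketch (not part of the original file): deriving the concrete IDs from the
// (min, max] range returned by Alloc when increment/offset are not 1, as described in the
// Alloc doc comment above. The function name is hypothetical.
func deriveAllocatedIDs(minv, maxv, increment, offset int64) []int64 {
	ids := make([]int64, 0, (maxv-minv)/increment+1)
	for id := SeekToFirstAutoIDSigned(minv, increment, offset); id <= maxv; id += increment {
		ids = append(ids, id)
	}
	return ids
}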
// Allocators represents a set of `Allocator`s.
type Allocators struct {
SepAutoInc bool
Allocs []Allocator
}
// NewAllocators packs multiple `Allocator`s into Allocators.
func NewAllocators(sepAutoInc bool, allocators ...Allocator) Allocators {
return Allocators{
SepAutoInc: sepAutoInc,
Allocs: allocators,
}
}
// Append adds an allocator to the allocators.
func (all Allocators) Append(a Allocator) Allocators {
return Allocators{
SepAutoInc: all.SepAutoInc,
Allocs: append(all.Allocs, a),
}
}
// Get returns the Allocator according to the AllocatorType.
func (all Allocators) Get(allocType AllocatorType) Allocator {
if !all.SepAutoInc {
if allocType == AutoIncrementType {
allocType = RowIDAllocType
}
}
for _, a := range all.Allocs {
if a.GetType() == allocType {
return a
}
}
return nil
}
// Filter filters all the allocators that match pred.
func (all Allocators) Filter(pred func(Allocator) bool) Allocators {
var ret []Allocator
for _, a := range all.Allocs {
if pred(a) {
ret = append(ret, a)
}
}
return Allocators{
SepAutoInc: all.SepAutoInc,
Allocs: ret,
}
}
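// Illustrative sketch (not part of the original file): typical lookups on an Allocators set.
// The variable allocs is hypothetical.
//
//	rowIDAlloc := allocs.Get(RowIDAllocType)
//	nonSequence := allocs.Filter(func(a Allocator) bool { return a.GetType() != SequenceType })
//
// Note that when SepAutoInc is false, Get(AutoIncrementType) falls back to the row-id
// allocator, as implemented above.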
type allocator struct {
mu sync.Mutex
base int64
end int64
store kv.Storage
// dbID is the database ID where it was created.
dbID int64
tbID int64
tbVersion uint16
isUnsigned bool
lastAllocTime time.Time
step int64
customStep bool
allocType AllocatorType
sequence *model.SequenceInfo
}
// Tests need to change it, so it's a variable.
// Don't use it directly; use the GetStep/SetStep functions.
var defaultStep = int64(30000)
// GetStep gets the defaultStep value.
func GetStep() int64 {
return atomic.LoadInt64(&defaultStep)
}
// SetStep is only used by tests
func SetStep(s int64) {
atomic.StoreInt64(&defaultStep, s)
}
// Base implements autoid.Allocator Base interface.
func (alloc *allocator) Base() int64 {
alloc.mu.Lock()
defer alloc.mu.Unlock()
return alloc.base
}
// End implements autoid.Allocator End interface.
func (alloc *allocator) End() int64 {
alloc.mu.Lock()
defer alloc.mu.Unlock()
return alloc.end
}
// NextGlobalAutoID implements autoid.Allocator NextGlobalAutoID interface.
func (alloc *allocator) NextGlobalAutoID() (int64, error) {
var autoID int64
startTime := time.Now()
ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnMeta)
err := kv.RunInNewTxn(ctx, alloc.store, true, func(_ context.Context, txn kv.Transaction) error {
var err1 error
autoID, err1 = alloc.getIDAccessor(txn).Get()
if err1 != nil {
return errors.Trace(err1)
}
return nil
})
metrics.AutoIDHistogram.WithLabelValues(metrics.GlobalAutoID, metrics.RetLabel(err)).Observe(time.Since(startTime).Seconds())
if alloc.isUnsigned {
return int64(uint64(autoID) + 1), err
}
return autoID + 1, err
}
// Transfer implements autoid.Allocator Transfer interface.
func (alloc *allocator) Transfer(databaseID, tableID int64) error {
if alloc.dbID == databaseID && alloc.tbID == tableID {
return nil
}
ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnMeta)
err := kv.RunInNewTxn(ctx, alloc.store, true, func(_ context.Context, txn kv.Transaction) error {
return alloc.getIDAccessor(txn).CopyTo(databaseID, tableID)
})
if err == nil {
alloc.dbID = databaseID
alloc.tbID = tableID
}
return err
}
func (alloc *allocator) rebase4Unsigned(ctx context.Context, requiredBase uint64, allocIDs bool) error {
// Satisfied by alloc.base, nothing to do.
if requiredBase <= uint64(alloc.base) {
return nil
}
// Satisfied by alloc.end, need to update alloc.base.
if requiredBase <= uint64(alloc.end) {
alloc.base = int64(requiredBase)
return nil
}
ctx, allocatorStats, commitDetail := getAllocatorStatsFromCtx(ctx)
if allocatorStats != nil {
allocatorStats.rebaseCount++
defer func() {
if commitDetail != nil {
allocatorStats.mergeCommitDetail(*commitDetail)
}
}()
}
var newBase, newEnd uint64
startTime := time.Now()
ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnMeta)
err := kv.RunInNewTxn(ctx, alloc.store, true, func(_ context.Context, txn kv.Transaction) error {
if allocatorStats != nil {
txn.SetOption(kv.CollectRuntimeStats, allocatorStats.SnapshotRuntimeStats)
}
idAcc := alloc.getIDAccessor(txn)
currentEnd, err1 := idAcc.Get()
if err1 != nil {
return err1
}
uCurrentEnd := uint64(currentEnd)
if allocIDs {
newBase = max(uCurrentEnd, requiredBase)
newEnd = min(math.MaxUint64-uint64(alloc.step), newBase) + uint64(alloc.step)
} else {
if uCurrentEnd >= requiredBase {
newBase = uCurrentEnd
newEnd = uCurrentEnd
// Required base satisfied, we don't need to update KV.
return nil
}
// If we don't want to allocate IDs, for example when creating a table with a given base value,
// we need to make sure that when another TiDB server allocates an ID for the first time, requiredBase + 1
// will be allocated, so we need to increase the end to exactly the requiredBase.
newBase = requiredBase
newEnd = requiredBase
}
_, err1 = idAcc.Inc(int64(newEnd - uCurrentEnd))
return err1
})
metrics.AutoIDHistogram.WithLabelValues(metrics.TableAutoIDRebase, metrics.RetLabel(err)).Observe(time.Since(startTime).Seconds())
if err != nil {
return err
}
alloc.base, alloc.end = int64(newBase), int64(newEnd)
return nil
}
func (alloc *allocator) rebase4Signed(ctx context.Context, requiredBase int64, allocIDs bool) error {
// Satisfied by alloc.base, nothing to do.
if requiredBase <= alloc.base {
return nil
}
// Satisfied by alloc.end, need to update alloc.base.
if requiredBase <= alloc.end {
alloc.base = requiredBase
return nil
}
ctx, allocatorStats, commitDetail := getAllocatorStatsFromCtx(ctx)
if allocatorStats != nil {
allocatorStats.rebaseCount++
defer func() {
if commitDetail != nil {
allocatorStats.mergeCommitDetail(*commitDetail)
}
}()
}
var newBase, newEnd int64
startTime := time.Now()
ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnMeta)
err := kv.RunInNewTxn(ctx, alloc.store, true, func(_ context.Context, txn kv.Transaction) error {
if allocatorStats != nil {
txn.SetOption(kv.CollectRuntimeStats, allocatorStats.SnapshotRuntimeStats)
}
idAcc := alloc.getIDAccessor(txn)
currentEnd, err1 := idAcc.Get()
if err1 != nil {
return err1
}
if allocIDs {
newBase = max(currentEnd, requiredBase)
newEnd = min(math.MaxInt64-alloc.step, newBase) + alloc.step
} else {
if currentEnd >= requiredBase {
newBase = currentEnd
newEnd = currentEnd
// Required base satisfied, we don't need to update KV.
return nil
}
// If we don't want to allocate IDs, for example when creating a table with a given base value,
// we need to make sure that when another TiDB server allocates an ID for the first time, requiredBase + 1
// will be allocated, so we need to increase the end to exactly the requiredBase.
newBase = requiredBase
newEnd = requiredBase
}
_, err1 = idAcc.Inc(newEnd - currentEnd)
return err1
})
metrics.AutoIDHistogram.WithLabelValues(metrics.TableAutoIDRebase, metrics.RetLabel(err)).Observe(time.Since(startTime).Seconds())
if err != nil {
return err
}
alloc.base, alloc.end = newBase, newEnd
return nil
}
// rebase4Sequence won't allocate a batch immediately, because it doesn't cache values in the allocator.
func (alloc *allocator) rebase4Sequence(requiredBase int64) (int64, bool, error) {
startTime := time.Now()
alreadySatisfied := false
ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnMeta)
err := kv.RunInNewTxn(ctx, alloc.store, true, func(_ context.Context, txn kv.Transaction) error {
acc := meta.NewMutator(txn).GetAutoIDAccessors(alloc.dbID, alloc.tbID)
currentEnd, err := acc.SequenceValue().Get()
if err != nil {
return err
}
if alloc.sequence.Increment > 0 {
if currentEnd >= requiredBase {
// Required base satisfied, we don't need to update KV.
alreadySatisfied = true
return nil
}
} else {
if currentEnd <= requiredBase {
// Required base satisfied, we don't need to update KV.
alreadySatisfied = true
return nil
}
}
// If we don't want to allocate IDs, for example when creating a table with a given base value,
// we need to make sure that when another TiDB server allocates an ID for the first time, requiredBase + 1
// will be allocated, so we need to increase the end to exactly the requiredBase.
_, err = acc.SequenceValue().Inc(requiredBase - currentEnd)
return err
})
// TODO: sequence metrics
metrics.AutoIDHistogram.WithLabelValues(metrics.TableAutoIDRebase, metrics.RetLabel(err)).Observe(time.Since(startTime).Seconds())
if err != nil {
return 0, false, err
}
if alreadySatisfied {
return 0, true, nil
}
return requiredBase, false, err
}
// Rebase implements autoid.Allocator Rebase interface.
// The requiredBase is the minimum base value after Rebase.
// The real base may be greater than the required base.
func (alloc *allocator) Rebase(ctx context.Context, requiredBase int64, allocIDs bool) error {
alloc.mu.Lock()
defer alloc.mu.Unlock()
if alloc.isUnsigned {
return alloc.rebase4Unsigned(ctx, uint64(requiredBase), allocIDs)
}
return alloc.rebase4Signed(ctx, requiredBase, allocIDs)
}
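// Illustrative sketch (not part of the original file): a typical Rebase call after an
// explicit value is inserted into an AUTO_INCREMENT column, so that later allocations
// start above it. The variables are hypothetical.
//
//	if err := alloc.Rebase(ctx, insertedID, true); err != nil {
//		return err
//	}
//	// The next Alloc now returns a (min, max] range with min >= insertedID.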
// ForceRebase implements autoid.Allocator ForceRebase interface.
func (alloc *allocator) ForceRebase(requiredBase int64) error {
if requiredBase == -1 {
return ErrAutoincReadFailed.GenWithStack("Cannot force rebase the next global ID to '0'")
}
alloc.mu.Lock()
defer alloc.mu.Unlock()
startTime := time.Now()
ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnMeta)
err := kv.RunInNewTxn(ctx, alloc.store, true, func(_ context.Context, txn kv.Transaction) error {
idAcc := alloc.getIDAccessor(txn)
currentEnd, err1 := idAcc.Get()
if err1 != nil {
return err1
}
var step int64
if !alloc.isUnsigned {
step = requiredBase - currentEnd
} else {
uRequiredBase, uCurrentEnd := uint64(requiredBase), uint64(currentEnd)
step = int64(uRequiredBase - uCurrentEnd)
}
_, err1 = idAcc.Inc(step)
return err1
})
metrics.AutoIDHistogram.WithLabelValues(metrics.TableAutoIDRebase, metrics.RetLabel(err)).Observe(time.Since(startTime).Seconds())
if err != nil {
return err
}
alloc.base, alloc.end = requiredBase, requiredBase
return nil
}
// RebaseSeq implements the autoid.Allocator RebaseSeq interface.
// The return values are quite similar to those of an expression function: the bool means whether the result should be NULL.
// Here it is used by the setval expression function (true means the set value is already satisfied, so NULL is returned).
// Case 1: when requiredBase is already satisfied by the current value, it returns (0, true, nil).
// Case 2: when requiredBase is successfully set, it returns (requiredBase, false, nil).
// If an error occurs in the process, it is returned immediately.
func (alloc *allocator) RebaseSeq(requiredBase int64) (int64, bool, error) {
alloc.mu.Lock()
defer alloc.mu.Unlock()
return alloc.rebase4Sequence(requiredBase)
}
func (alloc *allocator) GetType() AllocatorType {
return alloc.allocType
}
// NextStep returns the new auto ID step according to the previous step and its consumption time.
func NextStep(curStep int64, consumeDur time.Duration) int64 {
failpoint.Inject("mockAutoIDCustomize", func(val failpoint.Value) {
if val.(bool) {
failpoint.Return(3)
}
})
failpoint.Inject("mockAutoIDChange", func(val failpoint.Value) {
if val.(bool) {
failpoint.Return(GetStep())
}
})
consumeRate := defaultConsumeTime.Seconds() / consumeDur.Seconds()
res := int64(float64(curStep) * consumeRate)
if res < minStep {
return minStep
} else if res > maxStep {
return maxStep
}
return res
}
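// Illustrative sketch (not part of the original file): how the dynamic step above reacts
// to the consumption speed of the previous batch. The function name is hypothetical.
func exampleNextStep() {
	// A 30000-id batch consumed in 1s (10x faster than defaultConsumeTime) grows the
	// step towards 300000, still below the maxStep cap of 2000000.
	fast := NextStep(30000, time.Second) // 300000
	// The same batch consumed in 100s would shrink the step, but it is clamped to minStep.
	slow := NextStep(30000, 100*time.Second) // 30000
	_, _ = fast, slow
}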
// MockForTest is exported for testing.
// The actual implementation is in github.com/pingcap/tidb/pkg/autoid_service because of a
// circular package dependency issue.
var MockForTest func(kv.Storage) autoid.AutoIDAllocClient
func newSinglePointAlloc(r Requirement, dbID, tblID int64, isUnsigned bool) *singlePointAlloc {
keyspaceID := uint32(r.Store().GetCodec().GetKeyspaceID())
spa := &singlePointAlloc{
dbID: dbID,
tblID: tblID,
isUnsigned: isUnsigned,
keyspaceID: keyspaceID,
}
if r.AutoIDClient() == nil {
// Only for test in mockstore
spa.ClientDiscover = &ClientDiscover{}
spa.mu.AutoIDAllocClient = MockForTest(r.Store())
} else {
spa.ClientDiscover = r.AutoIDClient()
}
// The mockAutoIDChange failpoint is not implemented in this allocator, so fall back to the default one.
failpoint.Inject("mockAutoIDChange", func(val failpoint.Value) {
if val.(bool) {
spa = nil
}
})
return spa
}
// Requirement is the parameter required by NewAllocator
type Requirement interface {
Store() kv.Storage
AutoIDClient() *ClientDiscover
}
// NewAllocator returns a new auto increment id generator on the store.
func NewAllocator(r Requirement, dbID, tbID int64, isUnsigned bool,
allocType AllocatorType, opts ...AllocOption) Allocator {
var store kv.Storage
if r != nil {
store = r.Store()
}
alloc := &allocator{
store: store,
dbID: dbID,
tbID: tbID,
isUnsigned: isUnsigned,
step: GetStep(),
lastAllocTime: time.Now(),
allocType: allocType,
}
for _, fn := range opts {
fn.ApplyOn(alloc)
}
// Use the MySQL compatible AUTO_INCREMENT mode.
if alloc.customStep && alloc.step == 1 && alloc.tbVersion >= model.TableInfoVersion5 {
if allocType == AutoIncrementType {
alloc1 := newSinglePointAlloc(r, dbID, tbID, isUnsigned)
if alloc1 != nil {
return alloc1
}
} else if allocType == RowIDAllocType {
// Now that the autoid and rowid allocators are separated, the AUTO_ID_CACHE 1 setting should not make
// the rowid allocator stop using its cache.
alloc.customStep = false
alloc.step = GetStep()
}
}
return alloc
}
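// Illustrative sketch (not part of the original file): with AUTO_ID_CACHE 1 (custom step 1)
// and TableInfo version >= 5, NewAllocator returns the centralized singlePointAlloc for
// AUTO_INCREMENT, while the row-id allocator keeps using the cached default step, as the
// branch above shows. The variable r is a hypothetical Requirement implementation.
//
//	autoIncAlloc := NewAllocator(r, dbID, tbID, false, AutoIncrementType,
//		CustomAutoIncCacheOption(1), AllocOptionTableInfoVersion(model.TableInfoVersion5))
//	rowIDAlloc := NewAllocator(r, dbID, tbID, false, RowIDAllocType,
//		CustomAutoIncCacheOption(1), AllocOptionTableInfoVersion(model.TableInfoVersion5))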
// NewSequenceAllocator returns a new sequence value generator on the store.
func NewSequenceAllocator(store kv.Storage, dbID, tbID int64, info *model.SequenceInfo) Allocator {
return &allocator{
store: store,
dbID: dbID,
tbID: tbID,
// Sequence allocator is always signed.
isUnsigned: false,
lastAllocTime: time.Now(),
allocType: SequenceType,
sequence: info,
}
}
// TODO: Handle allocators when changing Table ID during ALTER TABLE t PARTITION BY ...
// NewAllocatorsFromTblInfo creates an array of allocators of different types with the information of model.TableInfo.
func NewAllocatorsFromTblInfo(r Requirement, dbID int64, tblInfo *model.TableInfo) Allocators {
var allocs []Allocator
idCacheOpt := CustomAutoIncCacheOption(tblInfo.AutoIDCache)
tblVer := AllocOptionTableInfoVersion(tblInfo.Version)
hasRowID := !tblInfo.PKIsHandle && !tblInfo.IsCommonHandle
hasAutoIncID := tblInfo.GetAutoIncrementColInfo() != nil
if hasRowID || (hasAutoIncID && !tblInfo.SepAutoInc()) {
alloc := NewAllocator(r, dbID, tblInfo.ID, tblInfo.IsAutoIncColUnsigned(), RowIDAllocType, idCacheOpt, tblVer)
allocs = append(allocs, alloc)
}
if hasAutoIncID && tblInfo.SepAutoInc() {
alloc := NewAllocator(r, dbID, tblInfo.ID, tblInfo.IsAutoIncColUnsigned(), AutoIncrementType, idCacheOpt, tblVer)
allocs = append(allocs, alloc)
}
hasAutoRandID := tblInfo.ContainsAutoRandomBits()
if hasAutoRandID {
alloc := NewAllocator(r, dbID, tblInfo.ID, tblInfo.IsAutoRandomBitColUnsigned(), AutoRandomType, idCacheOpt, tblVer)
allocs = append(allocs, alloc)
}
if tblInfo.IsSequence() {
allocs = append(allocs, NewSequenceAllocator(r.Store(), dbID, tblInfo.ID, tblInfo.Sequence))
}
return NewAllocators(tblInfo.SepAutoInc(), allocs...)
}
// Alloc implements autoid.Allocator Alloc interface.
// For the autoIncrement allocator, the increment and offset must always be positive, in [1, 65535].
// Attention:
// When the increment and offset are not the default value (1), the returned range (min, max] requires
// calculating the correct start position rather than simply adding 1 to min. You can then derive
// the successive autoIDs by adding increment to firstID (n-1) times.
//
// Example:
// (6, 13] is returned with increment = 4, offset = 1, n = 2.
// 6 is the last allocated value for some other autoID or handle, possibly with a different increment and step,
// but we don't actually care about that; all we need is to calculate the new autoIDs corresponding to the
// increment and offset at this moment. Simplified, the rule is (ID - offset) % increment = 0,
// so the first autoID should be 9, then add the increment to it to get 13.
func (alloc *allocator) Alloc(ctx context.Context, n uint64, increment, offset int64) (minv, maxv int64, err error) {
if alloc.tbID == 0 {
return 0, 0, errInvalidTableID.GenWithStackByArgs("Invalid tableID")
}
if n == 0 {
return 0, 0, nil
}
if alloc.allocType == AutoIncrementType || alloc.allocType == RowIDAllocType {
if !validIncrementAndOffset(increment, offset) {
return 0, 0, errInvalidIncrementAndOffset.GenWithStackByArgs(increment, offset)
}
}
alloc.mu.Lock()
defer alloc.mu.Unlock()
if alloc.isUnsigned {
return alloc.alloc4Unsigned(ctx, n, increment, offset)
}
return alloc.alloc4Signed(ctx, n, increment, offset)
}
func (alloc *allocator) AllocSeqCache() (minv, maxv int64, round int64, err error) {
alloc.mu.Lock()
defer alloc.mu.Unlock()
return alloc.alloc4Sequence()
}
func validIncrementAndOffset(increment, offset int64) bool {
return (increment >= minIncrement && increment <= maxIncrement) && (offset >= minIncrement && offset <= maxIncrement)
}
// CalcNeededBatchSize is used to calculate the batch size for autoID allocation.
// It first seeks to the first valid position based on increment and offset,
// then adds the remaining length, which is (n-1) * increment.
func CalcNeededBatchSize(base, n, increment, offset int64, isUnsigned bool) int64 {
if increment == 1 {
return n
}
if isUnsigned {
// SeekToFirstAutoIDUnSigned seeks to the next unsigned valid position.
nr := SeekToFirstAutoIDUnSigned(uint64(base), uint64(increment), uint64(offset))
// Calculate the total batch size needed.
nr += (uint64(n) - 1) * uint64(increment)
return int64(nr - uint64(base))
}
nr := SeekToFirstAutoIDSigned(base, increment, offset)
// Calculate the total batch size needed.
nr += (n - 1) * increment
return nr - base
}
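// Illustrative sketch (not part of the original file): a worked example of the calculation
// above for the signed case, matching the (6, 13] example in the Alloc doc comment.
func exampleCalcNeededBatchSize() int64 {
	// base = 6, n = 2, increment = 4, offset = 1:
	// SeekToFirstAutoIDSigned(6, 4, 1) = 9, plus (2-1)*4 gives 13,
	// so the needed batch size is 13 - 6 = 7.
	return CalcNeededBatchSize(6, 2, 4, 1, false)
}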
// CalcSequenceBatchSize calculates the next sequence batch size.
func CalcSequenceBatchSize(base, size, increment, offset, minv, maxv int64) (int64, error) {
// The sequence grows in the positive direction.
if increment > 0 {
if increment == 1 {
// Sequence is already allocated to the end.
if base >= maxv {
return 0, ErrAutoincReadFailed
}
// The rest of sequence < cache size, return the rest.
if maxv-base < size {
return maxv - base, nil
}
// The rest of sequence is adequate.
return size, nil
}
nr, ok := SeekToFirstSequenceValue(base, increment, offset, minv, maxv)
if !ok {
return 0, ErrAutoincReadFailed
}
// The rest of sequence < cache size, return the rest.
if maxv-nr < (size-1)*increment {
return maxv - base, nil
}
return (nr - base) + (size-1)*increment, nil
}
// The sequence grows in the negative direction.
if increment == -1 {
if base <= minv {
return 0, ErrAutoincReadFailed
}
if base-minv < size {
return base - minv, nil
}
return size, nil
}
nr, ok := SeekToFirstSequenceValue(base, increment, offset, minv, maxv)
if !ok {
return 0, ErrAutoincReadFailed
}
// The rest of sequence < cache size, return the rest.
if nr-minv < (size-1)*(-increment) {
return base - minv, nil
}
return (base - nr) + (size-1)*(-increment), nil
}
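// Illustrative sketch (not part of the original file): a worked example of the positive-growth
// branch of CalcSequenceBatchSize above. The function name is hypothetical.
func exampleCalcSequenceBatchSize() (int64, error) {
	// base = 0, cache size = 5, increment = 2, offset = 1, range [1, 100]:
	// the first valid value after 0 is 1, and the batch must also cover (5-1)*2 more,
	// so the returned batch size is (1 - 0) + (5-1)*2 = 9.
	return CalcSequenceBatchSize(0, 5, 2, 1, 1, 100)
}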
// SeekToFirstSequenceValue seeks to the next valid value (which must be in the range [minv, maxv]);
// the bool indicates whether the first value was found.
// The seeking formula is described as below:
//
//	nr := (base + increment - offset) / increment
//
//	first := nr*increment + offset
//
// Because the formula computation may overflow Int64, we transfer it to uint64 for the distance computation.
func SeekToFirstSequenceValue(base, increment, offset, minv, maxv int64) (int64, bool) {
if increment > 0 {
// Sequence is already allocated to the end.
if base >= maxv {
return 0, false
}
uMax := EncodeIntToCmpUint(maxv)
uBase := EncodeIntToCmpUint(base)
uOffset := EncodeIntToCmpUint(offset)
uIncrement := uint64(increment)
if uMax-uBase < uIncrement {
// Enumerate the possible first values.
for i := uBase + 1; i <= uMax; i++ {
if (i-uOffset)%uIncrement == 0 {
return DecodeCmpUintToInt(i), true
}
}
return 0, false
}
nr := (uBase + uIncrement - uOffset) / uIncrement
nr = nr*uIncrement + uOffset
first := DecodeCmpUintToInt(nr)
return first, true
}
// Sequence is already allocated to the end.
if base <= minv {
return 0, false
}
uMin := EncodeIntToCmpUint(minv)
uBase := EncodeIntToCmpUint(base)
uOffset := EncodeIntToCmpUint(offset)
uIncrement := uint64(-increment)
if uBase-uMin < uIncrement {
// Enumerate the possible first values.
for i := uBase - 1; i >= uMin; i-- {
if (uOffset-i)%uIncrement == 0 {
return DecodeCmpUintToInt(i), true
}
}
return 0, false
}
nr := (uOffset - uBase + uIncrement) / uIncrement
nr = uOffset - nr*uIncrement
first := DecodeCmpUintToInt(nr)
return first, true
}
// SeekToFirstAutoIDSigned seeks to the next valid signed position.
func SeekToFirstAutoIDSigned(base, increment, offset int64) int64 {
nr := (base + increment - offset) / increment
nr = nr*increment + offset
return nr
}
// SeekToFirstAutoIDUnSigned seeks to the next valid unsigned position.
func SeekToFirstAutoIDUnSigned(base, increment, offset uint64) uint64 {
nr := (base + increment - offset) / increment
nr = nr*increment + offset
return nr
}
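// Illustrative sketch (not part of the original file): the seek formula above rounds the
// base up to the next id satisfying (id - offset) % increment == 0.
func exampleSeekToFirstAutoID() {
	// (100 + 7 - 3) / 7 = 14, and 14*7 + 3 = 101, the first valid id after 100.
	first := SeekToFirstAutoIDSigned(100, 7, 3) // 101
	_ = first
}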
func (alloc *allocator) alloc4Signed(ctx context.Context, n uint64, increment, offset int64) (minv, maxv int64, err error) {
// Check offset rebase if necessary.
if offset-1 > alloc.base {
if err := alloc.rebase4Signed(ctx, offset-1, true); err != nil {
return 0, 0, err
}
}
// CalcNeededBatchSize calculates the total batch size needed.
n1 := CalcNeededBatchSize(alloc.base, int64(n), increment, offset, alloc.isUnsigned)
// The condition alloc.base+n1 > alloc.end would overflow when alloc.base + n1 > MaxInt64, so we need this check.
if math.MaxInt64-alloc.base <= n1 {
return 0, 0, ErrAutoincReadFailed
}
// The local rest is not enough for the allocation, skip it and refill the cache from storage.
if alloc.base+n1 > alloc.end {
var newBase, newEnd int64
startTime := time.Now()
nextStep := alloc.step
if !alloc.customStep && alloc.end > 0 {
// Although it may skip a segment here, we still treat it as consumed.
consumeDur := startTime.Sub(alloc.lastAllocTime)
nextStep = NextStep(alloc.step, consumeDur)
}
ctx, allocatorStats, commitDetail := getAllocatorStatsFromCtx(ctx)
if allocatorStats != nil {
allocatorStats.allocCount++
defer func() {
if commitDetail != nil {
allocatorStats.mergeCommitDetail(*commitDetail)
}
}()
}
ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnMeta)
err := kv.RunInNewTxn(ctx, alloc.store, true, func(ctx context.Context, txn kv.Transaction) error {
defer tracing.StartRegion(ctx, "alloc.alloc4Signed").End()
if allocatorStats != nil {
txn.SetOption(kv.CollectRuntimeStats, allocatorStats.SnapshotRuntimeStats)
}
idAcc := alloc.getIDAccessor(txn)
var err1 error
newBase, err1 = idAcc.Get()
if err1 != nil {
return err1
}
// CalcNeededBatchSize calculates the total batch size needed on global base.
n1 = CalcNeededBatchSize(newBase, int64(n), increment, offset, alloc.isUnsigned)
// Although the step may be customized by the user, we still need to make sure nextStep is big enough for the insert batch.
if nextStep < n1 {
nextStep = n1
}
tmpStep := min(math.MaxInt64-newBase, nextStep)
// The global rest is not enough for alloc.
if tmpStep < n1 {
return ErrAutoincReadFailed
}
newEnd, err1 = idAcc.Inc(tmpStep)
return err1
})
metrics.AutoIDHistogram.WithLabelValues(metrics.TableAutoIDAlloc, metrics.RetLabel(err)).Observe(time.Since(startTime).Seconds())
if err != nil {
return 0, 0, err
}
// Store the step for non-customized-step allocator to calculate next dynamic step.
if !alloc.customStep {
alloc.step = nextStep
}
alloc.lastAllocTime = time.Now()
if newBase == math.MaxInt64 {
return 0, 0, ErrAutoincReadFailed
}
alloc.base, alloc.end = newBase, newEnd
}
if logutil.BgLogger().Core().Enabled(zap.DebugLevel) {
logutil.BgLogger().Debug("alloc N signed ID",
zap.Uint64("from ID", uint64(alloc.base)),
zap.Uint64("to ID", uint64(alloc.base+n1)),
zap.Int64("table ID", alloc.tbID),
zap.Int64("database ID", alloc.dbID))
}
minv = alloc.base
alloc.base += n1
return minv, alloc.base, nil
}
func (alloc *allocator) alloc4Unsigned(ctx context.Context, n uint64, increment, offset int64) (minv int64, maxv int64, err error) {
// Check offset rebase if necessary.
if uint64(offset-1) > uint64(alloc.base) {
if err := alloc.rebase4Unsigned(ctx, uint64(offset-1), true); err != nil {
return 0, 0, err
}
}
// CalcNeededBatchSize calculates the total batch size needed.
n1 := CalcNeededBatchSize(alloc.base, int64(n), increment, offset, alloc.isUnsigned)
// The condition alloc.base+n1 > alloc.end would overflow when uint64(alloc.base) + uint64(n1) > MaxUint64, so we need this check.
if math.MaxUint64-uint64(alloc.base) <= uint64(n1) {
return 0, 0, ErrAutoincReadFailed
}
// The local rest is not enough for the allocation, skip it and refill the cache from storage.
if uint64(alloc.base)+uint64(n1) > uint64(alloc.end) {
var newBase, newEnd int64
startTime := time.Now()
nextStep := alloc.step
if !alloc.customStep {
// Although it may skip a segment here, we still treat it as consumed.
consumeDur := startTime.Sub(alloc.lastAllocTime)
nextStep = NextStep(alloc.step, consumeDur)
}
ctx, allocatorStats, commitDetail := getAllocatorStatsFromCtx(ctx)
if allocatorStats != nil {
allocatorStats.allocCount++
defer func() {
if commitDetail != nil {
allocatorStats.mergeCommitDetail(*commitDetail)
}
}()