From ba77a2f58f9d750c72586bb6771dd8127dff4194 Mon Sep 17 00:00:00 2001 From: ZhuoZhi <517770911@qq.com> Date: Thu, 17 Jun 2021 20:38:38 +0800 Subject: [PATCH 01/25] planner: select distinct should bypass batchget (#25477) --- executor/point_get_test.go | 14 ++++++++++++++ planner/core/point_get_plan.go | 2 +- 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/executor/point_get_test.go b/executor/point_get_test.go index 721d7dc00bcb9..8a9dbdbe135f3 100644 --- a/executor/point_get_test.go +++ b/executor/point_get_test.go @@ -159,6 +159,20 @@ func (s *testPointGetSuite) TestPointGetDataTooLong(c *C) { tk.MustExec("drop table if exists PK_1389;") } +// issue #25320 +func (s *testPointGetSuite) TestDistinctPlan(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("drop table if exists test_distinct;") + tk.MustExec(`CREATE TABLE test_distinct ( + id bigint(18) NOT NULL COMMENT '主键', + b bigint(18) NOT NULL COMMENT '用户ID', + PRIMARY KEY (id) + ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_general_ci;`) + tk.MustExec("insert into test_distinct values (123456789101112131,223456789101112131),(123456789101112132,223456789101112131);") + tk.MustQuery("select distinct b from test_distinct where id in (123456789101112131,123456789101112132);").Check(testkit.Rows("223456789101112131")) +} + func (s *testPointGetSuite) TestPointGetCharPK(c *C) { tk := testkit.NewTestKit(c, s.store) tk.MustExec(`use test;`) diff --git a/planner/core/point_get_plan.go b/planner/core/point_get_plan.go index 422f72ed4f98d..a1e0f7184dbc8 100644 --- a/planner/core/point_get_plan.go +++ b/planner/core/point_get_plan.go @@ -706,7 +706,7 @@ func newBatchPointGetPlan( func tryWhereIn2BatchPointGet(ctx sessionctx.Context, selStmt *ast.SelectStmt) *BatchPointGetPlan { if selStmt.OrderBy != nil || selStmt.GroupBy != nil || - selStmt.Limit != nil || selStmt.Having != nil || + selStmt.Limit != nil || selStmt.Having != nil || selStmt.Distinct || len(selStmt.WindowSpecs) > 0 { return nil } From a9117613ddfdccb49256bd971c31e961e330964a Mon Sep 17 00:00:00 2001 From: disksing Date: Thu, 17 Jun 2021 20:54:40 +0800 Subject: [PATCH 02/25] store/tikv: clean up stale tests (#25488) --- store/tikv/tests/1pc_test.go | 312 ----- store/tikv/tests/2pc_fail_test.go | 134 -- store/tikv/tests/2pc_slow_test.go | 39 - store/tikv/tests/2pc_test.go | 1394 -------------------- store/tikv/tests/async_commit_fail_test.go | 284 ---- store/tikv/tests/async_commit_test.go | 542 -------- store/tikv/tests/client_fp_test.go | 97 -- store/tikv/tests/delete_range_test.go | 153 --- store/tikv/tests/isolation_test.go | 198 --- store/tikv/tests/lock_test.go | 774 ----------- store/tikv/tests/prewrite_test.go | 67 - store/tikv/tests/range_task_test.go | 242 ---- store/tikv/tests/rawkv_test.go | 312 ----- store/tikv/tests/safepoint_test.go | 124 -- store/tikv/tests/scan_mock_test.go | 91 -- store/tikv/tests/scan_test.go | 169 --- store/tikv/tests/snapshot_fail_test.go | 245 ---- store/tikv/tests/snapshot_test.go | 315 ----- store/tikv/tests/split_test.go | 250 ---- store/tikv/tests/store_fail_test.go | 53 - store/tikv/tests/store_test.go | 155 --- store/tikv/tests/ticlient_slow_test.go | 93 -- store/tikv/tests/ticlient_test.go | 131 -- store/tikv/tests/util_test.go | 110 -- 24 files changed, 6284 deletions(-) delete mode 100644 store/tikv/tests/1pc_test.go delete mode 100644 store/tikv/tests/2pc_fail_test.go delete mode 100644 store/tikv/tests/2pc_slow_test.go delete mode 100644 store/tikv/tests/2pc_test.go delete 
mode 100644 store/tikv/tests/async_commit_fail_test.go delete mode 100644 store/tikv/tests/async_commit_test.go delete mode 100644 store/tikv/tests/client_fp_test.go delete mode 100644 store/tikv/tests/delete_range_test.go delete mode 100644 store/tikv/tests/isolation_test.go delete mode 100644 store/tikv/tests/lock_test.go delete mode 100644 store/tikv/tests/prewrite_test.go delete mode 100644 store/tikv/tests/range_task_test.go delete mode 100644 store/tikv/tests/rawkv_test.go delete mode 100644 store/tikv/tests/safepoint_test.go delete mode 100644 store/tikv/tests/scan_mock_test.go delete mode 100644 store/tikv/tests/scan_test.go delete mode 100644 store/tikv/tests/snapshot_fail_test.go delete mode 100644 store/tikv/tests/snapshot_test.go delete mode 100644 store/tikv/tests/split_test.go delete mode 100644 store/tikv/tests/store_fail_test.go delete mode 100644 store/tikv/tests/store_test.go delete mode 100644 store/tikv/tests/ticlient_slow_test.go delete mode 100644 store/tikv/tests/ticlient_test.go delete mode 100644 store/tikv/tests/util_test.go diff --git a/store/tikv/tests/1pc_test.go b/store/tikv/tests/1pc_test.go deleted file mode 100644 index 0e1b586bd9182..0000000000000 --- a/store/tikv/tests/1pc_test.go +++ /dev/null @@ -1,312 +0,0 @@ -// Copyright 2020 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -package tikv_test - -import ( - "context" - - . "github.com/pingcap/check" - "github.com/tikv/client-go/v2/metrics" - "github.com/tikv/client-go/v2/mockstore" - "github.com/tikv/client-go/v2/oracle" - "github.com/tikv/client-go/v2/tikv" - "github.com/tikv/client-go/v2/util" -) - -func (s *testAsyncCommitCommon) begin1PC(c *C) tikv.TxnProbe { - txn, err := s.store.Begin() - c.Assert(err, IsNil) - txn.SetEnable1PC(true) - return tikv.TxnProbe{KVTxn: txn} -} - -type testOnePCSuite struct { - OneByOneSuite - testAsyncCommitCommon - bo *tikv.Backoffer -} - -var _ = SerialSuites(&testOnePCSuite{}) - -func (s *testOnePCSuite) SetUpTest(c *C) { - s.testAsyncCommitCommon.setUpTest(c) - s.bo = tikv.NewBackofferWithVars(context.Background(), 5000, nil) -} - -func (s *testOnePCSuite) Test1PC(c *C) { - ctx := context.WithValue(context.Background(), util.SessionID, uint64(1)) - - k1 := []byte("k1") - v1 := []byte("v1") - - txn := s.begin1PC(c) - err := txn.Set(k1, v1) - c.Assert(err, IsNil) - err = txn.Commit(ctx) - c.Assert(err, IsNil) - c.Assert(txn.GetCommitter().IsOnePC(), IsTrue) - c.Assert(txn.GetCommitter().GetOnePCCommitTS(), Equals, txn.GetCommitter().GetCommitTS()) - c.Assert(txn.GetCommitter().GetOnePCCommitTS(), Greater, txn.StartTS()) - // ttlManager is not used for 1PC. 
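- // (1PC commits everything in a single prewrite round trip, so there is no long-lived lock that needs TTL heartbeats.)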
- c.Assert(txn.GetCommitter().IsTTLUninitialized(), IsTrue) - - // 1PC doesn't work if sessionID == 0 - k2 := []byte("k2") - v2 := []byte("v2") - - txn = s.begin1PC(c) - err = txn.Set(k2, v2) - c.Assert(err, IsNil) - err = txn.Commit(context.Background()) - c.Assert(err, IsNil) - c.Assert(txn.GetCommitter().IsOnePC(), IsFalse) - c.Assert(txn.GetCommitter().GetOnePCCommitTS(), Equals, uint64(0)) - c.Assert(txn.GetCommitter().GetCommitTS(), Greater, txn.StartTS()) - - // 1PC doesn't work if system variable not set - - k3 := []byte("k3") - v3 := []byte("v3") - - txn = s.begin(c) - err = txn.Set(k3, v3) - c.Assert(err, IsNil) - err = txn.Commit(ctx) - c.Assert(err, IsNil) - c.Assert(txn.GetCommitter().IsOnePC(), IsFalse) - c.Assert(txn.GetCommitter().GetOnePCCommitTS(), Equals, uint64(0)) - c.Assert(txn.GetCommitter().GetCommitTS(), Greater, txn.StartTS()) - - // Test multiple keys - k4 := []byte("k4") - v4 := []byte("v4") - k5 := []byte("k5") - v5 := []byte("v5") - k6 := []byte("k6") - v6 := []byte("v6") - - txn = s.begin1PC(c) - err = txn.Set(k4, v4) - c.Assert(err, IsNil) - err = txn.Set(k5, v5) - c.Assert(err, IsNil) - err = txn.Set(k6, v6) - c.Assert(err, IsNil) - err = txn.Commit(ctx) - c.Assert(err, IsNil) - c.Assert(txn.GetCommitter().IsOnePC(), IsTrue) - c.Assert(txn.GetCommitter().GetOnePCCommitTS(), Equals, txn.GetCommitter().GetCommitTS()) - c.Assert(txn.GetCommitter().GetOnePCCommitTS(), Greater, txn.StartTS()) - // Check keys are committed with the same version - s.mustGetFromSnapshot(c, txn.GetCommitTS(), k4, v4) - s.mustGetFromSnapshot(c, txn.GetCommitTS(), k5, v5) - s.mustGetFromSnapshot(c, txn.GetCommitTS(), k6, v6) - s.mustGetNoneFromSnapshot(c, txn.GetCommitTS()-1, k4) - s.mustGetNoneFromSnapshot(c, txn.GetCommitTS()-1, k5) - s.mustGetNoneFromSnapshot(c, txn.GetCommitTS()-1, k6) - - // Overwriting in MVCC - v6New := []byte("v6new") - txn = s.begin1PC(c) - err = txn.Set(k6, v6New) - c.Assert(err, IsNil) - err = txn.Commit(ctx) - c.Assert(err, IsNil) - c.Assert(txn.GetCommitter().IsOnePC(), IsTrue) - c.Assert(txn.GetCommitter().GetOnePCCommitTS(), Equals, txn.GetCommitter().GetCommitTS()) - c.Assert(txn.GetCommitter().GetOnePCCommitTS(), Greater, txn.StartTS()) - s.mustGetFromSnapshot(c, txn.GetCommitTS(), k6, v6New) - s.mustGetFromSnapshot(c, txn.GetCommitTS()-1, k6, v6) - - // Check all keys - keys := [][]byte{k1, k2, k3, k4, k5, k6} - values := [][]byte{v1, v2, v3, v4, v5, v6New} - ver, err := s.store.CurrentTimestamp(oracle.GlobalTxnScope) - c.Assert(err, IsNil) - snap := s.store.GetSnapshot(ver) - for i, k := range keys { - v, err := snap.Get(ctx, k) - c.Assert(err, IsNil) - c.Assert(v, BytesEquals, values[i]) - } -} - -func (s *testOnePCSuite) Test1PCIsolation(c *C) { - ctx := context.WithValue(context.Background(), util.SessionID, uint64(1)) - - k := []byte("k") - v1 := []byte("v1") - - txn := s.begin1PC(c) - txn.Set(k, v1) - err := txn.Commit(ctx) - c.Assert(err, IsNil) - - v2 := []byte("v2") - txn = s.begin1PC(c) - txn.Set(k, v2) - - // Make `txn`'s commitTs more likely to be less than `txn2`'s startTs if there's bug in commitTs - // calculation. 
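- // Fetch a few extra timestamps so that a miscalculated commitTS would fall behind txn2's startTS and show up in the reads below.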
- for i := 0; i < 10; i++ { - _, err := s.store.GetOracle().GetTimestamp(ctx, &oracle.Option{TxnScope: oracle.GlobalTxnScope}) - c.Assert(err, IsNil) - } - - txn2 := s.begin1PC(c) - s.mustGetFromTxn(c, txn2, k, v1) - - err = txn.Commit(ctx) - c.Assert(txn.GetCommitter().IsOnePC(), IsTrue) - c.Assert(err, IsNil) - - s.mustGetFromTxn(c, txn2, k, v1) - c.Assert(txn2.Rollback(), IsNil) - - s.mustGetFromSnapshot(c, txn.GetCommitTS(), k, v2) - s.mustGetFromSnapshot(c, txn.GetCommitTS()-1, k, v1) -} - -func (s *testOnePCSuite) Test1PCDisallowMultiRegion(c *C) { - // This test doesn't support tikv mode. - if *mockstore.WithTiKV { - return - } - - ctx := context.WithValue(context.Background(), util.SessionID, uint64(1)) - - txn := s.begin1PC(c) - - keys := []string{"k0", "k1", "k2", "k3"} - values := []string{"v0", "v1", "v2", "v3"} - - err := txn.Set([]byte(keys[0]), []byte(values[0])) - c.Assert(err, IsNil) - err = txn.Set([]byte(keys[3]), []byte(values[3])) - c.Assert(err, IsNil) - err = txn.Commit(ctx) - c.Assert(err, IsNil) - - // 1PC doesn't work if it affects multiple regions. - loc, err := s.store.GetRegionCache().LocateKey(s.bo, []byte(keys[2])) - c.Assert(err, IsNil) - newRegionID := s.cluster.AllocID() - newPeerID := s.cluster.AllocID() - s.cluster.Split(loc.Region.GetID(), newRegionID, []byte(keys[2]), []uint64{newPeerID}, newPeerID) - - txn = s.begin1PC(c) - err = txn.Set([]byte(keys[1]), []byte(values[1])) - c.Assert(err, IsNil) - err = txn.Set([]byte(keys[2]), []byte(values[2])) - c.Assert(err, IsNil) - err = txn.Commit(ctx) - c.Assert(err, IsNil) - c.Assert(txn.GetCommitter().IsOnePC(), IsFalse) - c.Assert(txn.GetCommitter().GetOnePCCommitTS(), Equals, uint64(0)) - c.Assert(txn.GetCommitter().GetCommitTS(), Greater, txn.StartTS()) - - ver, err := s.store.CurrentTimestamp(oracle.GlobalTxnScope) - c.Assert(err, IsNil) - snap := s.store.GetSnapshot(ver) - for i, k := range keys { - v, err := snap.Get(ctx, []byte(k)) - c.Assert(err, IsNil) - c.Assert(v, BytesEquals, []byte(values[i])) - } -} - -// It's just a simple validation of linearizability. -// Extra tests are needed to test this feature with the control of the TiKV cluster. 
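-// The assertions below only check that the commit order matches real-time order: t2 commits first, so commitTS2 < commitTS1.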
-func (s *testOnePCSuite) Test1PCLinearizability(c *C) { - t1 := s.begin(c) - t2 := s.begin(c) - err := t1.Set([]byte("a"), []byte("a1")) - c.Assert(err, IsNil) - err = t2.Set([]byte("b"), []byte("b1")) - c.Assert(err, IsNil) - ctx := context.WithValue(context.Background(), util.SessionID, uint64(1)) - // t2 commits earlier than t1 - err = t2.Commit(ctx) - c.Assert(err, IsNil) - err = t1.Commit(ctx) - c.Assert(err, IsNil) - commitTS1 := t1.GetCommitter().GetCommitTS() - commitTS2 := t2.GetCommitter().GetCommitTS() - c.Assert(commitTS2, Less, commitTS1) -} - -func (s *testOnePCSuite) Test1PCWithMultiDC(c *C) { - // It requires setting placement rules to run with TiKV - if *mockstore.WithTiKV { - return - } - - localTxn := s.begin1PC(c) - err := localTxn.Set([]byte("a"), []byte("a1")) - localTxn.SetScope("bj") - c.Assert(err, IsNil) - ctx := context.WithValue(context.Background(), util.SessionID, uint64(1)) - err = localTxn.Commit(ctx) - c.Assert(err, IsNil) - c.Assert(localTxn.GetCommitter().IsOnePC(), IsFalse) - - globalTxn := s.begin1PC(c) - err = globalTxn.Set([]byte("b"), []byte("b1")) - globalTxn.SetScope(oracle.GlobalTxnScope) - c.Assert(err, IsNil) - err = globalTxn.Commit(ctx) - c.Assert(err, IsNil) - c.Assert(globalTxn.GetCommitter().IsOnePC(), IsTrue) -} - -func (s *testOnePCSuite) TestTxnCommitCounter(c *C) { - initial := metrics.GetTxnCommitCounter() - - // 2PC - txn := s.begin(c) - err := txn.Set([]byte("k"), []byte("v")) - c.Assert(err, IsNil) - ctx := context.WithValue(context.Background(), util.SessionID, uint64(1)) - err = txn.Commit(ctx) - c.Assert(err, IsNil) - curr := metrics.GetTxnCommitCounter() - diff := curr.Sub(initial) - c.Assert(diff.TwoPC, Equals, int64(1)) - c.Assert(diff.AsyncCommit, Equals, int64(0)) - c.Assert(diff.OnePC, Equals, int64(0)) - - // AsyncCommit - txn = s.beginAsyncCommit(c) - err = txn.Set([]byte("k1"), []byte("v1")) - c.Assert(err, IsNil) - err = txn.Commit(ctx) - c.Assert(err, IsNil) - curr = metrics.GetTxnCommitCounter() - diff = curr.Sub(initial) - c.Assert(diff.TwoPC, Equals, int64(1)) - c.Assert(diff.AsyncCommit, Equals, int64(1)) - c.Assert(diff.OnePC, Equals, int64(0)) - - // 1PC - txn = s.begin1PC(c) - err = txn.Set([]byte("k2"), []byte("v2")) - c.Assert(err, IsNil) - err = txn.Commit(ctx) - c.Assert(err, IsNil) - curr = metrics.GetTxnCommitCounter() - diff = curr.Sub(initial) - c.Assert(diff.TwoPC, Equals, int64(1)) - c.Assert(diff.AsyncCommit, Equals, int64(1)) - c.Assert(diff.OnePC, Equals, int64(1)) -} diff --git a/store/tikv/tests/2pc_fail_test.go b/store/tikv/tests/2pc_fail_test.go deleted file mode 100644 index 25cfb9de85a04..0000000000000 --- a/store/tikv/tests/2pc_fail_test.go +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright 2017 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -package tikv_test - -import ( - "context" - - . 
"github.com/pingcap/check" - "github.com/pingcap/errors" - "github.com/pingcap/failpoint" - "github.com/pingcap/parser/terror" - tikverr "github.com/tikv/client-go/v2/error" -) - -// TestFailCommitPrimaryRpcErrors tests rpc errors are handled properly when -// committing primary region task. -func (s *testCommitterSuite) TestFailCommitPrimaryRpcErrors(c *C) { - c.Assert(failpoint.Enable("tikvclient/rpcCommitResult", `return("timeout")`), IsNil) - defer func() { - c.Assert(failpoint.Disable("tikvclient/rpcCommitResult"), IsNil) - }() - // The rpc error will be wrapped to ErrResultUndetermined. - t1 := s.begin(c) - err := t1.Set([]byte("a"), []byte("a1")) - c.Assert(err, IsNil) - err = t1.Commit(context.Background()) - c.Assert(err, NotNil) - c.Assert(terror.ErrorEqual(err, terror.ErrResultUndetermined), IsTrue, Commentf("%s", errors.ErrorStack(err))) - - // We don't need to call "Rollback" after "Commit" fails. - err = t1.Rollback() - c.Assert(err, Equals, tikverr.ErrInvalidTxn) -} - -// TestFailCommitPrimaryRegionError tests RegionError is handled properly when -// committing primary region task. -func (s *testCommitterSuite) TestFailCommitPrimaryRegionError(c *C) { - c.Assert(failpoint.Enable("tikvclient/rpcCommitResult", `return("notLeader")`), IsNil) - defer func() { - c.Assert(failpoint.Disable("tikvclient/rpcCommitResult"), IsNil) - }() - // Ensure it returns the original error without wrapped to ErrResultUndetermined - // if it exceeds max retry timeout on RegionError. - t2 := s.begin(c) - err := t2.Set([]byte("b"), []byte("b1")) - c.Assert(err, IsNil) - err = t2.Commit(context.Background()) - c.Assert(err, NotNil) - c.Assert(terror.ErrorNotEqual(err, terror.ErrResultUndetermined), IsTrue) -} - -// TestFailCommitPrimaryRPCErrorThenRegionError tests the case when commit first -// receive a rpc timeout, then region errors afterwrards. -func (s *testCommitterSuite) TestFailCommitPrimaryRPCErrorThenRegionError(c *C) { - c.Assert(failpoint.Enable("tikvclient/rpcCommitResult", `1*return("timeout")->return("notLeader")`), IsNil) - defer func() { - c.Assert(failpoint.Disable("tikvclient/rpcCommitResult"), IsNil) - }() - // The region error will be wrapped to ErrResultUndetermined. - t1 := s.begin(c) - err := t1.Set([]byte("a"), []byte("a1")) - c.Assert(err, IsNil) - err = t1.Commit(context.Background()) - c.Assert(err, NotNil) - c.Assert(terror.ErrorEqual(err, terror.ErrResultUndetermined), IsTrue, Commentf("%s", errors.ErrorStack(err))) -} - -// TestFailCommitPrimaryKeyError tests KeyError is handled properly when -// committing primary region task. -func (s *testCommitterSuite) TestFailCommitPrimaryKeyError(c *C) { - c.Assert(failpoint.Enable("tikvclient/rpcCommitResult", `return("keyError")`), IsNil) - defer func() { - c.Assert(failpoint.Disable("tikvclient/rpcCommitResult"), IsNil) - }() - // Ensure it returns the original error without wrapped to ErrResultUndetermined - // if it meets KeyError. - t3 := s.begin(c) - err := t3.Set([]byte("c"), []byte("c1")) - c.Assert(err, IsNil) - err = t3.Commit(context.Background()) - c.Assert(err, NotNil) - c.Assert(terror.ErrorNotEqual(err, terror.ErrResultUndetermined), IsTrue) -} - -// TestFailCommitPrimaryRPCErrorThenKeyError tests KeyError overwrites the undeterminedErr. 
-func (s *testCommitterSuite) TestFailCommitPrimaryRPCErrorThenKeyError(c *C) { - c.Assert(failpoint.Enable("tikvclient/rpcCommitResult", `1*return("timeout")->return("keyError")`), IsNil) - defer func() { - c.Assert(failpoint.Disable("tikvclient/rpcCommitResult"), IsNil) - }() - // Ensure it returns the original error without wrapped to ErrResultUndetermined - // if it meets KeyError. - t3 := s.begin(c) - err := t3.Set([]byte("c"), []byte("c1")) - c.Assert(err, IsNil) - err = t3.Commit(context.Background()) - c.Assert(err, NotNil) - c.Assert(terror.ErrorEqual(err, terror.ErrResultUndetermined), IsFalse) -} - -func (s *testCommitterSuite) TestFailCommitTimeout(c *C) { - c.Assert(failpoint.Enable("tikvclient/rpcCommitTimeout", `return(true)`), IsNil) - defer func() { - c.Assert(failpoint.Disable("tikvclient/rpcCommitTimeout"), IsNil) - }() - txn := s.begin(c) - err := txn.Set([]byte("a"), []byte("a1")) - c.Assert(err, IsNil) - err = txn.Set([]byte("b"), []byte("b1")) - c.Assert(err, IsNil) - err = txn.Set([]byte("c"), []byte("c1")) - c.Assert(err, IsNil) - err = txn.Commit(context.Background()) - c.Assert(err, NotNil) - - txn2 := s.begin(c) - value, err := txn2.Get(context.TODO(), []byte("a")) - c.Assert(err, IsNil) - c.Assert(len(value), Greater, 0) - _, err = txn2.Get(context.TODO(), []byte("b")) - c.Assert(err, IsNil) - c.Assert(len(value), Greater, 0) -} diff --git a/store/tikv/tests/2pc_slow_test.go b/store/tikv/tests/2pc_slow_test.go deleted file mode 100644 index f36bc5e1ff8f1..0000000000000 --- a/store/tikv/tests/2pc_slow_test.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2016 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !race - -package tikv_test - -import ( - . "github.com/pingcap/check" -) - -// TestCommitMultipleRegions tests commit multiple regions. -// The test takes too long under the race detector. -func (s *testCommitterSuite) TestCommitMultipleRegions(c *C) { - m := make(map[string]string) - for i := 0; i < 100; i++ { - k, v := randKV(10, 10) - m[k] = v - } - s.mustCommit(c, m) - - // Test big values. - m = make(map[string]string) - for i := 0; i < 50; i++ { - k, v := randKV(11, int(txnCommitBatchSize)/7) - m[k] = v - } - s.mustCommit(c, m) -} diff --git a/store/tikv/tests/2pc_test.go b/store/tikv/tests/2pc_test.go deleted file mode 100644 index 7f8f786e9a23b..0000000000000 --- a/store/tikv/tests/2pc_test.go +++ /dev/null @@ -1,1394 +0,0 @@ -// Copyright 2016 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -package tikv_test - -import ( - "bytes" - "context" - "fmt" - "math" - "math/rand" - "strings" - "sync" - "sync/atomic" - "time" - - . 
"github.com/pingcap/check" - "github.com/pingcap/errors" - "github.com/pingcap/failpoint" - "github.com/pingcap/kvproto/pkg/kvrpcpb" - drivertxn "github.com/pingcap/tidb/store/driver/txn" - "github.com/tikv/client-go/v2/config" - tikverr "github.com/tikv/client-go/v2/error" - "github.com/tikv/client-go/v2/kv" - "github.com/tikv/client-go/v2/mockstore/cluster" - "github.com/tikv/client-go/v2/mockstore/mocktikv" - "github.com/tikv/client-go/v2/oracle" - "github.com/tikv/client-go/v2/tikv" - "github.com/tikv/client-go/v2/tikvrpc" -) - -var ( - txnCommitBatchSize = tikv.ConfigProbe{}.GetTxnCommitBatchSize() - bigTxnThreshold = tikv.ConfigProbe{}.GetBigTxnThreshold() -) - -type testCommitterSuite struct { - OneByOneSuite - cluster cluster.Cluster - store tikv.StoreProbe -} - -var _ = SerialSuites(&testCommitterSuite{}) - -func (s *testCommitterSuite) SetUpSuite(c *C) { - atomic.StoreUint64(&tikv.ManagedLockTTL, 3000) // 3s - s.OneByOneSuite.SetUpSuite(c) - atomic.StoreUint64(&tikv.CommitMaxBackoff, 1000) - atomic.StoreUint64(&tikv.VeryLongMaxBackoff, 1000) -} - -func (s *testCommitterSuite) SetUpTest(c *C) { - mvccStore, err := mocktikv.NewMVCCLevelDB("") - c.Assert(err, IsNil) - cluster := mocktikv.NewCluster(mvccStore) - mocktikv.BootstrapWithMultiRegions(cluster, []byte("a"), []byte("b"), []byte("c")) - s.cluster = cluster - client := mocktikv.NewRPCClient(cluster, mvccStore, nil) - pdCli := &tikv.CodecPDClient{Client: mocktikv.NewPDClient(cluster)} - spkv := tikv.NewMockSafePointKV() - store, err := tikv.NewKVStore("mocktikv-store", pdCli, spkv, client) - store.EnableTxnLocalLatches(1024000) - c.Assert(err, IsNil) - - // TODO: make it possible - // store, err := mockstore.NewMockStore( - // mockstore.WithStoreType(mockstore.MockTiKV), - // mockstore.WithClusterInspector(func(c cluster.Cluster) { - // mockstore.BootstrapWithMultiRegions(c, []byte("a"), []byte("b"), []byte("c")) - // s.cluster = c - // }), - // mockstore.WithPDClientHijacker(func(c pd.Client) pd.Client { - // return &codecPDClient{c} - // }), - // mockstore.WithTxnLocalLatches(1024000), - // ) - // c.Assert(err, IsNil) - - s.store = tikv.StoreProbe{KVStore: store} -} - -func (s *testCommitterSuite) TearDownSuite(c *C) { - atomic.StoreUint64(&tikv.CommitMaxBackoff, 20000) - atomic.StoreUint64(&tikv.VeryLongMaxBackoff, 600000) - s.store.Close() - s.OneByOneSuite.TearDownSuite(c) -} - -func (s *testCommitterSuite) begin(c *C) tikv.TxnProbe { - txn, err := s.store.Begin() - c.Assert(err, IsNil) - return txn -} - -func (s *testCommitterSuite) beginAsyncCommit(c *C) tikv.TxnProbe { - txn, err := s.store.Begin() - c.Assert(err, IsNil) - txn.SetEnableAsyncCommit(true) - return txn -} - -func (s *testCommitterSuite) checkValues(c *C, m map[string]string) { - txn := s.begin(c) - for k, v := range m { - val, err := txn.Get(context.TODO(), []byte(k)) - c.Assert(err, IsNil) - c.Assert(string(val), Equals, v) - } -} - -func (s *testCommitterSuite) mustCommit(c *C, m map[string]string) { - txn := s.begin(c) - for k, v := range m { - err := txn.Set([]byte(k), []byte(v)) - c.Assert(err, IsNil) - } - err := txn.Commit(context.Background()) - c.Assert(err, IsNil) - - s.checkValues(c, m) -} - -func randKV(keyLen, valLen int) (string, string) { - const letters = "abc" - k, v := make([]byte, keyLen), make([]byte, valLen) - for i := range k { - k[i] = letters[rand.Intn(len(letters))] - } - for i := range v { - v[i] = letters[rand.Intn(len(letters))] - } - return string(k), string(v) -} - -func (s *testCommitterSuite) TestDeleteYourWritesTTL(c *C) { 
- conf := *config.GetGlobalConfig() - oldConf := conf - defer config.StoreGlobalConfig(&oldConf) - conf.TiKVClient.TTLRefreshedTxnSize = 0 - config.StoreGlobalConfig(&conf) - - { - txn := s.begin(c) - err := txn.GetMemBuffer().SetWithFlags([]byte("bb"), []byte{0}, kv.SetPresumeKeyNotExists) - c.Assert(err, IsNil) - err = txn.Set([]byte("ba"), []byte{1}) - c.Assert(err, IsNil) - err = txn.Delete([]byte("bb")) - c.Assert(err, IsNil) - committer, err := txn.NewCommitter(0) - c.Assert(err, IsNil) - err = committer.PrewriteAllMutations(context.Background()) - c.Assert(err, IsNil) - c.Check(committer.IsTTLRunning(), IsTrue) - } - - { - txn := s.begin(c) - err := txn.GetMemBuffer().SetWithFlags([]byte("dd"), []byte{0}, kv.SetPresumeKeyNotExists) - c.Assert(err, IsNil) - err = txn.Set([]byte("de"), []byte{1}) - c.Assert(err, IsNil) - err = txn.Delete([]byte("dd")) - c.Assert(err, IsNil) - committer, err := txn.NewCommitter(0) - c.Assert(err, IsNil) - err = committer.PrewriteAllMutations(context.Background()) - c.Assert(err, IsNil) - c.Check(committer.IsTTLRunning(), IsTrue) - } -} - -func (s *testCommitterSuite) TestCommitRollback(c *C) { - s.mustCommit(c, map[string]string{ - "a": "a", - "b": "b", - "c": "c", - }) - - txn := s.begin(c) - txn.Set([]byte("a"), []byte("a1")) - txn.Set([]byte("b"), []byte("b1")) - txn.Set([]byte("c"), []byte("c1")) - - s.mustCommit(c, map[string]string{ - "c": "c2", - }) - - err := txn.Commit(context.Background()) - c.Assert(err, NotNil) - - s.checkValues(c, map[string]string{ - "a": "a", - "b": "b", - "c": "c2", - }) -} - -func (s *testCommitterSuite) TestPrewriteRollback(c *C) { - s.mustCommit(c, map[string]string{ - "a": "a0", - "b": "b0", - }) - ctx := context.Background() - txn1 := s.begin(c) - err := txn1.Set([]byte("a"), []byte("a1")) - c.Assert(err, IsNil) - err = txn1.Set([]byte("b"), []byte("b1")) - c.Assert(err, IsNil) - committer, err := txn1.NewCommitter(0) - c.Assert(err, IsNil) - err = committer.PrewriteAllMutations(ctx) - c.Assert(err, IsNil) - - txn2 := s.begin(c) - v, err := txn2.Get(context.TODO(), []byte("a")) - c.Assert(err, IsNil) - c.Assert(v, BytesEquals, []byte("a0")) - - err = committer.PrewriteAllMutations(ctx) - if err != nil { - // Retry. 
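- // The second prewrite may fail because txn2's read above can resolve txn1's lock; if so, retry with a fresh transaction.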
- txn1 = s.begin(c) - err = txn1.Set([]byte("a"), []byte("a1")) - c.Assert(err, IsNil) - err = txn1.Set([]byte("b"), []byte("b1")) - c.Assert(err, IsNil) - committer, err = txn1.NewCommitter(0) - c.Assert(err, IsNil) - err = committer.PrewriteAllMutations(ctx) - c.Assert(err, IsNil) - } - commitTS, err := s.store.GetOracle().GetTimestamp(ctx, &oracle.Option{TxnScope: oracle.GlobalTxnScope}) - c.Assert(err, IsNil) - committer.SetCommitTS(commitTS) - err = committer.CommitMutations(ctx) - c.Assert(err, IsNil) - - txn3 := s.begin(c) - v, err = txn3.Get(context.TODO(), []byte("b")) - c.Assert(err, IsNil) - c.Assert(v, BytesEquals, []byte("b1")) -} - -func (s *testCommitterSuite) TestContextCancel(c *C) { - txn1 := s.begin(c) - err := txn1.Set([]byte("a"), []byte("a1")) - c.Assert(err, IsNil) - err = txn1.Set([]byte("b"), []byte("b1")) - c.Assert(err, IsNil) - committer, err := txn1.NewCommitter(0) - c.Assert(err, IsNil) - - ctx, cancel := context.WithCancel(context.Background()) - cancel() // cancel the context - err = committer.PrewriteAllMutations(ctx) - c.Assert(errors.Cause(err), Equals, context.Canceled) -} - -func (s *testCommitterSuite) TestContextCancel2(c *C) { - txn := s.begin(c) - err := txn.Set([]byte("a"), []byte("a")) - c.Assert(err, IsNil) - err = txn.Set([]byte("b"), []byte("b")) - c.Assert(err, IsNil) - ctx, cancel := context.WithCancel(context.Background()) - err = txn.Commit(ctx) - c.Assert(err, IsNil) - cancel() - // Secondary keys should not be canceled. - time.Sleep(time.Millisecond * 20) - c.Assert(s.isKeyLocked(c, []byte("b")), IsFalse) -} - -func (s *testCommitterSuite) TestContextCancelRetryable(c *C) { - txn1, txn2, txn3 := s.begin(c), s.begin(c), s.begin(c) - // txn1 locks "b" - err := txn1.Set([]byte("b"), []byte("b1")) - c.Assert(err, IsNil) - committer, err := txn1.NewCommitter(0) - c.Assert(err, IsNil) - err = committer.PrewriteAllMutations(context.Background()) - c.Assert(err, IsNil) - // txn3 writes "c" - err = txn3.Set([]byte("c"), []byte("c3")) - c.Assert(err, IsNil) - err = txn3.Commit(context.Background()) - c.Assert(err, IsNil) - // txn2 writes "a"(PK), "b", "c" on different regions. - // "c" will return a retryable error. - // "b" will get a Locked error first, then the context must be canceled after backoff for lock. - err = txn2.Set([]byte("a"), []byte("a2")) - c.Assert(err, IsNil) - err = txn2.Set([]byte("b"), []byte("b2")) - c.Assert(err, IsNil) - err = txn2.Set([]byte("c"), []byte("c2")) - c.Assert(err, IsNil) - err = txn2.Commit(context.Background()) - c.Assert(err, NotNil) - _, ok := err.(*tikverr.ErrWriteConflictInLatch) - c.Assert(ok, IsTrue, Commentf("err: %s", err)) -} - -func (s *testCommitterSuite) TestContextCancelCausingUndetermined(c *C) { - // For a normal transaction, if RPC returns context.Canceled error while sending commit - // requests, the transaction should go to the undetermined state. 
- txn := s.begin(c)
- err := txn.Set([]byte("a"), []byte("va"))
- c.Assert(err, IsNil)
- committer, err := txn.NewCommitter(0)
- c.Assert(err, IsNil)
- err = committer.PrewriteAllMutations(context.Background())
- c.Assert(err, IsNil)
-
- c.Assert(failpoint.Enable("tikvclient/rpcContextCancelErr", `return(true)`), IsNil)
- defer func() {
- c.Assert(failpoint.Disable("tikvclient/rpcContextCancelErr"), IsNil)
- }()
-
- err = committer.CommitMutations(context.Background())
- c.Assert(committer.GetUndeterminedErr(), NotNil)
- c.Assert(errors.Cause(err), Equals, context.Canceled)
-}
-
-func (s *testCommitterSuite) mustGetRegionID(c *C, key []byte) uint64 {
- loc, err := s.store.GetRegionCache().LocateKey(tikv.NewBackofferWithVars(context.Background(), 500, nil), key)
- c.Assert(err, IsNil)
- return loc.Region.GetID()
-}
-
-func (s *testCommitterSuite) isKeyLocked(c *C, key []byte) bool {
- ver, err := s.store.CurrentTimestamp(oracle.GlobalTxnScope)
- c.Assert(err, IsNil)
- bo := tikv.NewBackofferWithVars(context.Background(), 500, nil)
- req := tikvrpc.NewRequest(tikvrpc.CmdGet, &kvrpcpb.GetRequest{
- Key: key,
- Version: ver,
- })
- loc, err := s.store.GetRegionCache().LocateKey(bo, key)
- c.Assert(err, IsNil)
- resp, err := s.store.SendReq(bo, req, loc.Region, 5000)
- c.Assert(err, IsNil)
- c.Assert(resp.Resp, NotNil)
- keyErr := (resp.Resp.(*kvrpcpb.GetResponse)).GetError()
- return keyErr.GetLocked() != nil
-}
-
-func (s *testCommitterSuite) TestPrewriteCancel(c *C) {
- // Set up region delays for keys "b" and "c".
- delays := map[uint64]time.Duration{
- s.mustGetRegionID(c, []byte("b")): time.Millisecond * 10,
- s.mustGetRegionID(c, []byte("c")): time.Millisecond * 20,
- }
- s.store.SetTiKVClient(&slowClient{
- Client: s.store.GetTiKVClient(),
- regionDelays: delays,
- })
-
- txn1, txn2 := s.begin(c), s.begin(c)
- // txn2 writes "b"
- err := txn2.Set([]byte("b"), []byte("b2"))
- c.Assert(err, IsNil)
- err = txn2.Commit(context.Background())
- c.Assert(err, IsNil)
- // txn1 writes "a"(PK), "b", "c" on different regions.
- // "b" will return an error and cancel the commit.
- err = txn1.Set([]byte("a"), []byte("a1"))
- c.Assert(err, IsNil)
- err = txn1.Set([]byte("b"), []byte("b1"))
- c.Assert(err, IsNil)
- err = txn1.Set([]byte("c"), []byte("c1"))
- c.Assert(err, IsNil)
- err = txn1.Commit(context.Background())
- c.Assert(err, NotNil)
- // "c" should be cleaned up in a reasonable time.
- for i := 0; i < 50; i++ {
- if !s.isKeyLocked(c, []byte("c")) {
- return
- }
- time.Sleep(time.Millisecond * 10)
- }
- c.Fail()
-}
-
-// slowClient wraps rpcClient and makes some regions respond with delay.
-type slowClient struct {
- tikv.Client
- regionDelays map[uint64]time.Duration
-}
-
-func (c *slowClient) SendReq(ctx context.Context, addr string, req *tikvrpc.Request, timeout time.Duration) (*tikvrpc.Response, error) {
- for id, delay := range c.regionDelays {
- reqCtx := &req.Context
- if reqCtx.GetRegionId() == id {
- time.Sleep(delay)
- }
- }
- return c.Client.SendRequest(ctx, addr, req, timeout)
-}
-
-func (s *testCommitterSuite) TestIllegalTso(c *C) {
- txn := s.begin(c)
- data := map[string]string{
- "name": "aa",
- "age": "12",
- }
- for k, v := range data {
- err := txn.Set([]byte(k), []byte(v))
- c.Assert(err, IsNil)
- }
- // make start ts bigger.
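- // math.MaxUint64 can never be a TSO-assigned timestamp, so the commit must be rejected as invalid.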
- txn.SetStartTS(math.MaxUint64)
- err := txn.Commit(context.Background())
- c.Assert(err, NotNil)
- errMsgMustContain(c, err, "invalid txnStartTS")
-}
-
-func errMsgMustContain(c *C, err error, msg string) {
- c.Assert(strings.Contains(err.Error(), msg), IsTrue)
-}
-
-func (s *testCommitterSuite) TestCommitBeforePrewrite(c *C) {
- txn := s.begin(c)
- err := txn.Set([]byte("a"), []byte("a1"))
- c.Assert(err, IsNil)
- committer, err := txn.NewCommitter(0)
- c.Assert(err, IsNil)
- ctx := context.Background()
- committer.Cleanup(ctx)
- err = committer.PrewriteAllMutations(ctx)
- c.Assert(err, NotNil)
- errMsgMustContain(c, err, "already rolled back")
-}
-
-func (s *testCommitterSuite) TestPrewritePrimaryKeyFailed(c *C) {
- // commit (a,a1)
- txn1 := s.begin(c)
- err := txn1.Set([]byte("a"), []byte("a1"))
- c.Assert(err, IsNil)
- err = txn1.Commit(context.Background())
- c.Assert(err, IsNil)
-
- // check a
- txn := s.begin(c)
- v, err := txn.Get(context.TODO(), []byte("a"))
- c.Assert(err, IsNil)
- c.Assert(v, BytesEquals, []byte("a1"))
-
- // set txn2's startTs before txn1's
- txn2 := s.begin(c)
- txn2.SetStartTS(txn1.StartTS() - 1)
- err = txn2.Set([]byte("a"), []byte("a2"))
- c.Assert(err, IsNil)
- err = txn2.Set([]byte("b"), []byte("b2"))
- c.Assert(err, IsNil)
- // prewrite: the primary key a fails, b succeeds
- err = txn2.Commit(context.Background())
- c.Assert(err, NotNil)
-
- // txn2 failed, leaving a rollback record for key a.
- txn = s.begin(c)
- v, err = txn.Get(context.TODO(), []byte("a"))
- c.Assert(err, IsNil)
- c.Assert(v, BytesEquals, []byte("a1"))
- _, err = txn.Get(context.TODO(), []byte("b"))
- c.Assert(tikverr.IsErrNotFound(err), IsTrue)
-
- // clean up again; it shouldn't fail when a rollback record already exists.
- ctx := context.Background()
- committer, err := txn2.NewCommitter(0)
- c.Assert(err, IsNil)
- committer.Cleanup(ctx)
-
- // check the data after rolling back twice.
- txn = s.begin(c)
- v, err = txn.Get(context.TODO(), []byte("a"))
- c.Assert(err, IsNil)
- c.Assert(v, BytesEquals, []byte("a1"))
-
- // update the data in a new txn; it should succeed.
- err = txn.Set([]byte("a"), []byte("a3"))
- c.Assert(err, IsNil)
- err = txn.Commit(context.Background())
- c.Assert(err, IsNil)
- // check value
- txn = s.begin(c)
- v, err = txn.Get(context.TODO(), []byte("a"))
- c.Assert(err, IsNil)
- c.Assert(v, BytesEquals, []byte("a3"))
-}
-
-func (s *testCommitterSuite) TestWrittenKeysOnConflict(c *C) {
- // This test checks that when there is a write conflict, written keys are collected,
- // so we can use them to clean up keys.
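- // The split at "y" below makes the conflicting writes (x1 and y1) span two regions.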
- region, _ := s.cluster.GetRegionByKey([]byte("x")) - newRegionID := s.cluster.AllocID() - newPeerID := s.cluster.AllocID() - s.cluster.Split(region.Id, newRegionID, []byte("y"), []uint64{newPeerID}, newPeerID) - var totalTime time.Duration - for i := 0; i < 10; i++ { - txn1 := s.begin(c) - txn2 := s.begin(c) - txn2.Set([]byte("x1"), []byte("1")) - committer2, err := txn2.NewCommitter(2) - c.Assert(err, IsNil) - err = committer2.Execute(context.Background()) - c.Assert(err, IsNil) - txn1.Set([]byte("x1"), []byte("1")) - txn1.Set([]byte("y1"), []byte("2")) - committer1, err := txn1.NewCommitter(2) - c.Assert(err, IsNil) - err = committer1.Execute(context.Background()) - c.Assert(err, NotNil) - committer1.WaitCleanup() - txn3 := s.begin(c) - start := time.Now() - txn3.Get(context.TODO(), []byte("y1")) - totalTime += time.Since(start) - txn3.Commit(context.Background()) - } - c.Assert(totalTime, Less, time.Millisecond*200) -} - -func (s *testCommitterSuite) TestPrewriteTxnSize(c *C) { - // Prepare two regions first: (, 100) and [100, ) - region, _ := s.cluster.GetRegionByKey([]byte{50}) - newRegionID := s.cluster.AllocID() - newPeerID := s.cluster.AllocID() - s.cluster.Split(region.Id, newRegionID, []byte{100}, []uint64{newPeerID}, newPeerID) - - txn := s.begin(c) - var val [1024]byte - for i := byte(50); i < 120; i++ { - err := txn.Set([]byte{i}, val[:]) - c.Assert(err, IsNil) - } - - committer, err := txn.NewCommitter(1) - c.Assert(err, IsNil) - - ctx := context.Background() - err = committer.PrewriteAllMutations(ctx) - c.Assert(err, IsNil) - - // Check the written locks in the first region (50 keys) - for i := byte(50); i < 100; i++ { - lock := s.getLockInfo(c, []byte{i}) - c.Assert(int(lock.TxnSize), Equals, 50) - } - - // Check the written locks in the second region (20 keys) - for i := byte(100); i < 120; i++ { - lock := s.getLockInfo(c, []byte{i}) - c.Assert(int(lock.TxnSize), Equals, 20) - } -} - -func (s *testCommitterSuite) TestRejectCommitTS(c *C) { - txn := s.begin(c) - c.Assert(txn.Set([]byte("x"), []byte("v")), IsNil) - - committer, err := txn.NewCommitter(1) - c.Assert(err, IsNil) - bo := tikv.NewBackofferWithVars(context.Background(), 5000, nil) - loc, err := s.store.GetRegionCache().LocateKey(bo, []byte("x")) - c.Assert(err, IsNil) - mutations := []*kvrpcpb.Mutation{ - { - Op: committer.GetMutations().GetOp(0), - Key: committer.GetMutations().GetKey(0), - Value: committer.GetMutations().GetValue(0), - }, - } - prewrite := &kvrpcpb.PrewriteRequest{ - Mutations: mutations, - PrimaryLock: committer.GetPrimaryKey(), - StartVersion: committer.GetStartTS(), - LockTtl: committer.GetLockTTL(), - MinCommitTs: committer.GetStartTS() + 100, // Set minCommitTS - } - req := tikvrpc.NewRequest(tikvrpc.CmdPrewrite, prewrite) - _, err = s.store.SendReq(bo, req, loc.Region, 5000) - c.Assert(err, IsNil) - - // Make commitTS less than minCommitTS. - committer.SetCommitTS(committer.GetStartTS() + 1) - // Ensure that the new commit ts is greater than minCommitTS when retry - time.Sleep(3 * time.Millisecond) - err = committer.CommitMutations(context.Background()) - c.Assert(err, IsNil) - - // Use startTS+2 to read the data and get nothing. - // Use max.Uint64 to read the data and success. - // That means the final commitTS > startTS+2, it's not the one we provide. - // So we cover the rety commitTS logic. 
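- // Reading at startTS+2 must miss, because the actual commitTS was pushed above minCommitTS (startTS+100).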
- txn1, err := s.store.BeginWithOption(tikv.DefaultStartTSOption().SetStartTS(committer.GetStartTS() + 2))
- c.Assert(err, IsNil)
- _, err = txn1.Get(bo.GetCtx(), []byte("x"))
- c.Assert(tikverr.IsErrNotFound(err), IsTrue)
-
- txn2, err := s.store.BeginWithOption(tikv.DefaultStartTSOption().SetStartTS(math.MaxUint64))
- c.Assert(err, IsNil)
- val, err := txn2.Get(bo.GetCtx(), []byte("x"))
- c.Assert(err, IsNil)
- c.Assert(bytes.Equal(val, []byte("v")), IsTrue)
-}
-
-func (s *testCommitterSuite) TestPessimisticPrewriteRequest(c *C) {
- // This test checks that the isPessimisticLock field is set in the request even when no keys are pessimistically locked.
- txn := s.begin(c)
- txn.SetPessimistic(true)
- err := txn.Set([]byte("t1"), []byte("v1"))
- c.Assert(err, IsNil)
- committer, err := txn.NewCommitter(0)
- c.Assert(err, IsNil)
- committer.SetForUpdateTS(100)
- req := committer.BuildPrewriteRequest(1, 1, 1, committer.GetMutations().Slice(0, 1), 1)
- c.Assert(len(req.Prewrite().IsPessimisticLock), Greater, 0)
- c.Assert(req.Prewrite().ForUpdateTs, Equals, uint64(100))
-}
-
-func (s *testCommitterSuite) TestUnsetPrimaryKey(c *C) {
- // This test checks that the primary key is unset after a failed pessimistic lock, so the transaction can still commit.
- key := []byte("key")
- txn := s.begin(c)
- c.Assert(txn.Set(key, key), IsNil)
- c.Assert(txn.Commit(context.Background()), IsNil)
-
- txn = s.begin(c)
- txn.SetPessimistic(true)
- _, _ = txn.GetUnionStore().Get(context.TODO(), key)
- c.Assert(txn.GetMemBuffer().SetWithFlags(key, key, kv.SetPresumeKeyNotExists), IsNil)
- lockCtx := &kv.LockCtx{ForUpdateTS: txn.StartTS(), WaitStartTime: time.Now()}
- err := txn.LockKeys(context.Background(), lockCtx, key)
- c.Assert(err, NotNil)
- c.Assert(txn.Delete(key), IsNil)
- key2 := []byte("key2")
- c.Assert(txn.Set(key2, key2), IsNil)
- err = txn.Commit(context.Background())
- c.Assert(err, IsNil)
-}
-
-func (s *testCommitterSuite) TestPessimisticLockedKeysDedup(c *C) {
- txn := s.begin(c)
- txn.SetPessimistic(true)
- lockCtx := &kv.LockCtx{ForUpdateTS: 100, WaitStartTime: time.Now()}
- err := txn.LockKeys(context.Background(), lockCtx, []byte("abc"), []byte("def"))
- c.Assert(err, IsNil)
- lockCtx = &kv.LockCtx{ForUpdateTS: 100, WaitStartTime: time.Now()}
- err = txn.LockKeys(context.Background(), lockCtx, []byte("abc"), []byte("def"))
- c.Assert(err, IsNil)
- c.Assert(txn.CollectLockedKeys(), HasLen, 2)
-}
-
-func (s *testCommitterSuite) TestPessimisticTTL(c *C) {
- key := []byte("key")
- txn := s.begin(c)
- txn.SetPessimistic(true)
- time.Sleep(time.Millisecond * 100)
- lockCtx := &kv.LockCtx{ForUpdateTS: txn.StartTS(), WaitStartTime: time.Now()}
- err := txn.LockKeys(context.Background(), lockCtx, key)
- c.Assert(err, IsNil)
- time.Sleep(time.Millisecond * 100)
- key2 := []byte("key2")
- lockCtx = &kv.LockCtx{ForUpdateTS: txn.StartTS(), WaitStartTime: time.Now()}
- err = txn.LockKeys(context.Background(), lockCtx, key2)
- c.Assert(err, IsNil)
- lockInfo := s.getLockInfo(c, key)
- msBeforeLockExpired := s.store.GetOracle().UntilExpired(txn.StartTS(), lockInfo.LockTtl, &oracle.Option{TxnScope: oracle.GlobalTxnScope})
- c.Assert(msBeforeLockExpired, GreaterEqual, int64(100))
-
- lr := s.store.NewLockResolver()
- bo := tikv.NewBackofferWithVars(context.Background(), 5000, nil)
- status, err := lr.GetTxnStatus(bo, txn.StartTS(), key2, 0, txn.StartTS(), true, false, nil)
- c.Assert(err, IsNil)
- c.Assert(status.TTL(), GreaterEqual, lockInfo.LockTtl)
-
- // Check that the primary lock TTL keeps increasing while the pessimistic txn is ongoing.
- for i := 0; i < 50; i++ {
- lockInfoNew := s.getLockInfo(c, key)
- if lockInfoNew.LockTtl > lockInfo.LockTtl {
- currentTS, err := s.store.GetOracle().GetTimestamp(bo.GetCtx(), &oracle.Option{TxnScope: oracle.GlobalTxnScope})
- c.Assert(err, IsNil)
- // Check that the TTL is updated to a reasonable range.
- expire := oracle.ExtractPhysical(txn.StartTS()) + int64(lockInfoNew.LockTtl)
- now := oracle.ExtractPhysical(currentTS)
- c.Assert(expire > now, IsTrue)
- c.Assert(uint64(expire-now) <= atomic.LoadUint64(&tikv.ManagedLockTTL), IsTrue)
- return
- }
- time.Sleep(100 * time.Millisecond)
- }
- c.Assert(false, IsTrue, Commentf("update pessimistic ttl fail"))
-}
-
-func (s *testCommitterSuite) TestPessimisticLockReturnValues(c *C) {
- key := []byte("key")
- key2 := []byte("key2")
- txn := s.begin(c)
- c.Assert(txn.Set(key, key), IsNil)
- c.Assert(txn.Set(key2, key2), IsNil)
- c.Assert(txn.Commit(context.Background()), IsNil)
- txn = s.begin(c)
- txn.SetPessimistic(true)
- lockCtx := &kv.LockCtx{ForUpdateTS: txn.StartTS(), WaitStartTime: time.Now()}
- lockCtx.InitReturnValues(2)
- c.Assert(txn.LockKeys(context.Background(), lockCtx, key, key2), IsNil)
- c.Assert(lockCtx.Values, HasLen, 2)
- c.Assert(lockCtx.Values[string(key)].Value, BytesEquals, key)
- c.Assert(lockCtx.Values[string(key2)].Value, BytesEquals, key2)
-}
-
-// TestElapsedTTL tests that the elapsed time is correct even if the ts physical time is greater than the local time.
-func (s *testCommitterSuite) TestElapsedTTL(c *C) {
- key := []byte("key")
- txn := s.begin(c)
- txn.SetStartTS(oracle.GoTimeToTS(time.Now().Add(time.Second*10)) + 1)
- txn.SetPessimistic(true)
- time.Sleep(time.Millisecond * 100)
- lockCtx := &kv.LockCtx{
- ForUpdateTS: oracle.ComposeTS(oracle.ExtractPhysical(txn.StartTS())+100, 1),
- WaitStartTime: time.Now(),
- }
- err := txn.LockKeys(context.Background(), lockCtx, key)
- c.Assert(err, IsNil)
- lockInfo := s.getLockInfo(c, key)
- c.Assert(lockInfo.LockTtl-atomic.LoadUint64(&tikv.ManagedLockTTL), GreaterEqual, uint64(100))
- c.Assert(lockInfo.LockTtl-atomic.LoadUint64(&tikv.ManagedLockTTL), Less, uint64(150))
-}
-
-func (s *testCommitterSuite) TestDeleteYourWriteCauseGhostPrimary(c *C) {
- s.cluster.SplitKeys([]byte("d"), []byte("a"), 4)
- k1 := []byte("a") // inserted-then-deleted key at the first position in txn1
- k2 := []byte("b") // inserted key at the second position in txn1
- k3 := []byte("c") // key inserted in txn1 that will be conflict-read by txn2
-
- // insert k1, k2, k3 and delete k1
- txn1 := s.begin(c)
- txn1.SetPessimistic(false)
- s.store.ClearTxnLatches()
- txn1.Get(context.Background(), k1)
- txn1.GetMemBuffer().SetWithFlags(k1, []byte{0}, kv.SetPresumeKeyNotExists)
- txn1.Set(k2, []byte{1})
- txn1.Set(k3, []byte{2})
- txn1.Delete(k1)
- committer1, err := txn1.NewCommitter(0)
- c.Assert(err, IsNil)
- // set up a test knob in txn's committer
- ac, bk := make(chan struct{}), make(chan struct{})
- committer1.SetPrimaryKeyBlocker(ac, bk)
- txn1.SetCommitter(committer1)
- var txn1Done sync.WaitGroup
- txn1Done.Add(1)
- go func() {
- err1 := txn1.Commit(context.Background())
- c.Assert(err1, IsNil)
- txn1Done.Done()
- }()
- // resume after the primary key has been committed
- <-ac
-
- // start txn2 to read k3 (prewrite succeeded and the primary key should be committed)
- txn2 := s.begin(c)
- txn2.SetPessimistic(false)
- s.store.ClearTxnLatches()
- v, err := txn2.Get(context.Background(), k3)
- c.Assert(err, IsNil) // should resolve the lock and read txn1's k3 result instead of rolling it back.
- c.Assert(v[0], Equals, byte(2))
- bk <- struct{}{}
- txn1Done.Wait()
-}
-
-func (s *testCommitterSuite) TestDeleteAllYourWrites(c *C) {
- s.cluster.SplitKeys([]byte("d"), []byte("a"), 4)
- k1 := []byte("a")
- k2 := []byte("b")
- k3 := []byte("c")
-
- // insert k1, k2, k3 and delete k1, k2, k3
- txn1 := s.begin(c)
- txn1.SetPessimistic(false)
- s.store.ClearTxnLatches()
- txn1.GetMemBuffer().SetWithFlags(k1, []byte{0}, kv.SetPresumeKeyNotExists)
- txn1.Delete(k1)
- txn1.GetMemBuffer().SetWithFlags(k2, []byte{1}, kv.SetPresumeKeyNotExists)
- txn1.Delete(k2)
- txn1.GetMemBuffer().SetWithFlags(k3, []byte{2}, kv.SetPresumeKeyNotExists)
- txn1.Delete(k3)
- err1 := txn1.Commit(context.Background())
- c.Assert(err1, IsNil)
-}
-
-func (s *testCommitterSuite) TestDeleteAllYourWritesWithSFU(c *C) {
- s.cluster.SplitKeys([]byte("d"), []byte("a"), 4)
- k1 := []byte("a")
- k2 := []byte("b")
- k3 := []byte("c")
-
- // insert and delete k1, then lock k2 and k3 for update
- txn1 := s.begin(c)
- txn1.SetPessimistic(false)
- s.store.ClearTxnLatches()
- txn1.GetMemBuffer().SetWithFlags(k1, []byte{0}, kv.SetPresumeKeyNotExists)
- txn1.Delete(k1)
- err := txn1.LockKeys(context.Background(), &kv.LockCtx{}, k2, k3) // select * from t where x in (k2, k3) for update
- c.Assert(err, IsNil)
-
- committer1, err := txn1.NewCommitter(0)
- c.Assert(err, IsNil)
- // set up a test knob in txn's committer
- ac, bk := make(chan struct{}), make(chan struct{})
- committer1.SetPrimaryKeyBlocker(ac, bk)
- txn1.SetCommitter(committer1)
- var txn1Done sync.WaitGroup
- txn1Done.Add(1)
- go func() {
- err1 := txn1.Commit(context.Background())
- c.Assert(err1, IsNil)
- txn1Done.Done()
- }()
- // resume after the primary key has been committed
- <-ac
- // start txn2 to read k3
- txn2 := s.begin(c)
- txn2.SetPessimistic(false)
- s.store.ClearTxnLatches()
- err = txn2.Set(k3, []byte{33})
- c.Assert(err, IsNil)
- var meetLocks []*tikv.Lock
- resolver := tikv.LockResolverProbe{LockResolver: s.store.GetLockResolver()}
- resolver.SetMeetLockCallback(func(locks []*tikv.Lock) {
- meetLocks = append(meetLocks, locks...)
- })
- err = txn2.Commit(context.Background())
- c.Assert(err, IsNil)
- bk <- struct{}{}
- txn1Done.Wait()
- c.Assert(meetLocks[0].Primary[0], Equals, k2[0])
-}
-
-// TestAcquireFalseTimeoutLock tests acquiring a key which is a secondary key of another transaction.
-// The lock's own TTL is expired but the primary key is still alive due to heartbeats.
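-// txn2 must still fail to acquire it: the no-wait attempt errors immediately and the bounded wait times out.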
-func (s *testCommitterSuite) TestAcquireFalseTimeoutLock(c *C) {
- atomic.StoreUint64(&tikv.ManagedLockTTL, 1000) // 1s
- defer atomic.StoreUint64(&tikv.ManagedLockTTL, 3000) // restore default test value
-
- // k1 is the primary lock of txn1
- k1 := []byte("k1")
- // k2 is a secondary lock of txn1 and a key txn2 wants to lock
- k2 := []byte("k2")
-
- txn1 := s.begin(c)
- txn1.SetPessimistic(true)
- // lock the primary key
- lockCtx := &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
- err := txn1.LockKeys(context.Background(), lockCtx, k1)
- c.Assert(err, IsNil)
- // lock the secondary key
- lockCtx = &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
- err = txn1.LockKeys(context.Background(), lockCtx, k2)
- c.Assert(err, IsNil)
-
- // Heartbeats will increase the TTL of the primary key
-
- // wait until the secondary key exceeds its own TTL
- time.Sleep(time.Duration(atomic.LoadUint64(&tikv.ManagedLockTTL)) * time.Millisecond)
- txn2 := s.begin(c)
- txn2.SetPessimistic(true)
-
- // test no wait
- lockCtx = &kv.LockCtx{ForUpdateTS: txn2.StartTS(), LockWaitTime: tikv.LockNoWait, WaitStartTime: time.Now()}
- err = txn2.LockKeys(context.Background(), lockCtx, k2)
- // cannot acquire the lock immediately, so it errors
- c.Assert(err.Error(), Equals, tikverr.ErrLockAcquireFailAndNoWaitSet.Error())
-
- // test waiting for a limited time (200ms)
- lockCtx = &kv.LockCtx{ForUpdateTS: txn2.StartTS(), LockWaitTime: 200, WaitStartTime: time.Now()}
- err = txn2.LockKeys(context.Background(), lockCtx, k2)
- // cannot acquire the lock in time, so it errors
- c.Assert(err.Error(), Equals, tikverr.ErrLockWaitTimeout.Error())
-}
-
-func (s *testCommitterSuite) getLockInfo(c *C, key []byte) *kvrpcpb.LockInfo {
- txn := s.begin(c)
- err := txn.Set(key, key)
- c.Assert(err, IsNil)
- committer, err := txn.NewCommitter(1)
- c.Assert(err, IsNil)
- bo := tikv.NewBackofferWithVars(context.Background(), 5000, nil)
- loc, err := s.store.GetRegionCache().LocateKey(bo, key)
- c.Assert(err, IsNil)
- req := committer.BuildPrewriteRequest(loc.Region.GetID(), loc.Region.GetConfVer(), loc.Region.GetVer(), committer.GetMutations().Slice(0, 1), 1)
- resp, err := s.store.SendReq(bo, req, loc.Region, 5000)
- c.Assert(err, IsNil)
- c.Assert(resp.Resp, NotNil)
- keyErrs := (resp.Resp.(*kvrpcpb.PrewriteResponse)).Errors
- c.Assert(keyErrs, HasLen, 1)
- locked := keyErrs[0].Locked
- c.Assert(locked, NotNil)
- return locked
-}
-
-func (s *testCommitterSuite) TestPkNotFound(c *C) {
- atomic.StoreUint64(&tikv.ManagedLockTTL, 100) // 100ms
- defer atomic.StoreUint64(&tikv.ManagedLockTTL, 3000) // restore default value
- ctx := context.Background()
- // k1 is the primary lock of txn1.
- k1 := []byte("k1")
- // k2 is a secondary lock of txn1 and a key txn2 wants to lock.
- k2 := []byte("k2")
- k3 := []byte("k3")
-
- txn1 := s.begin(c)
- txn1.SetPessimistic(true)
- // lock the primary key.
- lockCtx := &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
- err := txn1.LockKeys(ctx, lockCtx, k1)
- c.Assert(err, IsNil)
- // lock the secondary key.
- lockCtx = &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
- err = txn1.LockKeys(ctx, lockCtx, k2, k3)
- c.Assert(err, IsNil)
- // Stop the txn's ttl manager and remove the primary key, as if the tidb server crashed and the primary key lock
- // does not actually exist, while the secondary lock operation succeeded.
- txn1.GetCommitter().CloseTTLManager()
-
- var status tikv.TxnStatus
- bo := tikv.NewBackofferWithVars(ctx, 5000, nil)
- lockKey2 := &tikv.Lock{
- Key: k2,
- Primary: k1,
- TxnID: txn1.StartTS(),
- TTL: 0, // let the primary lock on k1 expire during the check.
- TxnSize: txnCommitBatchSize,
- LockType: kvrpcpb.Op_PessimisticLock,
- LockForUpdateTS: txn1.StartTS(),
- }
- resolver := tikv.LockResolverProbe{LockResolver: s.store.GetLockResolver()}
- status, err = resolver.GetTxnStatusFromLock(bo, lockKey2, oracle.GoTimeToTS(time.Now().Add(200*time.Millisecond)), false)
- c.Assert(err, IsNil)
- c.Assert(status.Action(), Equals, kvrpcpb.Action_TTLExpirePessimisticRollback)
-
- // Txn2 tries to lock the secondary key k2; there should be no infinite loop.
- // Since the lock being resolved on k2 is a pessimistic lock, no rollback record should be written, and locking it later,
- // as well as the other secondary key k3, should succeed if no fail point is enabled.
- status, err = resolver.GetTxnStatusFromLock(bo, lockKey2, oracle.GoTimeToTS(time.Now().Add(200*time.Millisecond)), false)
- c.Assert(err, IsNil)
- c.Assert(status.Action(), Equals, kvrpcpb.Action_LockNotExistDoNothing)
- txn2 := s.begin(c)
- txn2.SetPessimistic(true)
- lockCtx = &kv.LockCtx{ForUpdateTS: txn2.StartTS(), WaitStartTime: time.Now()}
- err = txn2.LockKeys(ctx, lockCtx, k2)
- c.Assert(err, IsNil)
-
- // Pessimistic rollback using a smaller forUpdateTS does not take effect.
- lockKey3 := &tikv.Lock{
- Key: k3,
- Primary: k1,
- TxnID: txn1.StartTS(),
- TTL: tikv.ManagedLockTTL,
- TxnSize: txnCommitBatchSize,
- LockType: kvrpcpb.Op_PessimisticLock,
- LockForUpdateTS: txn1.StartTS() - 1,
- }
- err = resolver.ResolvePessimisticLock(ctx, lockKey3)
- c.Assert(err, IsNil)
- lockCtx = &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
- err = txn1.LockKeys(ctx, lockCtx, k3)
- c.Assert(err, IsNil)
-
- // After the fail point is disabled, the rollbackIfNotExist flag will be set, and the resolve should succeed. In this
- // case, the returned action of TxnStatus should be LockNotExistDoNothing, and the lock on k3 can be resolved.
- txn3 := s.begin(c)
- txn3.SetPessimistic(true)
- lockCtx = &kv.LockCtx{ForUpdateTS: txn3.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tikv.LockNoWait}
- err = txn3.LockKeys(ctx, lockCtx, k3)
- c.Assert(err, IsNil)
- status, err = resolver.GetTxnStatusFromLock(bo, lockKey3, oracle.GoTimeToTS(time.Now().Add(200*time.Millisecond)), false)
- c.Assert(err, IsNil)
- c.Assert(status.Action(), Equals, kvrpcpb.Action_LockNotExistDoNothing)
-}
-
-func (s *testCommitterSuite) TestPessimisticLockPrimary(c *C) {
- // a is the primary lock of txn1
- k1 := []byte("a")
- // b is a secondary lock of txn1 and a key txn2 wants to lock; b is in another region
- k2 := []byte("b")
-
- txn1 := s.begin(c)
- txn1.SetPessimistic(true)
- // txn1 locks k1
- lockCtx := &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
- err := txn1.LockKeys(context.Background(), lockCtx, k1)
- c.Assert(err, IsNil)
-
- // txn2 wants to lock k1 and k2; k1 (the pk) is blocked by txn1. pessimisticLockKeys has been changed to
- // lock the primary key first and then the secondary keys concurrently, so k2 should not be locked by txn2.
- doneCh := make(chan error)
- go func() {
- txn2 := s.begin(c)
- txn2.SetPessimistic(true)
- lockCtx2 := &kv.LockCtx{ForUpdateTS: txn2.StartTS(), WaitStartTime: time.Now(), LockWaitTime: 200}
- waitErr := txn2.LockKeys(context.Background(), lockCtx2, k1, k2)
- doneCh <- waitErr
- }()
- time.Sleep(50 * time.Millisecond)
-
- // txn3 should lock k2 successfully using no-wait
- txn3 := s.begin(c)
- txn3.SetPessimistic(true)
- lockCtx3 := &kv.LockCtx{ForUpdateTS: txn3.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tikv.LockNoWait}
- c.Assert(failpoint.Enable("tikvclient/txnNotFoundRetTTL", "return"), IsNil)
- err = txn3.LockKeys(context.Background(), lockCtx3, k2)
- c.Assert(failpoint.Disable("tikvclient/txnNotFoundRetTTL"), IsNil)
- c.Assert(err, IsNil)
- waitErr := <-doneCh
- c.Assert(tikverr.ErrLockWaitTimeout, Equals, waitErr)
-}
-
-func (s *testCommitterSuite) TestResolvePessimisticLock(c *C) {
- untouchedIndexKey := []byte("t00000001_i000000001")
- untouchedIndexValue := []byte{0, 0, 0, 0, 0, 0, 0, 1, 49}
- noValueIndexKey := []byte("t00000001_i000000002")
- txn := s.begin(c)
- txn.SetKVFilter(drivertxn.TiDBKVFilter{})
- err := txn.Set(untouchedIndexKey, untouchedIndexValue)
- c.Assert(err, IsNil)
- lockCtx := &kv.LockCtx{ForUpdateTS: txn.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tikv.LockNoWait}
- err = txn.LockKeys(context.Background(), lockCtx, untouchedIndexKey, noValueIndexKey)
- c.Assert(err, IsNil)
- commit, err := txn.NewCommitter(1)
- c.Assert(err, IsNil)
- mutation := commit.MutationsOfKeys([][]byte{untouchedIndexKey, noValueIndexKey})
- c.Assert(mutation.Len(), Equals, 2)
- c.Assert(mutation.GetOp(0), Equals, kvrpcpb.Op_Lock)
- c.Assert(mutation.GetKey(0), BytesEquals, untouchedIndexKey)
- c.Assert(mutation.GetValue(0), BytesEquals, untouchedIndexValue)
- c.Assert(mutation.GetOp(1), Equals, kvrpcpb.Op_Lock)
- c.Assert(mutation.GetKey(1), BytesEquals, noValueIndexKey)
- c.Assert(mutation.GetValue(1), BytesEquals, []byte{})
-}
-
-func (s *testCommitterSuite) TestCommitDeadLock(c *C) {
- // Split into two regions so that k1 and k2 are in different regions.
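- // The key prefixes "a_" and "y_" place k1 and k2 on opposite sides of the split below.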
-    s.cluster.SplitKeys([]byte("z"), []byte("a"), 2)
-    k1 := []byte("a_deadlock_k1")
-    k2 := []byte("y_deadlock_k2")
-
-    region1, _ := s.cluster.GetRegionByKey(k1)
-    region2, _ := s.cluster.GetRegionByKey(k2)
-    c.Assert(region1.Id != region2.Id, IsTrue)
-
-    txn1 := s.begin(c)
-    txn1.Set(k1, []byte("t1"))
-    txn1.Set(k2, []byte("t1"))
-    commit1, err := txn1.NewCommitter(1)
-    c.Assert(err, IsNil)
-    commit1.SetPrimaryKey(k1)
-    commit1.SetTxnSize(1000 * 1024 * 1024)
-
-    txn2 := s.begin(c)
-    txn2.Set(k1, []byte("t2"))
-    txn2.Set(k2, []byte("t2"))
-    commit2, err := txn2.NewCommitter(2)
-    c.Assert(err, IsNil)
-    commit2.SetPrimaryKey(k2)
-    commit2.SetTxnSize(1000 * 1024 * 1024)
-
-    s.cluster.ScheduleDelay(txn2.StartTS(), region1.Id, 5*time.Millisecond)
-    s.cluster.ScheduleDelay(txn1.StartTS(), region2.Id, 5*time.Millisecond)
-
-    // Txn1 prewrites k1, k2 and txn2 prewrites k2, k1; the large-txn protocol runs
-    // ttlManager and updates their TTLs, causing a deadlock.
-    ch := make(chan error, 2)
-    var wg sync.WaitGroup
-    wg.Add(1)
-    go func() {
-        ch <- commit2.Execute(context.Background())
-        wg.Done()
-    }()
-    ch <- commit1.Execute(context.Background())
-    wg.Wait()
-    close(ch)
-
-    res := 0
-    for e := range ch {
-        if e != nil {
-            res++
-        }
-    }
-    c.Assert(res, Equals, 1)
-}
-
-// TestPushPessimisticLock tests pushing forward the minCommitTS of pessimistic locks.
-func (s *testCommitterSuite) TestPushPessimisticLock(c *C) {
-    // k1 is the primary key.
-    k1, k2 := []byte("a"), []byte("b")
-    ctx := context.Background()
-
-    txn1 := s.begin(c)
-    txn1.SetPessimistic(true)
-    lockCtx := &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
-    err := txn1.LockKeys(context.Background(), lockCtx, k1, k2)
-    c.Assert(err, IsNil)
-
-    txn1.Set(k2, []byte("v2"))
-    committer := txn1.GetCommitter()
-    err = committer.InitKeysAndMutations()
-    c.Assert(err, IsNil)
-    // Strip the prewrite of the primary key.
-    committer.SetMutations(committer.GetMutations().Slice(1, 2))
-    c.Assert(err, IsNil)
-    err = committer.PrewriteAllMutations(ctx)
-    c.Assert(err, IsNil)
-    // The primary lock is a pessimistic lock and the secondary lock is an optimistic lock.
-    lock1 := s.getLockInfo(c, k1)
-    c.Assert(lock1.LockType, Equals, kvrpcpb.Op_PessimisticLock)
-    c.Assert(lock1.PrimaryLock, BytesEquals, k1)
-    lock2 := s.getLockInfo(c, k2)
-    c.Assert(lock2.LockType, Equals, kvrpcpb.Op_Put)
-    c.Assert(lock2.PrimaryLock, BytesEquals, k1)
-
-    txn2 := s.begin(c)
-    start := time.Now()
-    _, err = txn2.Get(ctx, k2)
-    elapsed := time.Since(start)
-    // The optimistic lock shouldn't block reads.
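-    // (Clarifying note, not part of the original file: instead of waiting for the lock, the read is
-    // expected to push the lock's minCommitTS forward, the behavior reported as
-    // Action_MinCommitTSPushed elsewhere in this suite, so it returns well within the bound below.)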
-    c.Assert(elapsed, Less, 500*time.Millisecond)
-    c.Assert(tikverr.IsErrNotFound(err), IsTrue)
-
-    txn1.Rollback()
-    txn2.Rollback()
-}
-
-// TestResolveMixed tests mixed lock resolving with left-behind optimistic and pessimistic locks,
-// using the clean-whole-region resolve path.
-func (s *testCommitterSuite) TestResolveMixed(c *C) {
-    atomic.StoreUint64(&tikv.ManagedLockTTL, 100)        // 100ms
-    defer atomic.StoreUint64(&tikv.ManagedLockTTL, 3000) // restore default value
-    ctx := context.Background()
-
-    // pk is the primary lock of txn1.
-    pk := []byte("pk")
-    secondaryLockkeys := make([][]byte, 0, bigTxnThreshold)
-    for i := 0; i < bigTxnThreshold; i++ {
-        optimisticLock := []byte(fmt.Sprintf("optimisticLockKey%d", i))
-        secondaryLockkeys = append(secondaryLockkeys, optimisticLock)
-    }
-    pessimisticLockKey := []byte("pessimisticLockKey")
-
-    // Leave the optimistic and pessimistic locks behind with the primary lock not found.
-    txn1 := s.begin(c)
-    txn1.SetPessimistic(true)
-    // Lock the primary key.
-    lockCtx := &kv.LockCtx{ForUpdateTS: txn1.StartTS(), WaitStartTime: time.Now()}
-    err := txn1.LockKeys(context.Background(), lockCtx, pk)
-    c.Assert(err, IsNil)
-    // Lock the optimistic keys.
-    for i := 0; i < bigTxnThreshold; i++ {
-        txn1.Set(secondaryLockkeys[i], []byte(fmt.Sprintf("v%d", i)))
-    }
-    committer := txn1.GetCommitter()
-    err = committer.InitKeysAndMutations()
-    c.Assert(err, IsNil)
-    err = committer.PrewriteAllMutations(ctx)
-    c.Assert(err, IsNil)
-    // Lock the pessimistic keys.
-    err = txn1.LockKeys(context.Background(), lockCtx, pessimisticLockKey)
-    c.Assert(err, IsNil)
-    lock1 := s.getLockInfo(c, pessimisticLockKey)
-    c.Assert(lock1.LockType, Equals, kvrpcpb.Op_PessimisticLock)
-    c.Assert(lock1.PrimaryLock, BytesEquals, pk)
-    optimisticLockKey := secondaryLockkeys[0]
-    lock2 := s.getLockInfo(c, optimisticLockKey)
-    c.Assert(lock2.LockType, Equals, kvrpcpb.Op_Put)
-    c.Assert(lock2.PrimaryLock, BytesEquals, pk)
-
-    // Stop the txn ttl manager and remove the primary key, leaving the other keys behind.
-    committer.CloseTTLManager()
-    muts := tikv.NewPlainMutations(1)
-    muts.Push(kvrpcpb.Op_Lock, pk, nil, true)
-    err = committer.PessimisticRollbackMutations(context.Background(), &muts)
-    c.Assert(err, IsNil)
-
-    // Try to resolve the left-behind optimistic locks using the clean-whole-region path.
-    time.Sleep(time.Duration(atomic.LoadUint64(&tikv.ManagedLockTTL)) * time.Millisecond)
-    optimisticLockInfo := s.getLockInfo(c, optimisticLockKey)
-    lock := tikv.NewLock(optimisticLockInfo)
-    resolver := tikv.LockResolverProbe{LockResolver: s.store.GetLockResolver()}
-    err = resolver.ResolveLock(ctx, lock)
-    c.Assert(err, IsNil)
-
-    // txn2 tries to lock pessimisticLockKey; the lock should have been resolved by the
-    // clean-whole-region resolve.
-    txn2 := s.begin(c)
-    txn2.SetPessimistic(true)
-    lockCtx = &kv.LockCtx{ForUpdateTS: txn2.StartTS(), WaitStartTime: time.Now(), LockWaitTime: tikv.LockNoWait}
-    err = txn2.LockKeys(context.Background(), lockCtx, pessimisticLockKey)
-    c.Assert(err, IsNil)
-
-    err = txn1.Rollback()
-    c.Assert(err, IsNil)
-    err = txn2.Rollback()
-    c.Assert(err, IsNil)
-}
-
-// TestPrewriteSecondaryKeys tests that when async commit is enabled, each prewrite message includes an
-// accurate list of secondary keys.
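-// (Added clarification, not part of the original file: "accurate" means the prewrite of the primary
-// key carries every other key exactly once in its secondaries list, excluding the primary itself,
-// while prewrites of secondary keys carry an empty list; the mockClient used by this test asserts
-// exactly these properties.)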
-func (s *testCommitterSuite) TestPrewriteSecondaryKeys(c *C) { - // Prepare two regions first: (, 100) and [100, ) - region, _ := s.cluster.GetRegionByKey([]byte{50}) - newRegionID := s.cluster.AllocID() - newPeerID := s.cluster.AllocID() - s.cluster.Split(region.Id, newRegionID, []byte{100}, []uint64{newPeerID}, newPeerID) - - txn := s.beginAsyncCommit(c) - var val [1024]byte - for i := byte(50); i < 120; i++ { - err := txn.Set([]byte{i}, val[:]) - c.Assert(err, IsNil) - } - // Some duplicates. - for i := byte(50); i < 120; i += 10 { - err := txn.Set([]byte{i}, val[512:700]) - c.Assert(err, IsNil) - } - - committer, err := txn.NewCommitter(1) - c.Assert(err, IsNil) - - mock := mockClient{inner: s.store.GetTiKVClient()} - s.store.SetTiKVClient(&mock) - ctx := context.Background() - // TODO remove this when minCommitTS is returned from mockStore prewrite response. - committer.SetMinCommitTS(committer.GetStartTS() + 10) - committer.SetNoFallBack() - err = committer.Execute(ctx) - c.Assert(err, IsNil) - c.Assert(mock.seenPrimaryReq > 0, IsTrue) - c.Assert(mock.seenSecondaryReq > 0, IsTrue) -} - -func (s *testCommitterSuite) TestAsyncCommit(c *C) { - ctx := context.Background() - pk := []byte("tpk") - pkVal := []byte("pkVal") - k1 := []byte("tk1") - k1Val := []byte("k1Val") - txn1 := s.beginAsyncCommit(c) - err := txn1.Set(pk, pkVal) - c.Assert(err, IsNil) - err = txn1.Set(k1, k1Val) - c.Assert(err, IsNil) - - committer, err := txn1.NewCommitter(0) - c.Assert(err, IsNil) - committer.SetSessionID(1) - committer.SetMinCommitTS(txn1.StartTS() + 10) - err = committer.Execute(ctx) - c.Assert(err, IsNil) - - s.checkValues(c, map[string]string{ - string(pk): string(pkVal), - string(k1): string(k1Val), - }) -} - -func updateGlobalConfig(f func(conf *config.Config)) { - g := config.GetGlobalConfig() - newConf := *g - f(&newConf) - config.StoreGlobalConfig(&newConf) -} - -// restoreFunc gets a function that restore the config to the current value. -func restoreGlobalConfFunc() (restore func()) { - g := config.GetGlobalConfig() - return func() { - config.StoreGlobalConfig(g) - } -} - -func (s *testCommitterSuite) TestAsyncCommitCheck(c *C) { - defer restoreGlobalConfFunc()() - updateGlobalConfig(func(conf *config.Config) { - conf.TiKVClient.AsyncCommit.KeysLimit = 16 - conf.TiKVClient.AsyncCommit.TotalKeySizeLimit = 64 - }) - - txn := s.beginAsyncCommit(c) - buf := []byte{0, 0, 0, 0} - // Set 16 keys, each key is 4 bytes long. So the total size of keys is 64 bytes. - for i := 0; i < 16; i++ { - buf[0] = byte(i) - err := txn.Set(buf, []byte("v")) - c.Assert(err, IsNil) - } - - committer, err := txn.NewCommitter(1) - c.Assert(err, IsNil) - c.Assert(committer.CheckAsyncCommit(), IsTrue) - - updateGlobalConfig(func(conf *config.Config) { - conf.TiKVClient.AsyncCommit.KeysLimit = 15 - }) - c.Assert(committer.CheckAsyncCommit(), IsFalse) - - updateGlobalConfig(func(conf *config.Config) { - conf.TiKVClient.AsyncCommit.KeysLimit = 20 - conf.TiKVClient.AsyncCommit.TotalKeySizeLimit = 63 - }) - c.Assert(committer.CheckAsyncCommit(), IsFalse) -} - -type mockClient struct { - inner tikv.Client - seenPrimaryReq uint32 - seenSecondaryReq uint32 -} - -func (m *mockClient) SendRequest(ctx context.Context, addr string, req *tikvrpc.Request, timeout time.Duration) (*tikvrpc.Response, error) { - // If we find a prewrite request, check if it satisfies our constraints. 
- if pr, ok := req.Req.(*kvrpcpb.PrewriteRequest); ok { - if pr.UseAsyncCommit { - if isPrimary(pr) { - // The primary key should not be included, nor should there be any duplicates. All keys should be present. - if !includesPrimary(pr) && allKeysNoDups(pr) { - atomic.StoreUint32(&m.seenPrimaryReq, 1) - } - } else { - // Secondaries should only be sent with the primary key - if len(pr.Secondaries) == 0 { - atomic.StoreUint32(&m.seenSecondaryReq, 1) - } - } - } - } - return m.inner.SendRequest(ctx, addr, req, timeout) -} - -func (m *mockClient) Close() error { - return m.inner.Close() -} - -func isPrimary(req *kvrpcpb.PrewriteRequest) bool { - for _, m := range req.Mutations { - if bytes.Equal(req.PrimaryLock, m.Key) { - return true - } - } - - return false -} - -func includesPrimary(req *kvrpcpb.PrewriteRequest) bool { - for _, k := range req.Secondaries { - if bytes.Equal(req.PrimaryLock, k) { - return true - } - } - - return false -} - -func allKeysNoDups(req *kvrpcpb.PrewriteRequest) bool { - check := make(map[string]bool) - - // Create the check map and check for duplicates. - for _, k := range req.Secondaries { - s := string(k) - if check[s] { - return false - } - check[s] = true - } - - // Check every key is present. - for i := byte(50); i < 120; i++ { - k := []byte{i} - if !bytes.Equal(req.PrimaryLock, k) && !check[string(k)] { - return false - } - } - return true -} diff --git a/store/tikv/tests/async_commit_fail_test.go b/store/tikv/tests/async_commit_fail_test.go deleted file mode 100644 index 321803bb7124d..0000000000000 --- a/store/tikv/tests/async_commit_fail_test.go +++ /dev/null @@ -1,284 +0,0 @@ -// Copyright 2020 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -package tikv_test - -import ( - "bytes" - "context" - "sort" - - . "github.com/pingcap/check" - "github.com/pingcap/errors" - "github.com/pingcap/failpoint" - "github.com/pingcap/kvproto/pkg/kvrpcpb" - "github.com/pingcap/parser/terror" - tikverr "github.com/tikv/client-go/v2/error" - "github.com/tikv/client-go/v2/mockstore" - "github.com/tikv/client-go/v2/tikv" - "github.com/tikv/client-go/v2/util" -) - -type testAsyncCommitFailSuite struct { - OneByOneSuite - testAsyncCommitCommon -} - -var _ = SerialSuites(&testAsyncCommitFailSuite{}) - -func (s *testAsyncCommitFailSuite) SetUpTest(c *C) { - s.testAsyncCommitCommon.setUpTest(c) -} - -// TestFailCommitPrimaryRpcErrors tests rpc errors are handled properly when -// committing primary region task. -func (s *testAsyncCommitFailSuite) TestFailAsyncCommitPrewriteRpcErrors(c *C) { - // This test doesn't support tikv mode because it needs setting failpoint in unistore. - if *mockstore.WithTiKV { - return - } - - c.Assert(failpoint.Enable("tikvclient/noRetryOnRpcError", "return(true)"), IsNil) - c.Assert(failpoint.Enable("tikvclient/rpcPrewriteTimeout", `return(true)`), IsNil) - defer func() { - c.Assert(failpoint.Disable("tikvclient/rpcPrewriteTimeout"), IsNil) - c.Assert(failpoint.Disable("tikvclient/noRetryOnRpcError"), IsNil) - }() - // The rpc error will be wrapped to ErrResultUndetermined. 
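-    // (Clarifying note, not part of the original file: the result is undetermined because the client
-    // cannot tell whether the prewrite reached TiKV before the RPC failed, so the transaction may or
-    // may not have committed; the read at the end of this test confirms it actually did.)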
- t1 := s.beginAsyncCommit(c) - err := t1.Set([]byte("a"), []byte("a1")) - c.Assert(err, IsNil) - ctx := context.WithValue(context.Background(), util.SessionID, uint64(1)) - err = t1.Commit(ctx) - c.Assert(err, NotNil) - c.Assert(terror.ErrorEqual(err, terror.ErrResultUndetermined), IsTrue, Commentf("%s", errors.ErrorStack(err))) - - // We don't need to call "Rollback" after "Commit" fails. - err = t1.Rollback() - c.Assert(err, Equals, tikverr.ErrInvalidTxn) - - // Create a new transaction to check. The previous transaction should actually commit. - t2 := s.beginAsyncCommit(c) - res, err := t2.Get(context.Background(), []byte("a")) - c.Assert(err, IsNil) - c.Assert(bytes.Equal(res, []byte("a1")), IsTrue) -} - -func (s *testAsyncCommitFailSuite) TestAsyncCommitPrewriteCancelled(c *C) { - // This test doesn't support tikv mode because it needs setting failpoint in unistore. - if *mockstore.WithTiKV { - return - } - - // Split into two regions. - splitKey := "s" - bo := tikv.NewBackofferWithVars(context.Background(), 5000, nil) - loc, err := s.store.GetRegionCache().LocateKey(bo, []byte(splitKey)) - c.Assert(err, IsNil) - newRegionID := s.cluster.AllocID() - newPeerID := s.cluster.AllocID() - s.cluster.Split(loc.Region.GetID(), newRegionID, []byte(splitKey), []uint64{newPeerID}, newPeerID) - s.store.GetRegionCache().InvalidateCachedRegion(loc.Region) - - c.Assert(failpoint.Enable("tikvclient/rpcPrewriteResult", `1*return("writeConflict")->sleep(50)`), IsNil) - defer func() { - c.Assert(failpoint.Disable("tikvclient/rpcPrewriteResult"), IsNil) - }() - - t1 := s.beginAsyncCommit(c) - err = t1.Set([]byte("a"), []byte("a")) - c.Assert(err, IsNil) - err = t1.Set([]byte("z"), []byte("z")) - c.Assert(err, IsNil) - ctx := context.WithValue(context.Background(), util.SessionID, uint64(1)) - err = t1.Commit(ctx) - c.Assert(err, NotNil) - _, ok := errors.Cause(err).(*tikverr.ErrWriteConflict) - c.Assert(ok, IsTrue, Commentf("%s", errors.ErrorStack(err))) -} - -func (s *testAsyncCommitFailSuite) TestPointGetWithAsyncCommit(c *C) { - s.putAlphabets(c, true) - - txn := s.beginAsyncCommit(c) - txn.Set([]byte("a"), []byte("v1")) - txn.Set([]byte("b"), []byte("v2")) - s.mustPointGet(c, []byte("a"), []byte("a")) - s.mustPointGet(c, []byte("b"), []byte("b")) - - // PointGet cannot ignore async commit transactions' locks. - c.Assert(failpoint.Enable("tikvclient/asyncCommitDoNothing", "return"), IsNil) - ctx := context.WithValue(context.Background(), util.SessionID, uint64(1)) - err := txn.Commit(ctx) - c.Assert(err, IsNil) - c.Assert(txn.GetCommitter().IsAsyncCommit(), IsTrue) - s.mustPointGet(c, []byte("a"), []byte("v1")) - s.mustPointGet(c, []byte("b"), []byte("v2")) - c.Assert(failpoint.Disable("tikvclient/asyncCommitDoNothing"), IsNil) - - // PointGet will not push the `max_ts` to its ts which is MaxUint64. - txn2 := s.beginAsyncCommit(c) - s.mustGetFromTxn(c, txn2, []byte("a"), []byte("v1")) - s.mustGetFromTxn(c, txn2, []byte("b"), []byte("v2")) - err = txn2.Rollback() - c.Assert(err, IsNil) -} - -func (s *testAsyncCommitFailSuite) TestSecondaryListInPrimaryLock(c *C) { - // This test doesn't support tikv mode. - if *mockstore.WithTiKV { - return - } - - s.putAlphabets(c, true) - - // Split into several regions. 
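-    // (The loop below splits at "h", "o" and "u", so the alphabet keys end up spread over four regions.)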
- for _, splitKey := range []string{"h", "o", "u"} { - bo := tikv.NewBackofferWithVars(context.Background(), 5000, nil) - loc, err := s.store.GetRegionCache().LocateKey(bo, []byte(splitKey)) - c.Assert(err, IsNil) - newRegionID := s.cluster.AllocID() - newPeerID := s.cluster.AllocID() - s.cluster.Split(loc.Region.GetID(), newRegionID, []byte(splitKey), []uint64{newPeerID}, newPeerID) - s.store.GetRegionCache().InvalidateCachedRegion(loc.Region) - } - - // Ensure the region has been split - bo := tikv.NewBackofferWithVars(context.Background(), 5000, nil) - loc, err := s.store.GetRegionCache().LocateKey(bo, []byte("i")) - c.Assert(err, IsNil) - c.Assert(loc.StartKey, BytesEquals, []byte("h")) - c.Assert(loc.EndKey, BytesEquals, []byte("o")) - - loc, err = s.store.GetRegionCache().LocateKey(bo, []byte("p")) - c.Assert(err, IsNil) - c.Assert(loc.StartKey, BytesEquals, []byte("o")) - c.Assert(loc.EndKey, BytesEquals, []byte("u")) - - var sessionID uint64 = 0 - test := func(keys []string, values []string) { - sessionID++ - ctx := context.WithValue(context.Background(), util.SessionID, sessionID) - - txn := s.beginAsyncCommit(c) - for i := range keys { - txn.Set([]byte(keys[i]), []byte(values[i])) - } - - c.Assert(failpoint.Enable("tikvclient/asyncCommitDoNothing", "return"), IsNil) - - err = txn.Commit(ctx) - c.Assert(err, IsNil) - - primary := txn.GetCommitter().GetPrimaryKey() - bo := tikv.NewBackofferWithVars(context.Background(), 5000, nil) - lockResolver := tikv.LockResolverProbe{LockResolver: s.store.GetLockResolver()} - txnStatus, err := lockResolver.GetTxnStatus(bo, txn.StartTS(), primary, 0, 0, false, false, nil) - c.Assert(err, IsNil) - c.Assert(txnStatus.IsCommitted(), IsFalse) - c.Assert(txnStatus.Action(), Equals, kvrpcpb.Action_NoAction) - // Currently when the transaction has no secondary, the `secondaries` field of the txnStatus - // will be set nil. So here initialize the `expectedSecondaries` to nil too. - var expectedSecondaries [][]byte - for _, k := range keys { - if !bytes.Equal([]byte(k), primary) { - expectedSecondaries = append(expectedSecondaries, []byte(k)) - } - } - sort.Slice(expectedSecondaries, func(i, j int) bool { - return bytes.Compare(expectedSecondaries[i], expectedSecondaries[j]) < 0 - }) - - gotSecondaries := lockResolver.GetSecondariesFromTxnStatus(txnStatus) - sort.Slice(gotSecondaries, func(i, j int) bool { - return bytes.Compare(gotSecondaries[i], gotSecondaries[j]) < 0 - }) - - c.Assert(gotSecondaries, DeepEquals, expectedSecondaries) - - c.Assert(failpoint.Disable("tikvclient/asyncCommitDoNothing"), IsNil) - txn.GetCommitter().Cleanup(context.Background()) - } - - test([]string{"a"}, []string{"a1"}) - test([]string{"a", "b"}, []string{"a2", "b2"}) - test([]string{"a", "b", "d"}, []string{"a3", "b3", "d3"}) - test([]string{"a", "b", "h", "i", "u"}, []string{"a4", "b4", "h4", "i4", "u4"}) - test([]string{"i", "a", "z", "u", "b"}, []string{"i5", "a5", "z5", "u5", "b5"}) -} - -func (s *testAsyncCommitFailSuite) TestAsyncCommitContextCancelCausingUndetermined(c *C) { - // For an async commit transaction, if RPC returns context.Canceled error when prewriting, the - // transaction should go to undetermined state. 
- txn := s.beginAsyncCommit(c) - err := txn.Set([]byte("a"), []byte("va")) - c.Assert(err, IsNil) - - c.Assert(failpoint.Enable("tikvclient/rpcContextCancelErr", `return(true)`), IsNil) - defer func() { - c.Assert(failpoint.Disable("tikvclient/rpcContextCancelErr"), IsNil) - }() - - ctx := context.WithValue(context.Background(), util.SessionID, uint64(1)) - err = txn.Commit(ctx) - c.Assert(err, NotNil) - c.Assert(txn.GetCommitter().GetUndeterminedErr(), NotNil) -} - -// TestAsyncCommitRPCErrorThenWriteConflict verifies that the determined failure error overwrites undetermined error. -func (s *testAsyncCommitFailSuite) TestAsyncCommitRPCErrorThenWriteConflict(c *C) { - // This test doesn't support tikv mode because it needs setting failpoint in unistore. - if *mockstore.WithTiKV { - return - } - - txn := s.beginAsyncCommit(c) - err := txn.Set([]byte("a"), []byte("va")) - c.Assert(err, IsNil) - - c.Assert(failpoint.Enable("tikvclient/rpcPrewriteResult", `1*return("timeout")->return("writeConflict")`), IsNil) - defer func() { - c.Assert(failpoint.Disable("tikvclient/rpcPrewriteResult"), IsNil) - }() - - ctx := context.WithValue(context.Background(), util.SessionID, uint64(1)) - err = txn.Commit(ctx) - c.Assert(err, NotNil) - c.Assert(txn.GetCommitter().GetUndeterminedErr(), IsNil) -} - -// TestAsyncCommitRPCErrorThenWriteConflictInChild verifies that the determined failure error in a child recursion -// overwrites the undetermined error in the parent. -func (s *testAsyncCommitFailSuite) TestAsyncCommitRPCErrorThenWriteConflictInChild(c *C) { - // This test doesn't support tikv mode because it needs setting failpoint in unistore. - if *mockstore.WithTiKV { - return - } - - txn := s.beginAsyncCommit(c) - err := txn.Set([]byte("a"), []byte("va")) - c.Assert(err, IsNil) - - c.Assert(failpoint.Enable("tikvclient/rpcPrewriteResult", `1*return("timeout")->return("writeConflict")`), IsNil) - c.Assert(failpoint.Enable("tikvclient/forceRecursion", `return`), IsNil) - - defer func() { - c.Assert(failpoint.Disable("tikvclient/rpcPrewriteResult"), IsNil) - c.Assert(failpoint.Disable("tikvclient/forceRecursion"), IsNil) - }() - - ctx := context.WithValue(context.Background(), util.SessionID, uint64(1)) - err = txn.Commit(ctx) - c.Assert(err, NotNil) - c.Assert(txn.GetCommitter().GetUndeterminedErr(), IsNil) -} diff --git a/store/tikv/tests/async_commit_test.go b/store/tikv/tests/async_commit_test.go deleted file mode 100644 index 84c4dbc30edf0..0000000000000 --- a/store/tikv/tests/async_commit_test.go +++ /dev/null @@ -1,542 +0,0 @@ -// Copyright 2020 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -package tikv_test - -import ( - "bytes" - "context" - "fmt" - "math" - "sync/atomic" - "testing" - "time" - - . 
"github.com/pingcap/check" - "github.com/pingcap/errors" - "github.com/pingcap/kvproto/pkg/kvrpcpb" - "github.com/pingcap/tidb/store/mockstore/unistore" - tikverr "github.com/tikv/client-go/v2/error" - "github.com/tikv/client-go/v2/mockstore" - "github.com/tikv/client-go/v2/mockstore/cluster" - "github.com/tikv/client-go/v2/oracle" - "github.com/tikv/client-go/v2/tikv" - "github.com/tikv/client-go/v2/tikvrpc" - "github.com/tikv/client-go/v2/util" -) - -func TestT(t *testing.T) { - CustomVerboseFlag = true - TestingT(t) -} - -// testAsyncCommitCommon is used to put common parts that will be both used by -// testAsyncCommitSuite and testAsyncCommitFailSuite. -type testAsyncCommitCommon struct { - cluster cluster.Cluster - store *tikv.KVStore -} - -func (s *testAsyncCommitCommon) setUpTest(c *C) { - if *mockstore.WithTiKV { - s.store = NewTestStore(c) - return - } - - client, pdClient, cluster, err := unistore.New("") - c.Assert(err, IsNil) - unistore.BootstrapWithSingleStore(cluster) - s.cluster = cluster - store, err := tikv.NewTestTiKVStore(fpClient{Client: client}, pdClient, nil, nil, 0) - c.Assert(err, IsNil) - - s.store = store -} - -func (s *testAsyncCommitCommon) putAlphabets(c *C, enableAsyncCommit bool) { - for ch := byte('a'); ch <= byte('z'); ch++ { - s.putKV(c, []byte{ch}, []byte{ch}, enableAsyncCommit) - } -} - -func (s *testAsyncCommitCommon) putKV(c *C, key, value []byte, enableAsyncCommit bool) (uint64, uint64) { - txn := s.beginAsyncCommit(c) - err := txn.Set(key, value) - c.Assert(err, IsNil) - err = txn.Commit(context.Background()) - c.Assert(err, IsNil) - return txn.StartTS(), txn.GetCommitTS() -} - -func (s *testAsyncCommitCommon) mustGetFromTxn(c *C, txn tikv.TxnProbe, key, expectedValue []byte) { - v, err := txn.Get(context.Background(), key) - c.Assert(err, IsNil) - c.Assert(v, BytesEquals, expectedValue) -} - -func (s *testAsyncCommitCommon) mustGetLock(c *C, key []byte) *tikv.Lock { - ver, err := s.store.CurrentTimestamp(oracle.GlobalTxnScope) - c.Assert(err, IsNil) - req := tikvrpc.NewRequest(tikvrpc.CmdGet, &kvrpcpb.GetRequest{ - Key: key, - Version: ver, - }) - bo := tikv.NewBackofferWithVars(context.Background(), 5000, nil) - loc, err := s.store.GetRegionCache().LocateKey(bo, key) - c.Assert(err, IsNil) - resp, err := s.store.SendReq(bo, req, loc.Region, time.Second*10) - c.Assert(err, IsNil) - c.Assert(resp.Resp, NotNil) - keyErr := resp.Resp.(*kvrpcpb.GetResponse).GetError() - c.Assert(keyErr, NotNil) - var lockutil tikv.LockProbe - lock, err := lockutil.ExtractLockFromKeyErr(keyErr) - c.Assert(err, IsNil) - return lock -} - -func (s *testAsyncCommitCommon) mustPointGet(c *C, key, expectedValue []byte) { - snap := s.store.GetSnapshot(math.MaxUint64) - value, err := snap.Get(context.Background(), key) - c.Assert(err, IsNil) - c.Assert(value, BytesEquals, expectedValue) -} - -func (s *testAsyncCommitCommon) mustGetFromSnapshot(c *C, version uint64, key, expectedValue []byte) { - snap := s.store.GetSnapshot(version) - value, err := snap.Get(context.Background(), key) - c.Assert(err, IsNil) - c.Assert(value, BytesEquals, expectedValue) -} - -func (s *testAsyncCommitCommon) mustGetNoneFromSnapshot(c *C, version uint64, key []byte) { - snap := s.store.GetSnapshot(version) - _, err := snap.Get(context.Background(), key) - c.Assert(errors.Cause(err), Equals, tikverr.ErrNotExist) -} - -func (s *testAsyncCommitCommon) beginAsyncCommitWithLinearizability(c *C) tikv.TxnProbe { - txn := s.beginAsyncCommit(c) - txn.SetCausalConsistency(false) - return txn -} - -func (s 
*testAsyncCommitCommon) beginAsyncCommit(c *C) tikv.TxnProbe { - txn, err := s.store.Begin() - c.Assert(err, IsNil) - txn.SetEnableAsyncCommit(true) - return tikv.TxnProbe{KVTxn: txn} -} - -func (s *testAsyncCommitCommon) begin(c *C) tikv.TxnProbe { - txn, err := s.store.Begin() - c.Assert(err, IsNil) - return tikv.TxnProbe{KVTxn: txn} -} - -type testAsyncCommitSuite struct { - OneByOneSuite - testAsyncCommitCommon - bo *tikv.Backoffer -} - -var _ = SerialSuites(&testAsyncCommitSuite{}) - -func (s *testAsyncCommitSuite) SetUpTest(c *C) { - s.testAsyncCommitCommon.setUpTest(c) - s.bo = tikv.NewBackofferWithVars(context.Background(), 5000, nil) -} - -func (s *testAsyncCommitSuite) lockKeysWithAsyncCommit(c *C, keys, values [][]byte, primaryKey, primaryValue []byte, commitPrimary bool) (uint64, uint64) { - txn, err := s.store.Begin() - c.Assert(err, IsNil) - txn.SetEnableAsyncCommit(true) - for i, k := range keys { - if len(values[i]) > 0 { - err = txn.Set(k, values[i]) - } else { - err = txn.Delete(k) - } - c.Assert(err, IsNil) - } - if len(primaryValue) > 0 { - err = txn.Set(primaryKey, primaryValue) - } else { - err = txn.Delete(primaryKey) - } - c.Assert(err, IsNil) - txnProbe := tikv.TxnProbe{KVTxn: txn} - tpc, err := txnProbe.NewCommitter(0) - c.Assert(err, IsNil) - tpc.SetPrimaryKey(primaryKey) - - ctx := context.Background() - err = tpc.PrewriteAllMutations(ctx) - c.Assert(err, IsNil) - - if commitPrimary { - commitTS, err := s.store.GetOracle().GetTimestamp(ctx, &oracle.Option{TxnScope: oracle.GlobalTxnScope}) - c.Assert(err, IsNil) - tpc.SetCommitTS(commitTS) - err = tpc.CommitMutations(ctx) - c.Assert(err, IsNil) - } - return txn.StartTS(), tpc.GetCommitTS() -} - -func (s *testAsyncCommitSuite) TestCheckSecondaries(c *C) { - // This test doesn't support tikv mode. - if *mockstore.WithTiKV { - return - } - - s.putAlphabets(c, true) - - loc, err := s.store.GetRegionCache().LocateKey(s.bo, []byte("a")) - c.Assert(err, IsNil) - newRegionID, peerID := s.cluster.AllocID(), s.cluster.AllocID() - s.cluster.Split(loc.Region.GetID(), newRegionID, []byte("e"), []uint64{peerID}, peerID) - s.store.GetRegionCache().InvalidateCachedRegion(loc.Region) - - // No locks to check, only primary key is locked, should be successful. - s.lockKeysWithAsyncCommit(c, [][]byte{}, [][]byte{}, []byte("z"), []byte("z"), false) - lock := s.mustGetLock(c, []byte("z")) - lock.UseAsyncCommit = true - ts, err := s.store.GetOracle().GetTimestamp(context.Background(), &oracle.Option{TxnScope: oracle.GlobalTxnScope}) - c.Assert(err, IsNil) - var lockutil tikv.LockProbe - status := lockutil.NewLockStatus(nil, true, ts) - - resolver := tikv.LockResolverProbe{LockResolver: s.store.GetLockResolver()} - err = resolver.ResolveLockAsync(s.bo, lock, status) - c.Assert(err, IsNil) - currentTS, err := s.store.GetOracle().GetTimestamp(context.Background(), &oracle.Option{TxnScope: oracle.GlobalTxnScope}) - c.Assert(err, IsNil) - status, err = resolver.GetTxnStatus(s.bo, lock.TxnID, []byte("z"), currentTS, currentTS, true, false, nil) - c.Assert(err, IsNil) - c.Assert(status.IsCommitted(), IsTrue) - c.Assert(status.CommitTS(), Equals, ts) - - // One key is committed (i), one key is locked (a). Should get committed. 
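-    // (Clarifying note, not part of the original file: in the mocked check below, secondary "a" still
-    // holds its async-commit lock while "i" reports a commit ts, so the resolver concludes the
-    // transaction can be committed and resolves the locks to the committed state.)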
- ts, err = s.store.GetOracle().GetTimestamp(context.Background(), &oracle.Option{TxnScope: oracle.GlobalTxnScope}) - c.Assert(err, IsNil) - commitTs := ts + 10 - - gotCheckA := int64(0) - gotCheckB := int64(0) - gotResolve := int64(0) - gotOther := int64(0) - mock := mockResolveClient{ - inner: s.store.GetTiKVClient(), - onCheckSecondaries: func(req *kvrpcpb.CheckSecondaryLocksRequest) (*tikvrpc.Response, error) { - if req.StartVersion != ts { - return nil, errors.Errorf("Bad start version: %d, expected: %d", req.StartVersion, ts) - } - var resp kvrpcpb.CheckSecondaryLocksResponse - for _, k := range req.Keys { - if bytes.Equal(k, []byte("a")) { - atomic.StoreInt64(&gotCheckA, 1) - - resp = kvrpcpb.CheckSecondaryLocksResponse{ - Locks: []*kvrpcpb.LockInfo{{Key: []byte("a"), PrimaryLock: []byte("z"), LockVersion: ts, UseAsyncCommit: true}}, - CommitTs: commitTs, - } - } else if bytes.Equal(k, []byte("i")) { - atomic.StoreInt64(&gotCheckB, 1) - - resp = kvrpcpb.CheckSecondaryLocksResponse{ - Locks: []*kvrpcpb.LockInfo{}, - CommitTs: commitTs, - } - } else { - fmt.Printf("Got other key: %s\n", k) - atomic.StoreInt64(&gotOther, 1) - } - } - return &tikvrpc.Response{Resp: &resp}, nil - }, - onResolveLock: func(req *kvrpcpb.ResolveLockRequest) (*tikvrpc.Response, error) { - if req.StartVersion != ts { - return nil, errors.Errorf("Bad start version: %d, expected: %d", req.StartVersion, ts) - } - if req.CommitVersion != commitTs { - return nil, errors.Errorf("Bad commit version: %d, expected: %d", req.CommitVersion, commitTs) - } - for _, k := range req.Keys { - if bytes.Equal(k, []byte("a")) || bytes.Equal(k, []byte("z")) { - atomic.StoreInt64(&gotResolve, 1) - } else { - atomic.StoreInt64(&gotOther, 1) - } - } - resp := kvrpcpb.ResolveLockResponse{} - return &tikvrpc.Response{Resp: &resp}, nil - }, - } - s.store.SetTiKVClient(&mock) - - status = lockutil.NewLockStatus([][]byte{[]byte("a"), []byte("i")}, true, 0) - lock = &tikv.Lock{ - Key: []byte("a"), - Primary: []byte("z"), - TxnID: ts, - LockType: kvrpcpb.Op_Put, - UseAsyncCommit: true, - MinCommitTS: ts + 5, - } - - _ = s.beginAsyncCommit(c) - - err = resolver.ResolveLockAsync(s.bo, lock, status) - c.Assert(err, IsNil) - c.Assert(gotCheckA, Equals, int64(1)) - c.Assert(gotCheckB, Equals, int64(1)) - c.Assert(gotOther, Equals, int64(0)) - c.Assert(gotResolve, Equals, int64(1)) - - // One key has been rolled back (b), one is locked (a). Should be rolled back. 
- ts, err = s.store.GetOracle().GetTimestamp(context.Background(), &oracle.Option{TxnScope: oracle.GlobalTxnScope}) - c.Assert(err, IsNil) - commitTs = ts + 10 - - gotCheckA = int64(0) - gotCheckB = int64(0) - gotResolve = int64(0) - gotOther = int64(0) - mock.onResolveLock = func(req *kvrpcpb.ResolveLockRequest) (*tikvrpc.Response, error) { - if req.StartVersion != ts { - return nil, errors.Errorf("Bad start version: %d, expected: %d", req.StartVersion, ts) - } - if req.CommitVersion != commitTs { - return nil, errors.Errorf("Bad commit version: %d, expected: 0", req.CommitVersion) - } - for _, k := range req.Keys { - if bytes.Equal(k, []byte("a")) || bytes.Equal(k, []byte("z")) { - atomic.StoreInt64(&gotResolve, 1) - } else { - atomic.StoreInt64(&gotOther, 1) - } - } - resp := kvrpcpb.ResolveLockResponse{} - return &tikvrpc.Response{Resp: &resp}, nil - } - - lock.TxnID = ts - lock.MinCommitTS = ts + 5 - - err = resolver.ResolveLockAsync(s.bo, lock, status) - c.Assert(err, IsNil) - c.Assert(gotCheckA, Equals, int64(1)) - c.Assert(gotCheckB, Equals, int64(1)) - c.Assert(gotResolve, Equals, int64(1)) - c.Assert(gotOther, Equals, int64(0)) -} - -func (s *testAsyncCommitSuite) TestRepeatableRead(c *C) { - var sessionID uint64 = 0 - test := func(isPessimistic bool) { - s.putKV(c, []byte("k1"), []byte("v1"), true) - - sessionID++ - ctx := context.WithValue(context.Background(), util.SessionID, sessionID) - txn1 := s.beginAsyncCommit(c) - txn1.SetPessimistic(isPessimistic) - s.mustGetFromTxn(c, txn1, []byte("k1"), []byte("v1")) - txn1.Set([]byte("k1"), []byte("v2")) - - for i := 0; i < 20; i++ { - _, err := s.store.GetOracle().GetTimestamp(ctx, &oracle.Option{TxnScope: oracle.GlobalTxnScope}) - c.Assert(err, IsNil) - } - - txn2 := s.beginAsyncCommit(c) - s.mustGetFromTxn(c, txn2, []byte("k1"), []byte("v1")) - - err := txn1.Commit(ctx) - c.Assert(err, IsNil) - // Check txn1 is committed in async commit. - c.Assert(txn1.IsAsyncCommit(), IsTrue) - s.mustGetFromTxn(c, txn2, []byte("k1"), []byte("v1")) - err = txn2.Rollback() - c.Assert(err, IsNil) - - txn3 := s.beginAsyncCommit(c) - s.mustGetFromTxn(c, txn3, []byte("k1"), []byte("v2")) - err = txn3.Rollback() - c.Assert(err, IsNil) - } - - test(false) - test(true) -} - -// It's just a simple validation of linearizability. -// Extra tests are needed to test this feature with the control of the TiKV cluster. 
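-// (Added clarification, not part of the original file: with causal consistency disabled, commit
-// timestamps must respect real-time order, so t2, which commits first, must receive the smaller
-// commit TS; the assertion at the end of the test checks exactly this.)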
-func (s *testAsyncCommitSuite) TestAsyncCommitLinearizability(c *C) { - t1 := s.beginAsyncCommitWithLinearizability(c) - t2 := s.beginAsyncCommitWithLinearizability(c) - err := t1.Set([]byte("a"), []byte("a1")) - c.Assert(err, IsNil) - err = t2.Set([]byte("b"), []byte("b1")) - c.Assert(err, IsNil) - ctx := context.WithValue(context.Background(), util.SessionID, uint64(1)) - // t2 commits earlier than t1 - err = t2.Commit(ctx) - c.Assert(err, IsNil) - err = t1.Commit(ctx) - c.Assert(err, IsNil) - commitTS1 := t1.GetCommitTS() - commitTS2 := t2.GetCommitTS() - c.Assert(commitTS2, Less, commitTS1) -} - -// TestAsyncCommitWithMultiDC tests that async commit can only be enabled in global transactions -func (s *testAsyncCommitSuite) TestAsyncCommitWithMultiDC(c *C) { - // It requires setting placement rules to run with TiKV - if *mockstore.WithTiKV { - return - } - - localTxn := s.beginAsyncCommit(c) - err := localTxn.Set([]byte("a"), []byte("a1")) - localTxn.SetScope("bj") - c.Assert(err, IsNil) - ctx := context.WithValue(context.Background(), util.SessionID, uint64(1)) - err = localTxn.Commit(ctx) - c.Assert(err, IsNil) - c.Assert(localTxn.IsAsyncCommit(), IsFalse) - - globalTxn := s.beginAsyncCommit(c) - err = globalTxn.Set([]byte("b"), []byte("b1")) - globalTxn.SetScope(oracle.GlobalTxnScope) - c.Assert(err, IsNil) - err = globalTxn.Commit(ctx) - c.Assert(err, IsNil) - c.Assert(globalTxn.IsAsyncCommit(), IsTrue) -} - -func (s *testAsyncCommitSuite) TestResolveTxnFallbackFromAsyncCommit(c *C) { - keys := [][]byte{[]byte("k0"), []byte("k1")} - values := [][]byte{[]byte("v00"), []byte("v10")} - initTest := func() tikv.CommitterProbe { - t0 := s.begin(c) - err := t0.Set(keys[0], values[0]) - c.Assert(err, IsNil) - err = t0.Set(keys[1], values[1]) - c.Assert(err, IsNil) - err = t0.Commit(context.Background()) - c.Assert(err, IsNil) - - t1 := s.beginAsyncCommit(c) - err = t1.Set(keys[0], []byte("v01")) - c.Assert(err, IsNil) - err = t1.Set(keys[1], []byte("v11")) - c.Assert(err, IsNil) - - committer, err := t1.NewCommitter(1) - c.Assert(err, IsNil) - committer.SetLockTTL(1) - committer.SetUseAsyncCommit() - return committer - } - prewriteKey := func(committer tikv.CommitterProbe, idx int, fallback bool) { - bo := tikv.NewBackofferWithVars(context.Background(), 5000, nil) - loc, err := s.store.GetRegionCache().LocateKey(bo, keys[idx]) - c.Assert(err, IsNil) - req := committer.BuildPrewriteRequest(loc.Region.GetID(), loc.Region.GetConfVer(), loc.Region.GetVer(), - committer.GetMutations().Slice(idx, idx+1), 1) - if fallback { - req.Req.(*kvrpcpb.PrewriteRequest).MaxCommitTs = 1 - } - resp, err := s.store.SendReq(bo, req, loc.Region, 5000) - c.Assert(err, IsNil) - c.Assert(resp.Resp, NotNil) - } - readKey := func(idx int) { - t2 := s.begin(c) - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - val, err := t2.Get(ctx, keys[idx]) - c.Assert(err, IsNil) - c.Assert(val, DeepEquals, values[idx]) - } - - // Case 1: Fallback primary, read primary - committer := initTest() - prewriteKey(committer, 0, true) - prewriteKey(committer, 1, false) - readKey(0) - readKey(1) - - // Case 2: Fallback primary, read secondary - committer = initTest() - prewriteKey(committer, 0, true) - prewriteKey(committer, 1, false) - readKey(1) - readKey(0) - - // Case 3: Fallback secondary, read primary - committer = initTest() - prewriteKey(committer, 0, false) - prewriteKey(committer, 1, true) - readKey(0) - readKey(1) - - // Case 4: Fallback secondary, read secondary - committer = initTest() 
- prewriteKey(committer, 0, false) - prewriteKey(committer, 1, true) - readKey(1) - readKey(0) - - // Case 5: Fallback both, read primary - committer = initTest() - prewriteKey(committer, 0, true) - prewriteKey(committer, 1, true) - readKey(0) - readKey(1) - - // Case 6: Fallback both, read secondary - committer = initTest() - prewriteKey(committer, 0, true) - prewriteKey(committer, 1, true) - readKey(1) - readKey(0) -} - -type mockResolveClient struct { - inner tikv.Client - onResolveLock func(*kvrpcpb.ResolveLockRequest) (*tikvrpc.Response, error) - onCheckSecondaries func(*kvrpcpb.CheckSecondaryLocksRequest) (*tikvrpc.Response, error) -} - -func (m *mockResolveClient) SendRequest(ctx context.Context, addr string, req *tikvrpc.Request, timeout time.Duration) (*tikvrpc.Response, error) { - // Intercept check secondary locks and resolve lock messages if the callback is non-nil. - // If the callback returns (nil, nil), forward to the inner client. - if cr, ok := req.Req.(*kvrpcpb.CheckSecondaryLocksRequest); ok && m.onCheckSecondaries != nil { - result, err := m.onCheckSecondaries(cr) - if result != nil || err != nil { - return result, err - } - } else if rr, ok := req.Req.(*kvrpcpb.ResolveLockRequest); ok && m.onResolveLock != nil { - result, err := m.onResolveLock(rr) - if result != nil || err != nil { - return result, err - } - } - return m.inner.SendRequest(ctx, addr, req, timeout) -} - -func (m *mockResolveClient) Close() error { - return m.inner.Close() -} diff --git a/store/tikv/tests/client_fp_test.go b/store/tikv/tests/client_fp_test.go deleted file mode 100644 index da6a23140bc4b..0000000000000 --- a/store/tikv/tests/client_fp_test.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2021 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package tikv_test - -import ( - "context" - "time" - - "github.com/pingcap/errors" - "github.com/pingcap/kvproto/pkg/kvrpcpb" - "github.com/pingcap/parser/terror" - "github.com/tikv/client-go/v2/client" - "github.com/tikv/client-go/v2/tikvrpc" - "github.com/tikv/client-go/v2/util" -) - -// mock TiKV RPC client that hooks message by failpoint -type fpClient struct { - client.Client -} - -func (c fpClient) SendRequest(ctx context.Context, addr string, req *tikvrpc.Request, timeout time.Duration) (*tikvrpc.Response, error) { - switch req.Type { - case tikvrpc.CmdPrewrite: - if val, err := util.EvalFailpoint("rpcPrewriteResult"); err == nil && val != nil { - switch val.(string) { - case "timeout": - return nil, errors.New("timeout") - case "writeConflict": - return &tikvrpc.Response{ - Resp: &kvrpcpb.PrewriteResponse{Errors: []*kvrpcpb.KeyError{{Conflict: &kvrpcpb.WriteConflict{}}}}, - }, nil - } - } - case tikvrpc.CmdBatchGet: - batchGetReq := req.BatchGet() - if val, err := util.EvalFailpoint("rpcBatchGetResult"); err == nil { - switch val.(string) { - case "keyError": - return &tikvrpc.Response{ - Resp: &kvrpcpb.BatchGetResponse{Error: &kvrpcpb.KeyError{ - Locked: &kvrpcpb.LockInfo{ - PrimaryLock: batchGetReq.Keys[0], - LockVersion: batchGetReq.Version - 1, - Key: batchGetReq.Keys[0], - LockTtl: 50, - TxnSize: 1, - LockType: kvrpcpb.Op_Put, - }, - }}, - }, nil - } - } - case tikvrpc.CmdScan: - kvScanReq := req.Scan() - if val, err := util.EvalFailpoint("rpcScanResult"); err == nil { - switch val.(string) { - case "keyError": - return &tikvrpc.Response{ - Resp: &kvrpcpb.ScanResponse{Error: &kvrpcpb.KeyError{ - Locked: &kvrpcpb.LockInfo{ - PrimaryLock: kvScanReq.StartKey, - LockVersion: kvScanReq.Version - 1, - Key: kvScanReq.StartKey, - LockTtl: 50, - TxnSize: 1, - LockType: kvrpcpb.Op_Put, - }, - }}, - }, nil - } - } - } - - res, err := c.Client.SendRequest(ctx, addr, req, timeout) - - switch req.Type { - case tikvrpc.CmdPrewrite: - if val, err := util.EvalFailpoint("rpcPrewriteTimeout"); err == nil { - if val.(bool) { - return nil, terror.ErrResultUndetermined - } - } - } - return res, err -} diff --git a/store/tikv/tests/delete_range_test.go b/store/tikv/tests/delete_range_test.go deleted file mode 100644 index aa21dce78deec..0000000000000 --- a/store/tikv/tests/delete_range_test.go +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright 2018 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -package tikv_test - -import ( - "bytes" - "context" - "math/rand" - "sort" - - . 
"github.com/pingcap/check" - "github.com/pingcap/tidb/store/mockstore/mockcopr" - "github.com/tikv/client-go/v2/mockstore/cluster" - "github.com/tikv/client-go/v2/mockstore/mocktikv" - "github.com/tikv/client-go/v2/tikv" -) - -type testDeleteRangeSuite struct { - OneByOneSuite - cluster cluster.Cluster - store *tikv.KVStore -} - -var _ = Suite(&testDeleteRangeSuite{}) - -func (s *testDeleteRangeSuite) SetUpTest(c *C) { - client, cluster, pdClient, err := mocktikv.NewTiKVAndPDClient("", mockcopr.NewCoprRPCHandler()) - c.Assert(err, IsNil) - mocktikv.BootstrapWithMultiRegions(cluster, []byte("b"), []byte("c"), []byte("d")) - s.cluster = cluster - store, err := tikv.NewTestTiKVStore(client, pdClient, nil, nil, 0) - c.Check(err, IsNil) - - // TODO: make this possible - // store, err := mockstore.NewMockStore( - // mockstore.WithStoreType(mockstore.MockTiKV), - // mockstore.WithClusterInspector(func(c cluster.Cluster) { - // mockstore.BootstrapWithMultiRegions(c, []byte("b"), []byte("c"), []byte("d")) - // s.cluster = c - // }), - // ) - // c.Assert(err, IsNil) - - s.store = store -} - -func (s *testDeleteRangeSuite) TearDownTest(c *C) { - err := s.store.Close() - c.Assert(err, IsNil) -} - -func (s *testDeleteRangeSuite) checkData(c *C, expectedData map[string]string) { - txn, err := s.store.Begin() - c.Assert(err, IsNil) - it, err := txn.Iter([]byte("a"), nil) - c.Assert(err, IsNil) - - // Scan all data and save into a map - data := map[string]string{} - for it.Valid() { - data[string(it.Key())] = string(it.Value()) - err = it.Next() - c.Assert(err, IsNil) - } - err = txn.Commit(context.Background()) - c.Assert(err, IsNil) - - // Print log - actualKeys := make([]string, 0, len(data)) - expectedKeys := make([]string, 0, len(expectedData)) - for key := range data { - actualKeys = append(actualKeys, key) - } - for key := range expectedData { - expectedKeys = append(expectedKeys, key) - } - sort.Strings(actualKeys) - sort.Strings(expectedKeys) - c.Log("Actual: ", actualKeys) - c.Log("Expected: ", expectedKeys) - - // Assert data in the store is the same as expected - c.Assert(data, DeepEquals, expectedData) -} - -func (s *testDeleteRangeSuite) deleteRange(c *C, startKey []byte, endKey []byte) int { - task := tikv.NewDeleteRangeTask(s.store, startKey, endKey, 1) - - err := task.Execute(context.Background()) - c.Assert(err, IsNil) - - return task.CompletedRegions() -} - -// deleteRangeFromMap deletes all keys in a given range from a map -func deleteRangeFromMap(m map[string]string, startKey []byte, endKey []byte) { - for keyStr := range m { - key := []byte(keyStr) - if bytes.Compare(startKey, key) <= 0 && bytes.Compare(key, endKey) < 0 { - delete(m, keyStr) - } - } -} - -// mustDeleteRange does delete range on both the map and the storage, and assert they are equal after deleting -func (s *testDeleteRangeSuite) mustDeleteRange(c *C, startKey []byte, endKey []byte, expected map[string]string, regions int) { - completedRegions := s.deleteRange(c, startKey, endKey) - deleteRangeFromMap(expected, startKey, endKey) - s.checkData(c, expected) - c.Assert(completedRegions, Equals, regions) -} - -func (s *testDeleteRangeSuite) TestDeleteRange(c *C) { - // Write some key-value pairs - txn, err := s.store.Begin() - c.Assert(err, IsNil) - - testData := map[string]string{} - - // Generate a sequence of keys and random values - for _, i := range []byte("abcd") { - for j := byte('0'); j <= byte('9'); j++ { - key := []byte{i, j} - value := []byte{byte(rand.Intn(256)), byte(rand.Intn(256))} - testData[string(key)] = 
string(value) - err := txn.Set(key, value) - c.Assert(err, IsNil) - } - } - - err = txn.Commit(context.Background()) - c.Assert(err, IsNil) - - s.checkData(c, testData) - - s.mustDeleteRange(c, []byte("b"), []byte("c0"), testData, 2) - s.mustDeleteRange(c, []byte("c11"), []byte("c12"), testData, 1) - s.mustDeleteRange(c, []byte("d0"), []byte("d0"), testData, 0) - s.mustDeleteRange(c, []byte("d0\x00"), []byte("d1\x00"), testData, 1) - s.mustDeleteRange(c, []byte("c5"), []byte("d5"), testData, 2) - s.mustDeleteRange(c, []byte("a"), []byte("z"), testData, 4) -} diff --git a/store/tikv/tests/isolation_test.go b/store/tikv/tests/isolation_test.go deleted file mode 100644 index 0bfc72da14f30..0000000000000 --- a/store/tikv/tests/isolation_test.go +++ /dev/null @@ -1,198 +0,0 @@ -// Copyright 2016 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !race - -package tikv_test - -import ( - "context" - "fmt" - "sort" - "sync" - "time" - - . "github.com/pingcap/check" - "github.com/pingcap/tidb/kv" - "github.com/tikv/client-go/v2/tikv" -) - -// testIsolationSuite represents test isolation suite. -// The test suite takes too long under the race detector. -type testIsolationSuite struct { - OneByOneSuite - store *tikv.KVStore -} - -var _ = Suite(&testIsolationSuite{}) - -func (s *testIsolationSuite) SetUpSuite(c *C) { - s.OneByOneSuite.SetUpSuite(c) - s.store = NewTestStore(c) -} - -func (s *testIsolationSuite) TearDownSuite(c *C) { - s.store.Close() - s.OneByOneSuite.TearDownSuite(c) -} - -type writeRecord struct { - startTS uint64 - commitTS uint64 -} - -type writeRecords []writeRecord - -func (r writeRecords) Len() int { return len(r) } -func (r writeRecords) Swap(i, j int) { r[i], r[j] = r[j], r[i] } -func (r writeRecords) Less(i, j int) bool { return r[i].startTS <= r[j].startTS } - -func (s *testIsolationSuite) SetWithRetry(c *C, k, v []byte) writeRecord { - for { - txnRaw, err := s.store.Begin() - c.Assert(err, IsNil) - - txn := tikv.TxnProbe{KVTxn: txnRaw} - - err = txn.Set(k, v) - c.Assert(err, IsNil) - - err = txn.Commit(context.Background()) - if err == nil { - return writeRecord{ - startTS: txn.StartTS(), - commitTS: txn.GetCommitTS(), - } - } - } -} - -type readRecord struct { - startTS uint64 - value []byte -} - -type readRecords []readRecord - -func (r readRecords) Len() int { return len(r) } -func (r readRecords) Swap(i, j int) { r[i], r[j] = r[j], r[i] } -func (r readRecords) Less(i, j int) bool { return r[i].startTS <= r[j].startTS } - -func (s *testIsolationSuite) GetWithRetry(c *C, k []byte) readRecord { - for { - txn, err := s.store.Begin() - c.Assert(err, IsNil) - - val, err := txn.Get(context.TODO(), k) - if err == nil { - return readRecord{ - startTS: txn.StartTS(), - value: val, - } - } - c.Assert(kv.IsTxnRetryableError(err), IsTrue) - } -} - -func (s *testIsolationSuite) TestWriteWriteConflict(c *C) { - const ( - threadCount = 10 - setPerThread = 50 - ) - var ( - mu sync.Mutex - writes []writeRecord - wg sync.WaitGroup - ) - wg.Add(threadCount) - for i := 0; i < threadCount; i++ { - go func() { - defer wg.Done() - for j 
:= 0; j < setPerThread; j++ { - w := s.SetWithRetry(c, []byte("k"), []byte("v")) - mu.Lock() - writes = append(writes, w) - mu.Unlock() - } - }() - } - wg.Wait() - - // Check all transactions' [startTS, commitTS] are not overlapped. - sort.Sort(writeRecords(writes)) - for i := 0; i < len(writes)-1; i++ { - c.Assert(writes[i].commitTS, Less, writes[i+1].startTS) - } -} - -func (s *testIsolationSuite) TestReadWriteConflict(c *C) { - const ( - readThreadCount = 10 - writeCount = 10 - ) - - var ( - writes []writeRecord - mu sync.Mutex - reads []readRecord - wg sync.WaitGroup - ) - - s.SetWithRetry(c, []byte("k"), []byte("0")) - - writeDone := make(chan struct{}) - go func() { - for i := 1; i <= writeCount; i++ { - w := s.SetWithRetry(c, []byte("k"), []byte(fmt.Sprintf("%d", i))) - writes = append(writes, w) - time.Sleep(time.Microsecond * 10) - } - close(writeDone) - }() - - wg.Add(readThreadCount) - for i := 0; i < readThreadCount; i++ { - go func() { - defer wg.Done() - for { - select { - case <-writeDone: - return - default: - } - r := s.GetWithRetry(c, []byte("k")) - mu.Lock() - reads = append(reads, r) - mu.Unlock() - } - }() - } - wg.Wait() - - sort.Sort(readRecords(reads)) - - // Check all reads got the value committed before it's startTS. - var i, j int - for ; i < len(writes); i++ { - for ; j < len(reads); j++ { - w, r := writes[i], reads[j] - if r.startTS >= w.commitTS { - break - } - c.Assert(string(r.value), Equals, fmt.Sprintf("%d", i)) - } - } - for ; j < len(reads); j++ { - c.Assert(string(reads[j].value), Equals, fmt.Sprintf("%d", len(writes))) - } -} diff --git a/store/tikv/tests/lock_test.go b/store/tikv/tests/lock_test.go deleted file mode 100644 index 17f16a9dbca6c..0000000000000 --- a/store/tikv/tests/lock_test.go +++ /dev/null @@ -1,774 +0,0 @@ -// Copyright 2016 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -package tikv_test - -import ( - "bytes" - "context" - "fmt" - "math" - "runtime" - "sync" - "time" - - . 
"github.com/pingcap/check" - "github.com/pingcap/errors" - "github.com/pingcap/failpoint" - deadlockpb "github.com/pingcap/kvproto/pkg/deadlock" - "github.com/pingcap/kvproto/pkg/kvrpcpb" - tikverr "github.com/tikv/client-go/v2/error" - "github.com/tikv/client-go/v2/kv" - "github.com/tikv/client-go/v2/oracle" - "github.com/tikv/client-go/v2/tikv" - "github.com/tikv/client-go/v2/tikvrpc" -) - -var getMaxBackoff = tikv.ConfigProbe{}.GetGetMaxBackoff() - -type testLockSuite struct { - OneByOneSuite - store tikv.StoreProbe -} - -var _ = Suite(&testLockSuite{}) - -func (s *testLockSuite) SetUpTest(c *C) { - s.store = tikv.StoreProbe{KVStore: NewTestStore(c)} -} - -func (s *testLockSuite) TearDownTest(c *C) { - s.store.Close() -} - -func (s *testLockSuite) lockKey(c *C, key, value, primaryKey, primaryValue []byte, commitPrimary bool) (uint64, uint64) { - txn, err := s.store.Begin() - c.Assert(err, IsNil) - if len(value) > 0 { - err = txn.Set(key, value) - } else { - err = txn.Delete(key) - } - c.Assert(err, IsNil) - - if len(primaryValue) > 0 { - err = txn.Set(primaryKey, primaryValue) - } else { - err = txn.Delete(primaryKey) - } - c.Assert(err, IsNil) - tpc, err := txn.NewCommitter(0) - c.Assert(err, IsNil) - tpc.SetPrimaryKey(primaryKey) - - ctx := context.Background() - err = tpc.PrewriteAllMutations(ctx) - c.Assert(err, IsNil) - - if commitPrimary { - commitTS, err := s.store.GetOracle().GetTimestamp(ctx, &oracle.Option{TxnScope: oracle.GlobalTxnScope}) - c.Assert(err, IsNil) - tpc.SetCommitTS(commitTS) - err = tpc.CommitMutations(ctx) - c.Assert(err, IsNil) - } - return txn.StartTS(), tpc.GetCommitTS() -} - -func (s *testLockSuite) putAlphabets(c *C) { - for ch := byte('a'); ch <= byte('z'); ch++ { - s.putKV(c, []byte{ch}, []byte{ch}) - } -} - -func (s *testLockSuite) putKV(c *C, key, value []byte) (uint64, uint64) { - txn, err := s.store.Begin() - c.Assert(err, IsNil) - err = txn.Set(key, value) - c.Assert(err, IsNil) - err = txn.Commit(context.Background()) - c.Assert(err, IsNil) - return txn.StartTS(), txn.GetCommitTS() -} - -func (s *testLockSuite) prepareAlphabetLocks(c *C) { - s.putKV(c, []byte("c"), []byte("cc")) - s.lockKey(c, []byte("c"), []byte("c"), []byte("z1"), []byte("z1"), true) - s.lockKey(c, []byte("d"), []byte("dd"), []byte("z2"), []byte("z2"), false) - s.lockKey(c, []byte("foo"), []byte("foo"), []byte("z3"), []byte("z3"), false) - s.putKV(c, []byte("bar"), []byte("bar")) - s.lockKey(c, []byte("bar"), nil, []byte("z4"), []byte("z4"), true) -} - -func (s *testLockSuite) TestScanLockResolveWithGet(c *C) { - s.putAlphabets(c) - s.prepareAlphabetLocks(c) - - txn, err := s.store.Begin() - c.Assert(err, IsNil) - for ch := byte('a'); ch <= byte('z'); ch++ { - v, err := txn.Get(context.TODO(), []byte{ch}) - c.Assert(err, IsNil) - c.Assert(v, BytesEquals, []byte{ch}) - } -} - -func (s *testLockSuite) TestScanLockResolveWithSeek(c *C) { - s.putAlphabets(c) - s.prepareAlphabetLocks(c) - - txn, err := s.store.Begin() - c.Assert(err, IsNil) - iter, err := txn.Iter([]byte("a"), nil) - c.Assert(err, IsNil) - for ch := byte('a'); ch <= byte('z'); ch++ { - c.Assert(iter.Valid(), IsTrue) - c.Assert(iter.Key(), BytesEquals, []byte{ch}) - c.Assert(iter.Value(), BytesEquals, []byte{ch}) - c.Assert(iter.Next(), IsNil) - } -} - -func (s *testLockSuite) TestScanLockResolveWithSeekKeyOnly(c *C) { - s.putAlphabets(c) - s.prepareAlphabetLocks(c) - - txn, err := s.store.Begin() - c.Assert(err, IsNil) - txn.GetSnapshot().SetKeyOnly(true) - iter, err := txn.Iter([]byte("a"), nil) - c.Assert(err, 
IsNil) - for ch := byte('a'); ch <= byte('z'); ch++ { - c.Assert(iter.Valid(), IsTrue) - c.Assert(iter.Key(), BytesEquals, []byte{ch}) - c.Assert(iter.Next(), IsNil) - } -} - -func (s *testLockSuite) TestScanLockResolveWithBatchGet(c *C) { - s.putAlphabets(c) - s.prepareAlphabetLocks(c) - - var keys [][]byte - for ch := byte('a'); ch <= byte('z'); ch++ { - keys = append(keys, []byte{ch}) - } - - txn, err := s.store.Begin() - c.Assert(err, IsNil) - m, err := toTiDBTxn(&txn).BatchGet(context.Background(), toTiDBKeys(keys)) - c.Assert(err, IsNil) - c.Assert(len(m), Equals, int('z'-'a'+1)) - for ch := byte('a'); ch <= byte('z'); ch++ { - k := []byte{ch} - c.Assert(m[string(k)], BytesEquals, k) - } -} - -func (s *testLockSuite) TestCleanLock(c *C) { - for ch := byte('a'); ch <= byte('z'); ch++ { - k := []byte{ch} - s.lockKey(c, k, k, k, k, false) - } - txn, err := s.store.Begin() - c.Assert(err, IsNil) - for ch := byte('a'); ch <= byte('z'); ch++ { - err = txn.Set([]byte{ch}, []byte{ch + 1}) - c.Assert(err, IsNil) - } - err = txn.Commit(context.Background()) - c.Assert(err, IsNil) -} - -func (s *testLockSuite) TestGetTxnStatus(c *C) { - startTS, commitTS := s.putKV(c, []byte("a"), []byte("a")) - status, err := s.store.GetLockResolver().GetTxnStatus(startTS, startTS, []byte("a")) - c.Assert(err, IsNil) - c.Assert(status.IsCommitted(), IsTrue) - c.Assert(status.CommitTS(), Equals, commitTS) - - startTS, commitTS = s.lockKey(c, []byte("a"), []byte("a"), []byte("a"), []byte("a"), true) - status, err = s.store.GetLockResolver().GetTxnStatus(startTS, startTS, []byte("a")) - c.Assert(err, IsNil) - c.Assert(status.IsCommitted(), IsTrue) - c.Assert(status.CommitTS(), Equals, commitTS) - - startTS, _ = s.lockKey(c, []byte("a"), []byte("a"), []byte("a"), []byte("a"), false) - status, err = s.store.GetLockResolver().GetTxnStatus(startTS, startTS, []byte("a")) - c.Assert(err, IsNil) - c.Assert(status.IsCommitted(), IsFalse) - c.Assert(status.TTL(), Greater, uint64(0), Commentf("action:%s", status.Action())) -} - -func (s *testLockSuite) TestCheckTxnStatusTTL(c *C) { - txn, err := s.store.Begin() - c.Assert(err, IsNil) - txn.Set([]byte("key"), []byte("value")) - s.prewriteTxnWithTTL(c, txn, 1000) - - bo := tikv.NewBackofferWithVars(context.Background(), tikv.PrewriteMaxBackoff, nil) - lr := s.store.NewLockResolver() - callerStartTS, err := s.store.GetOracle().GetTimestamp(bo.GetCtx(), &oracle.Option{TxnScope: oracle.GlobalTxnScope}) - c.Assert(err, IsNil) - - // Check the lock TTL of a transaction. - status, err := lr.LockResolver.GetTxnStatus(txn.StartTS(), callerStartTS, []byte("key")) - c.Assert(err, IsNil) - c.Assert(status.IsCommitted(), IsFalse) - c.Assert(status.TTL(), Greater, uint64(0)) - c.Assert(status.CommitTS(), Equals, uint64(0)) - - // Rollback the txn. - lock := s.mustGetLock(c, []byte("key")) - err = s.store.NewLockResolver().ResolveLock(context.Background(), lock) - c.Assert(err, IsNil) - - // Check its status is rollbacked. - status, err = lr.LockResolver.GetTxnStatus(txn.StartTS(), callerStartTS, []byte("key")) - c.Assert(err, IsNil) - c.Assert(status.TTL(), Equals, uint64(0)) - c.Assert(status.CommitTS(), Equals, uint64(0)) - c.Assert(status.Action(), Equals, kvrpcpb.Action_NoAction) - - // Check a committed txn. 
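-    // (Clarifying note, not part of the original file: for a committed transaction the status check
-    // reports a zero TTL together with the real commit ts, as asserted below.)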
- startTS, commitTS := s.putKV(c, []byte("a"), []byte("a")) - status, err = lr.LockResolver.GetTxnStatus(startTS, callerStartTS, []byte("a")) - c.Assert(err, IsNil) - c.Assert(status.TTL(), Equals, uint64(0)) - c.Assert(status.CommitTS(), Equals, commitTS) -} - -func (s *testLockSuite) TestTxnHeartBeat(c *C) { - txn, err := s.store.Begin() - c.Assert(err, IsNil) - txn.Set([]byte("key"), []byte("value")) - s.prewriteTxn(c, txn) - - newTTL, err := s.store.SendTxnHeartbeat(context.Background(), []byte("key"), txn.StartTS(), 6666) - c.Assert(err, IsNil) - c.Assert(newTTL, Equals, uint64(6666)) - - newTTL, err = s.store.SendTxnHeartbeat(context.Background(), []byte("key"), txn.StartTS(), 5555) - c.Assert(err, IsNil) - c.Assert(newTTL, Equals, uint64(6666)) - - lock := s.mustGetLock(c, []byte("key")) - err = s.store.NewLockResolver().ResolveLock(context.Background(), lock) - c.Assert(err, IsNil) - - newTTL, err = s.store.SendTxnHeartbeat(context.Background(), []byte("key"), txn.StartTS(), 6666) - c.Assert(err, NotNil) - c.Assert(newTTL, Equals, uint64(0)) -} - -func (s *testLockSuite) TestCheckTxnStatus(c *C) { - txn, err := s.store.Begin() - c.Assert(err, IsNil) - txn.Set([]byte("key"), []byte("value")) - txn.Set([]byte("second"), []byte("xxx")) - s.prewriteTxnWithTTL(c, txn, 1000) - - o := s.store.GetOracle() - currentTS, err := o.GetTimestamp(context.Background(), &oracle.Option{TxnScope: oracle.GlobalTxnScope}) - c.Assert(err, IsNil) - c.Assert(currentTS, Greater, txn.StartTS()) - - bo := tikv.NewBackofferWithVars(context.Background(), tikv.PrewriteMaxBackoff, nil) - resolver := s.store.NewLockResolver() - // Call getTxnStatus to check the lock status. - status, err := resolver.GetTxnStatus(bo, txn.StartTS(), []byte("key"), currentTS, currentTS, true, false, nil) - c.Assert(err, IsNil) - c.Assert(status.IsCommitted(), IsFalse) - c.Assert(status.TTL(), Greater, uint64(0)) - c.Assert(status.CommitTS(), Equals, uint64(0)) - c.Assert(status.Action(), Equals, kvrpcpb.Action_MinCommitTSPushed) - - // Test the ResolveLocks API - lock := s.mustGetLock(c, []byte("second")) - timeBeforeExpire, _, err := resolver.ResolveLocks(bo, currentTS, []*tikv.Lock{lock}) - c.Assert(err, IsNil) - c.Assert(timeBeforeExpire > int64(0), IsTrue) - - // Force rollback the lock using lock.TTL = 0. - lock.TTL = uint64(0) - timeBeforeExpire, _, err = resolver.ResolveLocks(bo, currentTS, []*tikv.Lock{lock}) - c.Assert(err, IsNil) - c.Assert(timeBeforeExpire, Equals, int64(0)) - - // Then call getTxnStatus again and check the lock status. - currentTS, err = o.GetTimestamp(context.Background(), &oracle.Option{TxnScope: oracle.GlobalTxnScope}) - c.Assert(err, IsNil) - status, err = s.store.NewLockResolver().GetTxnStatus(bo, txn.StartTS(), []byte("key"), currentTS, 0, true, false, nil) - c.Assert(err, IsNil) - c.Assert(status.TTL(), Equals, uint64(0)) - c.Assert(status.CommitTS(), Equals, uint64(0)) - c.Assert(status.Action(), Equals, kvrpcpb.Action_NoAction) - - // Call getTxnStatus on a committed transaction. 
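- // As with the live-lock checks above, a committed start ts is expected to yield TTL == 0 and the transaction's real commit ts.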
- startTS, commitTS := s.putKV(c, []byte("a"), []byte("a")) - status, err = s.store.NewLockResolver().GetTxnStatus(bo, startTS, []byte("a"), currentTS, currentTS, true, false, nil) - c.Assert(err, IsNil) - c.Assert(status.TTL(), Equals, uint64(0)) - c.Assert(status.CommitTS(), Equals, commitTS) -} - -func (s *testLockSuite) TestCheckTxnStatusNoWait(c *C) { - txn, err := s.store.Begin() - c.Assert(err, IsNil) - txn.Set([]byte("key"), []byte("value")) - txn.Set([]byte("second"), []byte("xxx")) - committer, err := txn.NewCommitter(0) - c.Assert(err, IsNil) - // Increase lock TTL to make CI more stable. - committer.SetLockTTLByTimeAndSize(txn.GetStartTime(), 200*1024*1024) - - // Only prewrite the secondary key to simulate a concurrent prewrite case: - // prewrite secondary regions success and prewrite the primary region is pending. - err = committer.PrewriteMutations(context.Background(), committer.MutationsOfKeys([][]byte{[]byte("second")})) - c.Assert(err, IsNil) - - o := s.store.GetOracle() - currentTS, err := o.GetTimestamp(context.Background(), &oracle.Option{TxnScope: oracle.GlobalTxnScope}) - c.Assert(err, IsNil) - bo := tikv.NewBackofferWithVars(context.Background(), tikv.PrewriteMaxBackoff, nil) - resolver := s.store.NewLockResolver() - - // Call getTxnStatus for the TxnNotFound case. - _, err = resolver.GetTxnStatus(bo, txn.StartTS(), []byte("key"), currentTS, currentTS, false, false, nil) - c.Assert(err, NotNil) - c.Assert(resolver.IsErrorNotFound(err), IsTrue) - - errCh := make(chan error) - go func() { - errCh <- committer.PrewriteMutations(context.Background(), committer.MutationsOfKeys([][]byte{[]byte("key")})) - }() - - lock := &tikv.Lock{ - Key: []byte("second"), - Primary: []byte("key"), - TxnID: txn.StartTS(), - TTL: 100000, - } - // Call getTxnStatusFromLock to cover the retry logic. - status, err := resolver.GetTxnStatusFromLock(bo, lock, currentTS, false) - c.Assert(err, IsNil) - c.Assert(status.TTL(), Greater, uint64(0)) - c.Assert(<-errCh, IsNil) - c.Assert(committer.CleanupMutations(context.Background()), IsNil) - - // Call getTxnStatusFromLock to cover TxnNotFound and retry timeout. 
- startTS, err := o.GetTimestamp(context.Background(), &oracle.Option{TxnScope: oracle.GlobalTxnScope}) - c.Assert(err, IsNil) - lock = &tikv.Lock{ - Key: []byte("second"), - Primary: []byte("key_not_exist"), - TxnID: startTS, - TTL: 1000, - } - status, err = resolver.GetTxnStatusFromLock(bo, lock, currentTS, false) - c.Assert(err, IsNil) - c.Assert(status.TTL(), Equals, uint64(0)) - c.Assert(status.CommitTS(), Equals, uint64(0)) - c.Assert(status.Action(), Equals, kvrpcpb.Action_LockNotExistRollback) -} - -func (s *testLockSuite) prewriteTxn(c *C, txn tikv.TxnProbe) { - s.prewriteTxnWithTTL(c, txn, 0) -} - -func (s *testLockSuite) prewriteTxnWithTTL(c *C, txn tikv.TxnProbe, ttl uint64) { - committer, err := txn.NewCommitter(0) - c.Assert(err, IsNil) - if ttl > 0 { - elapsed := time.Since(txn.GetStartTime()) / time.Millisecond - committer.SetLockTTL(uint64(elapsed) + ttl) - } - err = committer.PrewriteAllMutations(context.Background()) - c.Assert(err, IsNil) -} - -func (s *testLockSuite) mustGetLock(c *C, key []byte) *tikv.Lock { - ver, err := s.store.CurrentTimestamp(oracle.GlobalTxnScope) - c.Assert(err, IsNil) - bo := tikv.NewBackofferWithVars(context.Background(), getMaxBackoff, nil) - req := tikvrpc.NewRequest(tikvrpc.CmdGet, &kvrpcpb.GetRequest{ - Key: key, - Version: ver, - }) - loc, err := s.store.GetRegionCache().LocateKey(bo, key) - c.Assert(err, IsNil) - resp, err := s.store.SendReq(bo, req, loc.Region, tikv.ReadTimeoutShort) - c.Assert(err, IsNil) - c.Assert(resp.Resp, NotNil) - keyErr := resp.Resp.(*kvrpcpb.GetResponse).GetError() - c.Assert(keyErr, NotNil) - lock, err := tikv.LockProbe{}.ExtractLockFromKeyErr(keyErr) - c.Assert(err, IsNil) - return lock -} - -func (s *testLockSuite) ttlEquals(c *C, x, y uint64) { - // NOTE: On ppc64le, all integers are by default unsigned integers, - // hence we have to separately cast the value returned by "math.Abs()" function for ppc64le. - if runtime.GOARCH == "ppc64le" { - c.Assert(int(-math.Abs(float64(x-y))), LessEqual, 2) - } else { - c.Assert(int(math.Abs(float64(x-y))), LessEqual, 2) - } - -} - -func (s *testLockSuite) TestLockTTL(c *C) { - defaultLockTTL := tikv.ConfigProbe{}.GetDefaultLockTTL() - ttlFactor := tikv.ConfigProbe{}.GetTTLFactor() - - txn, err := s.store.Begin() - c.Assert(err, IsNil) - txn.Set([]byte("key"), []byte("value")) - time.Sleep(time.Millisecond) - s.prewriteTxnWithTTL(c, txn, 3100) - l := s.mustGetLock(c, []byte("key")) - c.Assert(l.TTL >= defaultLockTTL, IsTrue) - - // Huge txn has a greater TTL. - txn, err = s.store.Begin() - start := time.Now() - c.Assert(err, IsNil) - txn.Set([]byte("key"), []byte("value")) - for i := 0; i < 2048; i++ { - k, v := randKV(1024, 1024) - txn.Set([]byte(k), []byte(v)) - } - s.prewriteTxn(c, txn) - l = s.mustGetLock(c, []byte("key")) - s.ttlEquals(c, l.TTL, uint64(ttlFactor*2)+uint64(time.Since(start)/time.Millisecond)) - - // Txn with long read time. 
- start = time.Now() - txn, err = s.store.Begin() - c.Assert(err, IsNil) - time.Sleep(time.Millisecond * 50) - txn.Set([]byte("key"), []byte("value")) - s.prewriteTxn(c, txn) - l = s.mustGetLock(c, []byte("key")) - s.ttlEquals(c, l.TTL, defaultLockTTL+uint64(time.Since(start)/time.Millisecond)) -} - -func (s *testLockSuite) TestBatchResolveLocks(c *C) { - // The first transaction is a normal transaction with a long TTL - txn, err := s.store.Begin() - c.Assert(err, IsNil) - txn.Set([]byte("k1"), []byte("v1")) - txn.Set([]byte("k2"), []byte("v2")) - s.prewriteTxnWithTTL(c, txn, 20000) - - // The second transaction is an async commit transaction - txn, err = s.store.Begin() - c.Assert(err, IsNil) - txn.Set([]byte("k3"), []byte("v3")) - txn.Set([]byte("k4"), []byte("v4")) - committer, err := txn.NewCommitter(0) - c.Assert(err, IsNil) - committer.SetUseAsyncCommit() - committer.SetLockTTL(20000) - committer.PrewriteAllMutations(context.Background()) - c.Assert(err, IsNil) - - var locks []*tikv.Lock - for _, key := range []string{"k1", "k2", "k3", "k4"} { - l := s.mustGetLock(c, []byte(key)) - locks = append(locks, l) - } - - // Locks may not expired - msBeforeLockExpired := s.store.GetOracle().UntilExpired(locks[0].TxnID, locks[1].TTL, &oracle.Option{TxnScope: oracle.GlobalTxnScope}) - c.Assert(msBeforeLockExpired, Greater, int64(0)) - msBeforeLockExpired = s.store.GetOracle().UntilExpired(locks[3].TxnID, locks[3].TTL, &oracle.Option{TxnScope: oracle.GlobalTxnScope}) - c.Assert(msBeforeLockExpired, Greater, int64(0)) - - lr := s.store.NewLockResolver() - bo := tikv.NewGcResolveLockMaxBackoffer(context.Background()) - loc, err := s.store.GetRegionCache().LocateKey(bo, locks[0].Primary) - c.Assert(err, IsNil) - // Check BatchResolveLocks resolve the lock even the ttl is not expired. - success, err := lr.BatchResolveLocks(bo, locks, loc.Region) - c.Assert(success, IsTrue) - c.Assert(err, IsNil) - - txn, err = s.store.Begin() - c.Assert(err, IsNil) - // transaction 1 is rolled back - _, err = txn.Get(context.Background(), []byte("k1")) - c.Assert(err, Equals, tikverr.ErrNotExist) - _, err = txn.Get(context.Background(), []byte("k2")) - c.Assert(err, Equals, tikverr.ErrNotExist) - // transaction 2 is committed - v, err := txn.Get(context.Background(), []byte("k3")) - c.Assert(err, IsNil) - c.Assert(bytes.Equal(v, []byte("v3")), IsTrue) - v, err = txn.Get(context.Background(), []byte("k4")) - c.Assert(err, IsNil) - c.Assert(bytes.Equal(v, []byte("v4")), IsTrue) -} - -func (s *testLockSuite) TestNewLockZeroTTL(c *C) { - l := tikv.NewLock(&kvrpcpb.LockInfo{}) - c.Assert(l.TTL, Equals, uint64(0)) -} - -func init() { - // Speed up tests. 
- tikv.ConfigProbe{}.SetOracleUpdateInterval(2) -} - -func (s *testLockSuite) TestZeroMinCommitTS(c *C) { - txn, err := s.store.Begin() - c.Assert(err, IsNil) - txn.Set([]byte("key"), []byte("value")) - bo := tikv.NewBackofferWithVars(context.Background(), tikv.PrewriteMaxBackoff, nil) - - mockValue := fmt.Sprintf(`return(%d)`, txn.StartTS()) - c.Assert(failpoint.Enable("tikvclient/mockZeroCommitTS", mockValue), IsNil) - s.prewriteTxnWithTTL(c, txn, 1000) - c.Assert(failpoint.Disable("tikvclient/mockZeroCommitTS"), IsNil) - - lock := s.mustGetLock(c, []byte("key")) - expire, pushed, err := s.store.NewLockResolver().ResolveLocks(bo, 0, []*tikv.Lock{lock}) - c.Assert(err, IsNil) - c.Assert(pushed, HasLen, 0) - c.Assert(expire, Greater, int64(0)) - - expire, pushed, err = s.store.NewLockResolver().ResolveLocks(bo, math.MaxUint64, []*tikv.Lock{lock}) - c.Assert(err, IsNil) - c.Assert(pushed, HasLen, 1) - c.Assert(expire, Greater, int64(0)) - - // Clean up this test. - lock.TTL = uint64(0) - expire, _, err = s.store.NewLockResolver().ResolveLocks(bo, 0, []*tikv.Lock{lock}) - c.Assert(err, IsNil) - c.Assert(expire, Equals, int64(0)) -} - -func (s *testLockSuite) prepareTxnFallenBackFromAsyncCommit(c *C) { - txn, err := s.store.Begin() - c.Assert(err, IsNil) - err = txn.Set([]byte("fb1"), []byte("1")) - c.Assert(err, IsNil) - err = txn.Set([]byte("fb2"), []byte("2")) - c.Assert(err, IsNil) - - committer, err := txn.NewCommitter(1) - c.Assert(err, IsNil) - c.Assert(committer.GetMutations().Len(), Equals, 2) - committer.SetLockTTL(0) - committer.SetUseAsyncCommit() - committer.SetCommitTS(committer.GetStartTS() + (100 << 18)) // 100ms - - err = committer.PrewriteMutations(context.Background(), committer.GetMutations().Slice(0, 1)) - c.Assert(err, IsNil) - c.Assert(committer.IsAsyncCommit(), IsTrue) - - // Set an invalid maxCommitTS to produce MaxCommitTsTooLarge - committer.SetMaxCommitTS(committer.GetStartTS() - 1) - err = committer.PrewriteMutations(context.Background(), committer.GetMutations().Slice(1, 2)) - c.Assert(err, IsNil) - c.Assert(committer.IsAsyncCommit(), IsFalse) // Fallback due to MaxCommitTsTooLarge -} - -func (s *testLockSuite) TestCheckLocksFallenBackFromAsyncCommit(c *C) { - s.prepareTxnFallenBackFromAsyncCommit(c) - - lock := s.mustGetLock(c, []byte("fb1")) - c.Assert(lock.UseAsyncCommit, IsTrue) - bo := tikv.NewBackoffer(context.Background(), getMaxBackoff) - lr := s.store.NewLockResolver() - status, err := lr.GetTxnStatusFromLock(bo, lock, 0, false) - c.Assert(err, IsNil) - c.Assert(tikv.LockProbe{}.GetPrimaryKeyFromTxnStatus(status), DeepEquals, []byte("fb1")) - - err = lr.CheckAllSecondaries(bo, lock, &status) - c.Assert(lr.IsNonAsyncCommitLock(err), IsTrue) - - status, err = lr.GetTxnStatusFromLock(bo, lock, 0, true) - c.Assert(err, IsNil) - c.Assert(status.Action(), Equals, kvrpcpb.Action_TTLExpireRollback) - c.Assert(status.TTL(), Equals, uint64(0)) -} - -func (s *testLockSuite) TestResolveTxnFallenBackFromAsyncCommit(c *C) { - s.prepareTxnFallenBackFromAsyncCommit(c) - - lock := s.mustGetLock(c, []byte("fb1")) - c.Assert(lock.UseAsyncCommit, IsTrue) - bo := tikv.NewBackoffer(context.Background(), getMaxBackoff) - expire, pushed, err := s.store.NewLockResolver().ResolveLocks(bo, 0, []*tikv.Lock{lock}) - c.Assert(err, IsNil) - c.Assert(expire, Equals, int64(0)) - c.Assert(len(pushed), Equals, 0) - - t3, err := s.store.Begin() - c.Assert(err, IsNil) - _, err = t3.Get(context.Background(), []byte("fb1")) - c.Assert(tikverr.IsErrNotFound(err), IsTrue) - _, err = 
t3.Get(context.Background(), []byte("fb2")) - c.Assert(tikverr.IsErrNotFound(err), IsTrue) -} - -func (s *testLockSuite) TestBatchResolveTxnFallenBackFromAsyncCommit(c *C) { - s.prepareTxnFallenBackFromAsyncCommit(c) - - lock := s.mustGetLock(c, []byte("fb1")) - c.Assert(lock.UseAsyncCommit, IsTrue) - bo := tikv.NewBackoffer(context.Background(), getMaxBackoff) - loc, err := s.store.GetRegionCache().LocateKey(bo, []byte("fb1")) - c.Assert(err, IsNil) - ok, err := s.store.NewLockResolver().BatchResolveLocks(bo, []*tikv.Lock{lock}, loc.Region) - c.Assert(err, IsNil) - c.Assert(ok, IsTrue) - - t3, err := s.store.Begin() - c.Assert(err, IsNil) - _, err = t3.Get(context.Background(), []byte("fb1")) - c.Assert(tikverr.IsErrNotFound(err), IsTrue) - _, err = t3.Get(context.Background(), []byte("fb2")) - c.Assert(tikverr.IsErrNotFound(err), IsTrue) -} - -func (s *testLockSuite) TestDeadlockReportWaitChain(c *C) { - // Utilities to make the test logic clear and simple. - type txnWrapper struct { - tikv.TxnProbe - wg sync.WaitGroup - } - - makeLockCtx := func(txn *txnWrapper, resourceGroupTag string) *kv.LockCtx { - return &kv.LockCtx{ - ForUpdateTS: txn.StartTS(), - WaitStartTime: time.Now(), - LockWaitTime: 1000, - ResourceGroupTag: []byte(resourceGroupTag), - } - } - - // Prepares several transactions and each locks a key. - prepareTxns := func(num int) []*txnWrapper { - res := make([]*txnWrapper, 0, num) - for i := 0; i < num; i++ { - txnProbe, err := s.store.Begin() - c.Assert(err, IsNil) - txn := &txnWrapper{TxnProbe: txnProbe} - txn.SetPessimistic(true) - tag := fmt.Sprintf("tag-init%v", i) - key := []byte{'k', byte(i)} - err = txn.LockKeys(context.Background(), makeLockCtx(txn, tag), key) - c.Assert(err, IsNil) - - res = append(res, txn) - } - return res - } - - // Let the i-th trnasaction lock the key that has been locked by j-th transaction - tryLock := func(txns []*txnWrapper, i int, j int) error { - c.Logf("txn %v try locking %v", i, j) - txn := txns[i] - tag := fmt.Sprintf("tag-%v-%v", i, j) - key := []byte{'k', byte(j)} - return txn.LockKeys(context.Background(), makeLockCtx(txn, tag), key) - } - - // Asserts the i-th transaction waits for the j-th transaction. - makeWaitFor := func(txns []*txnWrapper, i int, j int) { - txns[i].wg.Add(1) - go func() { - defer txns[i].wg.Done() - err := tryLock(txns, i, j) - // After the lock being waited for is released, the transaction returns a WriteConflict error - // unconditionally, which is by design. - c.Assert(err, NotNil) - c.Logf("txn %v wait for %v finished, err: %s", i, j, err.Error()) - _, ok := errors.Cause(err).(*tikverr.ErrWriteConflict) - c.Assert(ok, IsTrue) - }() - } - - waitAndRollback := func(txns []*txnWrapper, i int) { - // It's expected that each transaction should be rolled back after its blocker, so that `Rollback` will not - // run when there's concurrent `LockKeys` running. - // If it's blocked on the `Wait` forever, it means the transaction's blocker is not rolled back. - c.Logf("rollback txn %v", i) - txns[i].wg.Wait() - err := txns[i].Rollback() - c.Assert(err, IsNil) - } - - // Check the given WaitForEntry is caused by txn[i] waiting for txn[j]. 
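- // An entry records the waiter's start ts, the blocker's start ts, the contended key, and the resource group tag attached to the lock attempt.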
- checkWaitChainEntry := func(txns []*txnWrapper, entry *deadlockpb.WaitForEntry, i, j int) { - c.Assert(entry.Txn, Equals, txns[i].StartTS()) - c.Assert(entry.WaitForTxn, Equals, txns[j].StartTS()) - c.Assert(entry.Key, BytesEquals, []byte{'k', byte(j)}) - c.Assert(string(entry.ResourceGroupTag), Equals, fmt.Sprintf("tag-%v-%v", i, j)) - } - - c.Log("test case 1: 1->0->1") - - txns := prepareTxns(2) - - makeWaitFor(txns, 0, 1) - // Sleep for a while to make sure it has been blocked. - time.Sleep(time.Millisecond * 100) - - // txn2 tries locking k1 and encounters deadlock error. - err := tryLock(txns, 1, 0) - c.Assert(err, NotNil) - dl, ok := errors.Cause(err).(*tikverr.ErrDeadlock) - c.Assert(ok, IsTrue) - - waitChain := dl.GetWaitChain() - c.Assert(len(waitChain), Equals, 2) - checkWaitChainEntry(txns, waitChain[0], 0, 1) - checkWaitChainEntry(txns, waitChain[1], 1, 0) - - // Each transaction should be rolled back after its blocker being rolled back - waitAndRollback(txns, 1) - waitAndRollback(txns, 0) - - c.Log("test case 2: 3->2->0->1->3") - txns = prepareTxns(4) - - makeWaitFor(txns, 0, 1) - makeWaitFor(txns, 2, 0) - makeWaitFor(txns, 1, 3) - // Sleep for a while to make sure it has been blocked. - time.Sleep(time.Millisecond * 100) - - err = tryLock(txns, 3, 2) - c.Assert(err, NotNil) - dl, ok = errors.Cause(err).(*tikverr.ErrDeadlock) - c.Assert(ok, IsTrue) - - waitChain = dl.GetWaitChain() - c.Assert(len(waitChain), Equals, 4) - c.Logf("wait chain: \n** %v\n**%v\n**%v\n**%v\n", waitChain[0], waitChain[1], waitChain[2], waitChain[3]) - checkWaitChainEntry(txns, waitChain[0], 2, 0) - checkWaitChainEntry(txns, waitChain[1], 0, 1) - checkWaitChainEntry(txns, waitChain[2], 1, 3) - checkWaitChainEntry(txns, waitChain[3], 3, 2) - - // Each transaction should be rolled back after its blocker being rolled back - waitAndRollback(txns, 3) - waitAndRollback(txns, 1) - waitAndRollback(txns, 0) - waitAndRollback(txns, 2) -} diff --git a/store/tikv/tests/prewrite_test.go b/store/tikv/tests/prewrite_test.go deleted file mode 100644 index 126bed5c2a050..0000000000000 --- a/store/tikv/tests/prewrite_test.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2020 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -package tikv_test - -import ( - . 
"github.com/pingcap/check" - "github.com/pingcap/kvproto/pkg/kvrpcpb" - "github.com/pingcap/tidb/store/mockstore/unistore" - "github.com/tikv/client-go/v2/tikv" -) - -type testPrewriteSuite struct { - store *tikv.KVStore -} - -var _ = Suite(&testPrewriteSuite{}) - -func (s *testPrewriteSuite) SetUpTest(c *C) { - client, pdClient, cluster, err := unistore.New("") - c.Assert(err, IsNil) - unistore.BootstrapWithSingleStore(cluster) - store, err := tikv.NewTestTiKVStore(client, pdClient, nil, nil, 0) - c.Assert(err, IsNil) - s.store = store -} - -func (s *testPrewriteSuite) TestSetMinCommitTSInAsyncCommit(c *C) { - t, err := s.store.Begin() - c.Assert(err, IsNil) - txn := tikv.TxnProbe{KVTxn: t} - err = txn.Set([]byte("k"), []byte("v")) - c.Assert(err, IsNil) - committer, err := txn.NewCommitter(1) - c.Assert(err, IsNil) - committer.SetUseAsyncCommit() - - buildRequest := func() *kvrpcpb.PrewriteRequest { - req := committer.BuildPrewriteRequest(1, 1, 1, committer.GetMutations(), 1) - return req.Req.(*kvrpcpb.PrewriteRequest) - } - - // no forUpdateTS - req := buildRequest() - c.Assert(req.MinCommitTs, Equals, txn.StartTS()+1) - - // forUpdateTS is set - committer.SetForUpdateTS(txn.StartTS() + (5 << 18)) - req = buildRequest() - c.Assert(req.MinCommitTs, Equals, committer.GetForUpdateTS()+1) - - // minCommitTS is set - committer.SetMinCommitTS(txn.StartTS() + (10 << 18)) - req = buildRequest() - c.Assert(req.MinCommitTs, Equals, committer.GetMinCommitTS()) - -} diff --git a/store/tikv/tests/range_task_test.go b/store/tikv/tests/range_task_test.go deleted file mode 100644 index 57c596340a6fb..0000000000000 --- a/store/tikv/tests/range_task_test.go +++ /dev/null @@ -1,242 +0,0 @@ -// Copyright 2019 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -package tikv_test - -import ( - "bytes" - "context" - "errors" - "sort" - - . 
"github.com/pingcap/check" - "github.com/pingcap/tidb/store/mockstore/mockcopr" - "github.com/tikv/client-go/v2/kv" - "github.com/tikv/client-go/v2/mockstore/cluster" - "github.com/tikv/client-go/v2/mockstore/mocktikv" - "github.com/tikv/client-go/v2/tikv" -) - -type testRangeTaskSuite struct { - OneByOneSuite - cluster cluster.Cluster - store *tikv.KVStore - - testRanges []kv.KeyRange - expectedRanges [][]kv.KeyRange -} - -var _ = Suite(&testRangeTaskSuite{}) - -func makeRange(startKey string, endKey string) kv.KeyRange { - return kv.KeyRange{ - StartKey: []byte(startKey), - EndKey: []byte(endKey), - } -} - -func (s *testRangeTaskSuite) SetUpTest(c *C) { - // Split the store at "a" to "z" - splitKeys := make([][]byte, 0) - for k := byte('a'); k <= byte('z'); k++ { - splitKeys = append(splitKeys, []byte{k}) - } - - // Calculate all region's ranges - allRegionRanges := []kv.KeyRange{makeRange("", "a")} - for i := 0; i < len(splitKeys)-1; i++ { - allRegionRanges = append(allRegionRanges, kv.KeyRange{ - StartKey: splitKeys[i], - EndKey: splitKeys[i+1], - }) - } - allRegionRanges = append(allRegionRanges, makeRange("z", "")) - - client, cluster, pdClient, err := mocktikv.NewTiKVAndPDClient("", mockcopr.NewCoprRPCHandler()) - c.Assert(err, IsNil) - mocktikv.BootstrapWithMultiRegions(cluster, splitKeys...) - s.cluster = cluster - - store, err := tikv.NewTestTiKVStore(client, pdClient, nil, nil, 0) - c.Assert(err, IsNil) - - // TODO: make this possible - // store, err := mockstore.NewMockStore( - // mockstore.WithStoreType(mockstore.MockTiKV), - // mockstore.WithClusterInspector(func(c cluster.Cluster) { - // mockstore.BootstrapWithMultiRegions(c, splitKeys...) - // s.cluster = c - // }), - // ) - // c.Assert(err, IsNil) - s.store = store - - s.testRanges = []kv.KeyRange{ - makeRange("", ""), - makeRange("", "b"), - makeRange("b", ""), - makeRange("b", "x"), - makeRange("a", "d"), - makeRange("a\x00", "d\x00"), - makeRange("a\xff\xff\xff", "c\xff\xff\xff"), - makeRange("a1", "a2"), - makeRange("a", "a"), - makeRange("a3", "a3"), - } - - s.expectedRanges = [][]kv.KeyRange{ - allRegionRanges, - allRegionRanges[:2], - allRegionRanges[2:], - allRegionRanges[2:24], - { - makeRange("a", "b"), - makeRange("b", "c"), - makeRange("c", "d"), - }, - { - makeRange("a\x00", "b"), - makeRange("b", "c"), - makeRange("c", "d"), - makeRange("d", "d\x00"), - }, - { - makeRange("a\xff\xff\xff", "b"), - makeRange("b", "c"), - makeRange("c", "c\xff\xff\xff"), - }, - { - makeRange("a1", "a2"), - }, - {}, - {}, - } -} - -func (s *testRangeTaskSuite) TearDownTest(c *C) { - err := s.store.Close() - c.Assert(err, IsNil) -} - -func collect(c chan *kv.KeyRange) []kv.KeyRange { - c <- nil - ranges := make([]kv.KeyRange, 0) - - for { - r := <-c - if r == nil { - break - } - - ranges = append(ranges, *r) - } - return ranges -} - -func (s *testRangeTaskSuite) checkRanges(c *C, obtained []kv.KeyRange, expected []kv.KeyRange) { - sort.Slice(obtained, func(i, j int) bool { - return bytes.Compare(obtained[i].StartKey, obtained[j].StartKey) < 0 - }) - - c.Assert(obtained, DeepEquals, expected) -} - -func batchRanges(ranges []kv.KeyRange, batchSize int) []kv.KeyRange { - result := make([]kv.KeyRange, 0, len(ranges)) - - for i := 0; i < len(ranges); i += batchSize { - lastRange := i + batchSize - 1 - if lastRange >= len(ranges) { - lastRange = len(ranges) - 1 - } - - result = append(result, kv.KeyRange{ - StartKey: ranges[i].StartKey, - EndKey: ranges[lastRange].EndKey, - }) - } - - return result -} - -func (s *testRangeTaskSuite) 
testRangeTaskImpl(c *C, concurrency int) { - c.Logf("Test RangeTask, concurrency: %v", concurrency) - - ranges := make(chan *kv.KeyRange, 100) - - handler := func(ctx context.Context, r kv.KeyRange) (tikv.RangeTaskStat, error) { - ranges <- &r - stat := tikv.RangeTaskStat{ - CompletedRegions: 1, - } - return stat, nil - } - - runner := tikv.NewRangeTaskRunner("test-runner", s.store, concurrency, handler) - - for regionsPerTask := 1; regionsPerTask <= 5; regionsPerTask++ { - for i, r := range s.testRanges { - runner.SetRegionsPerTask(regionsPerTask) - - expectedRanges := batchRanges(s.expectedRanges[i], regionsPerTask) - - err := runner.RunOnRange(context.Background(), r.StartKey, r.EndKey) - c.Assert(err, IsNil) - s.checkRanges(c, collect(ranges), expectedRanges) - c.Assert(runner.CompletedRegions(), Equals, len(expectedRanges)) - c.Assert(runner.FailedRegions(), Equals, 0) - } - } -} - -func (s *testRangeTaskSuite) TestRangeTask(c *C) { - for concurrency := 1; concurrency < 5; concurrency++ { - s.testRangeTaskImpl(c, concurrency) - } -} - -func (s *testRangeTaskSuite) testRangeTaskErrorImpl(c *C, concurrency int) { - for i, r := range s.testRanges { - // Iterate all sub tasks and make it an error - subRanges := s.expectedRanges[i] - for _, subRange := range subRanges { - errKey := subRange.StartKey - c.Logf("Test RangeTask Error concurrency: %v, range: [%+q, %+q), errKey: %+q", concurrency, r.StartKey, r.EndKey, errKey) - - handler := func(ctx context.Context, r kv.KeyRange) (tikv.RangeTaskStat, error) { - stat := tikv.RangeTaskStat{CompletedRegions: 0, FailedRegions: 0} - if bytes.Equal(r.StartKey, errKey) { - stat.FailedRegions++ - return stat, errors.New("test error") - - } - stat.CompletedRegions++ - return stat, nil - } - - runner := tikv.NewRangeTaskRunner("test-error-runner", s.store, concurrency, handler) - runner.SetRegionsPerTask(1) - err := runner.RunOnRange(context.Background(), r.StartKey, r.EndKey) - // RunOnRange returns no error only when all sub tasks are done successfully. - c.Assert(err, NotNil) - c.Assert(runner.CompletedRegions(), Less, len(subRanges)) - c.Assert(runner.FailedRegions(), Equals, 1) - } - } -} - -func (s *testRangeTaskSuite) TestRangeTaskError(c *C) { - for concurrency := 1; concurrency < 5; concurrency++ { - s.testRangeTaskErrorImpl(c, concurrency) - } -} diff --git a/store/tikv/tests/rawkv_test.go b/store/tikv/tests/rawkv_test.go deleted file mode 100644 index 7d53a31f2a0ca..0000000000000 --- a/store/tikv/tests/rawkv_test.go +++ /dev/null @@ -1,312 +0,0 @@ -// Copyright 2016 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -package tikv_test - -import ( - "bytes" - "context" - "fmt" - - . 
"github.com/pingcap/check" - "github.com/pingcap/tidb/store/mockstore/unistore" - "github.com/tikv/client-go/v2/mockstore/cluster" - "github.com/tikv/client-go/v2/tikv" -) - -type testRawKVSuite struct { - OneByOneSuite - cluster cluster.Cluster - client tikv.RawKVClientProbe - bo *tikv.Backoffer -} - -var _ = Suite(&testRawKVSuite{}) - -func (s *testRawKVSuite) SetUpTest(c *C) { - client, pdClient, cluster, err := unistore.New("") - c.Assert(err, IsNil) - unistore.BootstrapWithSingleStore(cluster) - s.cluster = cluster - s.client = tikv.RawKVClientProbe{RawKVClient: &tikv.RawKVClient{}} - s.client.SetPDClient(pdClient) - s.client.SetRegionCache(tikv.NewRegionCache(pdClient)) - s.client.SetRPCClient(client) - s.bo = tikv.NewBackofferWithVars(context.Background(), 5000, nil) -} - -func (s *testRawKVSuite) TearDownTest(c *C) { - s.client.Close() -} - -func (s *testRawKVSuite) mustNotExist(c *C, key []byte) { - v, err := s.client.Get(key) - c.Assert(err, IsNil) - c.Assert(v, IsNil) -} - -func (s *testRawKVSuite) mustBatchNotExist(c *C, keys [][]byte) { - values, err := s.client.BatchGet(keys) - c.Assert(err, IsNil) - c.Assert(values, NotNil) - c.Assert(len(keys), Equals, len(values)) - for _, value := range values { - c.Assert([]byte{}, BytesEquals, value) - } -} - -func (s *testRawKVSuite) mustGet(c *C, key, value []byte) { - v, err := s.client.Get(key) - c.Assert(err, IsNil) - c.Assert(v, NotNil) - c.Assert(v, BytesEquals, value) -} - -func (s *testRawKVSuite) mustBatchGet(c *C, keys, values [][]byte) { - checkValues, err := s.client.BatchGet(keys) - c.Assert(err, IsNil) - c.Assert(checkValues, NotNil) - c.Assert(len(keys), Equals, len(checkValues)) - for i := range keys { - c.Check(values[i], BytesEquals, checkValues[i]) - } -} - -func (s *testRawKVSuite) mustPut(c *C, key, value []byte) { - err := s.client.Put(key, value) - c.Assert(err, IsNil) -} - -func (s *testRawKVSuite) mustBatchPut(c *C, keys, values [][]byte) { - err := s.client.BatchPut(keys, values) - c.Assert(err, IsNil) -} - -func (s *testRawKVSuite) mustDelete(c *C, key []byte) { - err := s.client.Delete(key) - c.Assert(err, IsNil) -} - -func (s *testRawKVSuite) mustBatchDelete(c *C, keys [][]byte) { - err := s.client.BatchDelete(keys) - c.Assert(err, IsNil) -} - -func (s *testRawKVSuite) mustScan(c *C, startKey string, limit int, expect ...string) { - keys, values, err := s.client.Scan([]byte(startKey), nil, limit) - c.Assert(err, IsNil) - c.Assert(len(keys)*2, Equals, len(expect)) - for i := range keys { - c.Assert(string(keys[i]), Equals, expect[i*2]) - c.Assert(string(values[i]), Equals, expect[i*2+1]) - } -} - -func (s *testRawKVSuite) mustScanRange(c *C, startKey string, endKey string, limit int, expect ...string) { - keys, values, err := s.client.Scan([]byte(startKey), []byte(endKey), limit) - c.Assert(err, IsNil) - c.Assert(len(keys)*2, Equals, len(expect)) - for i := range keys { - c.Assert(string(keys[i]), Equals, expect[i*2]) - c.Assert(string(values[i]), Equals, expect[i*2+1]) - } -} - -func (s *testRawKVSuite) mustReverseScan(c *C, startKey []byte, limit int, expect ...string) { - keys, values, err := s.client.ReverseScan(startKey, nil, limit) - c.Assert(err, IsNil) - c.Assert(len(keys)*2, Equals, len(expect)) - for i := range keys { - c.Assert(string(keys[i]), Equals, expect[i*2]) - c.Assert(string(values[i]), Equals, expect[i*2+1]) - } -} - -func (s *testRawKVSuite) mustReverseScanRange(c *C, startKey, endKey []byte, limit int, expect ...string) { - keys, values, err := s.client.ReverseScan(startKey, endKey, 
limit) - c.Assert(err, IsNil) - c.Assert(len(keys)*2, Equals, len(expect)) - for i := range keys { - c.Assert(string(keys[i]), Equals, expect[i*2]) - c.Assert(string(values[i]), Equals, expect[i*2+1]) - } -} - -func (s *testRawKVSuite) mustDeleteRange(c *C, startKey, endKey []byte, expected map[string]string) { - err := s.client.DeleteRange(startKey, endKey) - c.Assert(err, IsNil) - - for keyStr := range expected { - key := []byte(keyStr) - if bytes.Compare(startKey, key) <= 0 && bytes.Compare(key, endKey) < 0 { - delete(expected, keyStr) - } - } - - s.checkData(c, expected) -} - -func (s *testRawKVSuite) checkData(c *C, expected map[string]string) { - keys, values, err := s.client.Scan([]byte(""), nil, len(expected)+1) - c.Assert(err, IsNil) - - c.Assert(len(expected), Equals, len(keys)) - for i, key := range keys { - c.Assert(expected[string(key)], Equals, string(values[i])) - } -} - -func (s *testRawKVSuite) split(c *C, regionKey, splitKey string) error { - loc, err := s.client.GetRegionCache().LocateKey(s.bo, []byte(regionKey)) - if err != nil { - return err - } - - newRegionID, peerID := s.cluster.AllocID(), s.cluster.AllocID() - s.cluster.SplitRaw(loc.Region.GetID(), newRegionID, []byte(splitKey), []uint64{peerID}, peerID) - return nil -} - -func (s *testRawKVSuite) TestSimple(c *C) { - s.mustNotExist(c, []byte("key")) - s.mustPut(c, []byte("key"), []byte("value")) - s.mustGet(c, []byte("key"), []byte("value")) - s.mustDelete(c, []byte("key")) - s.mustNotExist(c, []byte("key")) - err := s.client.Put([]byte("key"), []byte("")) - c.Assert(err, NotNil) -} - -func (s *testRawKVSuite) TestRawBatch(c *C) { - testNum := 0 - size := 0 - var testKeys [][]byte - var testValues [][]byte - for i := 0; size/(tikv.ConfigProbe{}.GetRawBatchPutSize()) < 4; i++ { - key := fmt.Sprint("key", i) - size += len(key) - testKeys = append(testKeys, []byte(key)) - value := fmt.Sprint("value", i) - size += len(value) - testValues = append(testValues, []byte(value)) - s.mustNotExist(c, []byte(key)) - testNum = i - } - err := s.split(c, "", fmt.Sprint("key", testNum/2)) - c.Assert(err, IsNil) - s.mustBatchPut(c, testKeys, testValues) - s.mustBatchGet(c, testKeys, testValues) - s.mustBatchDelete(c, testKeys) - s.mustBatchNotExist(c, testKeys) -} - -func (s *testRawKVSuite) TestSplit(c *C) { - s.mustPut(c, []byte("k1"), []byte("v1")) - s.mustPut(c, []byte("k3"), []byte("v3")) - - err := s.split(c, "k", "k2") - c.Assert(err, IsNil) - - s.mustGet(c, []byte("k1"), []byte("v1")) - s.mustGet(c, []byte("k3"), []byte("v3")) -} - -func (s *testRawKVSuite) TestScan(c *C) { - s.mustPut(c, []byte("k1"), []byte("v1")) - s.mustPut(c, []byte("k3"), []byte("v3")) - s.mustPut(c, []byte("k5"), []byte("v5")) - s.mustPut(c, []byte("k7"), []byte("v7")) - - check := func() { - s.mustScan(c, "", 1, "k1", "v1") - s.mustScan(c, "k1", 2, "k1", "v1", "k3", "v3") - s.mustScan(c, "", 10, "k1", "v1", "k3", "v3", "k5", "v5", "k7", "v7") - s.mustScan(c, "k2", 2, "k3", "v3", "k5", "v5") - s.mustScan(c, "k2", 3, "k3", "v3", "k5", "v5", "k7", "v7") - s.mustScanRange(c, "", "k1", 1) - s.mustScanRange(c, "k1", "k3", 2, "k1", "v1") - s.mustScanRange(c, "k1", "k5", 10, "k1", "v1", "k3", "v3") - s.mustScanRange(c, "k1", "k5\x00", 10, "k1", "v1", "k3", "v3", "k5", "v5") - s.mustScanRange(c, "k5\x00", "k5\x00\x00", 10) - } - - check() - - err := s.split(c, "k", "k2") - c.Assert(err, IsNil) - check() - - err = s.split(c, "k2", "k5") - c.Assert(err, IsNil) - check() -} - -func (s *testRawKVSuite) TestReverseScan(c *C) { - s.mustPut(c, []byte("k1"), 
[]byte("v1")) - s.mustPut(c, []byte("k3"), []byte("v3")) - s.mustPut(c, []byte("k5"), []byte("v5")) - s.mustPut(c, []byte("k7"), []byte("v7")) - - s.checkReverseScan(c) - - err := s.split(c, "k", "k2") - c.Assert(err, IsNil) - s.checkReverseScan(c) - - err = s.split(c, "k2", "k5") - c.Assert(err, IsNil) - s.checkReverseScan(c) -} - -func (s *testRawKVSuite) checkReverseScan(c *C) { - s.mustReverseScan(c, []byte(""), 10) - s.mustReverseScan(c, []byte("z"), 1, "k7", "v7") - s.mustReverseScan(c, []byte("z"), 2, "k7", "v7", "k5", "v5") - s.mustReverseScan(c, []byte("z"), 10, "k7", "v7", "k5", "v5", "k3", "v3", "k1", "v1") - s.mustReverseScan(c, []byte("k2"), 10, "k1", "v1") - s.mustReverseScan(c, []byte("k6"), 2, "k5", "v5", "k3", "v3") - s.mustReverseScan(c, []byte("k5"), 1, "k3", "v3") - s.mustReverseScan(c, append([]byte("k5"), 0), 1, "k5", "v5") - s.mustReverseScan(c, []byte("k6"), 3, "k5", "v5", "k3", "v3", "k1", "v1") - - s.mustReverseScanRange(c, []byte("z"), []byte("k3"), 10, "k7", "v7", "k5", "v5", "k3", "v3") - s.mustReverseScanRange(c, []byte("k7"), append([]byte("k3"), 0), 10, "k5", "v5") -} - -func (s *testRawKVSuite) TestDeleteRange(c *C) { - // Init data - testData := map[string]string{} - for _, i := range []byte("abcd") { - for j := byte('0'); j <= byte('9'); j++ { - key := []byte{i, j} - value := []byte{'v', i, j} - s.mustPut(c, key, value) - - testData[string(key)] = string(value) - } - } - - err := s.split(c, "b", "b") - c.Assert(err, IsNil) - err = s.split(c, "c", "c") - c.Assert(err, IsNil) - err = s.split(c, "d", "d") - c.Assert(err, IsNil) - - s.checkData(c, testData) - s.mustDeleteRange(c, []byte("b"), []byte("c0"), testData) - s.mustDeleteRange(c, []byte("c11"), []byte("c12"), testData) - s.mustDeleteRange(c, []byte("d0"), []byte("d0"), testData) - s.mustDeleteRange(c, []byte("c5"), []byte("d5"), testData) - s.mustDeleteRange(c, []byte("a"), []byte("z"), testData) -} diff --git a/store/tikv/tests/safepoint_test.go b/store/tikv/tests/safepoint_test.go deleted file mode 100644 index 190457ceb7514..0000000000000 --- a/store/tikv/tests/safepoint_test.go +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright 2017 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -package tikv_test - -import ( - "context" - "fmt" - "time" - - . 
"github.com/pingcap/check" - "github.com/pingcap/errors" - "github.com/pingcap/parser/terror" - tikverr "github.com/tikv/client-go/v2/error" - "github.com/tikv/client-go/v2/tikv" -) - -type testSafePointSuite struct { - OneByOneSuite - store tikv.StoreProbe - prefix string -} - -var _ = Suite(&testSafePointSuite{}) - -func (s *testSafePointSuite) SetUpSuite(c *C) { - s.OneByOneSuite.SetUpSuite(c) - s.store = tikv.StoreProbe{KVStore: NewTestStore(c)} - s.prefix = fmt.Sprintf("seek_%d", time.Now().Unix()) -} - -func (s *testSafePointSuite) TearDownSuite(c *C) { - err := s.store.Close() - c.Assert(err, IsNil) - s.OneByOneSuite.TearDownSuite(c) -} - -func (s *testSafePointSuite) beginTxn(c *C) tikv.TxnProbe { - txn, err := s.store.Begin() - c.Assert(err, IsNil) - return txn -} - -func mymakeKeys(rowNum int, prefix string) [][]byte { - keys := make([][]byte, 0, rowNum) - for i := 0; i < rowNum; i++ { - k := encodeKey(prefix, s08d("key", i)) - keys = append(keys, k) - } - return keys -} - -func (s *testSafePointSuite) waitUntilErrorPlugIn(t uint64) { - for { - s.store.SaveSafePoint(t + 10) - cachedTime := time.Now() - newSafePoint, err := s.store.LoadSafePoint() - if err == nil { - s.store.UpdateSPCache(newSafePoint, cachedTime) - break - } - time.Sleep(time.Second) - } -} - -func (s *testSafePointSuite) TestSafePoint(c *C) { - txn := s.beginTxn(c) - for i := 0; i < 10; i++ { - err := txn.Set(encodeKey(s.prefix, s08d("key", i)), valueBytes(i)) - c.Assert(err, IsNil) - } - err := txn.Commit(context.Background()) - c.Assert(err, IsNil) - - // for txn get - txn2 := s.beginTxn(c) - _, err = txn2.Get(context.TODO(), encodeKey(s.prefix, s08d("key", 0))) - c.Assert(err, IsNil) - - s.waitUntilErrorPlugIn(txn2.StartTS()) - - _, geterr2 := txn2.Get(context.TODO(), encodeKey(s.prefix, s08d("key", 0))) - c.Assert(geterr2, NotNil) - - _, isFallBehind := errors.Cause(geterr2).(*tikverr.ErrGCTooEarly) - isMayFallBehind := terror.ErrorEqual(errors.Cause(geterr2), tikverr.NewErrPDServerTimeout("start timestamp may fall behind safe point")) - isBehind := isFallBehind || isMayFallBehind - c.Assert(isBehind, IsTrue) - - // for txn seek - txn3 := s.beginTxn(c) - - s.waitUntilErrorPlugIn(txn3.StartTS()) - - _, seekerr := txn3.Iter(encodeKey(s.prefix, ""), nil) - c.Assert(seekerr, NotNil) - _, isFallBehind = errors.Cause(geterr2).(*tikverr.ErrGCTooEarly) - isMayFallBehind = terror.ErrorEqual(errors.Cause(geterr2), tikverr.NewErrPDServerTimeout("start timestamp may fall behind safe point")) - isBehind = isFallBehind || isMayFallBehind - c.Assert(isBehind, IsTrue) - - // for snapshot batchGet - keys := mymakeKeys(10, s.prefix) - txn4 := s.beginTxn(c) - - s.waitUntilErrorPlugIn(txn4.StartTS()) - - _, batchgeterr := toTiDBTxn(&txn4).BatchGet(context.Background(), toTiDBKeys(keys)) - c.Assert(batchgeterr, NotNil) - _, isFallBehind = errors.Cause(geterr2).(*tikverr.ErrGCTooEarly) - isMayFallBehind = terror.ErrorEqual(errors.Cause(geterr2), tikverr.NewErrPDServerTimeout("start timestamp may fall behind safe point")) - isBehind = isFallBehind || isMayFallBehind - c.Assert(isBehind, IsTrue) -} diff --git a/store/tikv/tests/scan_mock_test.go b/store/tikv/tests/scan_mock_test.go deleted file mode 100644 index d7dd2087d1c49..0000000000000 --- a/store/tikv/tests/scan_mock_test.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2016 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -package tikv_test - -import ( - "context" - - . "github.com/pingcap/check" - "github.com/tikv/client-go/v2/tikv" -) - -type testScanMockSuite struct { - OneByOneSuite -} - -var _ = Suite(&testScanMockSuite{}) - -func (s *testScanMockSuite) TestScanMultipleRegions(c *C) { - store := tikv.StoreProbe{KVStore: NewTestStore(c)} - defer store.Close() - - txn, err := store.Begin() - c.Assert(err, IsNil) - for ch := byte('a'); ch <= byte('z'); ch++ { - err = txn.Set([]byte{ch}, []byte{ch}) - c.Assert(err, IsNil) - } - err = txn.Commit(context.Background()) - c.Assert(err, IsNil) - - txn, err = store.Begin() - c.Assert(err, IsNil) - scanner, err := txn.NewScanner([]byte("a"), nil, 10, false) - c.Assert(err, IsNil) - for ch := byte('a'); ch <= byte('z'); ch++ { - c.Assert([]byte{ch}, BytesEquals, scanner.Key()) - c.Assert(scanner.Next(), IsNil) - } - c.Assert(scanner.Valid(), IsFalse) - - scanner, err = txn.NewScanner([]byte("a"), []byte("i"), 10, false) - c.Assert(err, IsNil) - for ch := byte('a'); ch <= byte('h'); ch++ { - c.Assert([]byte{ch}, BytesEquals, scanner.Key()) - c.Assert(scanner.Next(), IsNil) - } - c.Assert(scanner.Valid(), IsFalse) -} - -func (s *testScanMockSuite) TestReverseScan(c *C) { - store := tikv.StoreProbe{KVStore: NewTestStore(c)} - defer store.Close() - - txn, err := store.Begin() - c.Assert(err, IsNil) - for ch := byte('a'); ch <= byte('z'); ch++ { - err = txn.Set([]byte{ch}, []byte{ch}) - c.Assert(err, IsNil) - } - err = txn.Commit(context.Background()) - c.Assert(err, IsNil) - - txn, err = store.Begin() - c.Assert(err, IsNil) - scanner, err := txn.NewScanner(nil, []byte("z"), 10, true) - c.Assert(err, IsNil) - for ch := byte('y'); ch >= byte('a'); ch-- { - c.Assert(string([]byte{ch}), Equals, string(scanner.Key())) - c.Assert(scanner.Next(), IsNil) - } - c.Assert(scanner.Valid(), IsFalse) - - scanner, err = txn.NewScanner([]byte("a"), []byte("i"), 10, true) - c.Assert(err, IsNil) - for ch := byte('h'); ch >= byte('a'); ch-- { - c.Assert(string([]byte{ch}), Equals, string(scanner.Key())) - c.Assert(scanner.Next(), IsNil) - } - c.Assert(scanner.Valid(), IsFalse) -} diff --git a/store/tikv/tests/scan_test.go b/store/tikv/tests/scan_test.go deleted file mode 100644 index 696ce2b4df6d3..0000000000000 --- a/store/tikv/tests/scan_test.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2016 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -package tikv_test - -import ( - "bytes" - "context" - "fmt" - - . 
"github.com/pingcap/check" - "github.com/tikv/client-go/v2/kv" - "github.com/tikv/client-go/v2/logutil" - "github.com/tikv/client-go/v2/tikv" - "github.com/tikv/client-go/v2/unionstore" - "github.com/tikv/client-go/v2/util" - "go.uber.org/zap" -) - -var scanBatchSize = tikv.ConfigProbe{}.GetScanBatchSize() - -type testScanSuite struct { - OneByOneSuite - store *tikv.KVStore - recordPrefix []byte - rowNums []int - ctx context.Context -} - -var _ = SerialSuites(&testScanSuite{}) - -func (s *testScanSuite) SetUpSuite(c *C) { - s.OneByOneSuite.SetUpSuite(c) - s.store = NewTestStore(c) - s.recordPrefix = []byte("prefix") - s.rowNums = append(s.rowNums, 1, scanBatchSize, scanBatchSize+1, scanBatchSize*3) - // Avoid using async commit logic. - s.ctx = context.WithValue(context.Background(), util.SessionID, uint64(0)) -} - -func (s *testScanSuite) TearDownSuite(c *C) { - txn := s.beginTxn(c) - scanner, err := txn.Iter(s.recordPrefix, nil) - c.Assert(err, IsNil) - c.Assert(scanner, NotNil) - for scanner.Valid() { - k := scanner.Key() - err = txn.Delete(k) - c.Assert(err, IsNil) - scanner.Next() - } - err = txn.Commit(s.ctx) - c.Assert(err, IsNil) - err = s.store.Close() - c.Assert(err, IsNil) - s.OneByOneSuite.TearDownSuite(c) -} - -func (s *testScanSuite) beginTxn(c *C) *tikv.KVTxn { - txn, err := s.store.Begin() - c.Assert(err, IsNil) - return txn -} - -func (s *testScanSuite) makeKey(i int) []byte { - var key []byte - key = append(key, s.recordPrefix...) - key = append(key, []byte(fmt.Sprintf("%10d", i))...) - return key -} - -func (s *testScanSuite) makeValue(i int) []byte { - return []byte(fmt.Sprintf("%d", i)) -} - -func (s *testScanSuite) TestScan(c *C) { - check := func(c *C, scan unionstore.Iterator, rowNum int, keyOnly bool) { - for i := 0; i < rowNum; i++ { - k := scan.Key() - expectedKey := s.makeKey(i) - if ok := bytes.Equal(k, expectedKey); !ok { - logutil.BgLogger().Error("bytes equal check fail", - zap.Int("i", i), - zap.Int("rowNum", rowNum), - zap.String("obtained key", kv.StrKey(k)), - zap.String("obtained val", kv.StrKey(scan.Value())), - zap.String("expected", kv.StrKey(expectedKey)), - zap.Bool("keyOnly", keyOnly)) - } - c.Assert(k, BytesEquals, expectedKey) - if !keyOnly { - v := scan.Value() - c.Assert(v, BytesEquals, s.makeValue(i)) - } - // Because newScan return first item without calling scan.Next() just like go-hbase, - // for-loop count will decrease 1. 
- if i < rowNum-1 { - scan.Next() - } - } - scan.Next() - c.Assert(scan.Valid(), IsFalse) - } - - for _, rowNum := range s.rowNums { - txn := s.beginTxn(c) - for i := 0; i < rowNum; i++ { - err := txn.Set(s.makeKey(i), s.makeValue(i)) - c.Assert(err, IsNil) - } - err := txn.Commit(s.ctx) - c.Assert(err, IsNil) - mockTableID := int64(999) - if rowNum > 123 { - _, err = s.store.SplitRegions(s.ctx, [][]byte{s.makeKey(123)}, false, &mockTableID) - c.Assert(err, IsNil) - } - - if rowNum > 456 { - _, err = s.store.SplitRegions(s.ctx, [][]byte{s.makeKey(456)}, false, &mockTableID) - c.Assert(err, IsNil) - } - - txn2 := s.beginTxn(c) - val, err := txn2.Get(context.TODO(), s.makeKey(0)) - c.Assert(err, IsNil) - c.Assert(val, BytesEquals, s.makeValue(0)) - // Test scan without upperBound - scan, err := txn2.Iter(s.recordPrefix, nil) - c.Assert(err, IsNil) - check(c, scan, rowNum, false) - // Test scan with upperBound - upperBound := rowNum / 2 - scan, err = txn2.Iter(s.recordPrefix, s.makeKey(upperBound)) - c.Assert(err, IsNil) - check(c, scan, upperBound, false) - - txn3 := s.beginTxn(c) - txn3.GetSnapshot().SetKeyOnly(true) - // Test scan without upper bound - scan, err = txn3.Iter(s.recordPrefix, nil) - c.Assert(err, IsNil) - check(c, scan, rowNum, true) - // test scan with upper bound - scan, err = txn3.Iter(s.recordPrefix, s.makeKey(upperBound)) - c.Assert(err, IsNil) - check(c, scan, upperBound, true) - - // Restore KeyOnly to false - txn3.GetSnapshot().SetKeyOnly(false) - scan, err = txn3.Iter(s.recordPrefix, nil) - c.Assert(err, IsNil) - check(c, scan, rowNum, true) - // test scan with upper bound - scan, err = txn3.Iter(s.recordPrefix, s.makeKey(upperBound)) - c.Assert(err, IsNil) - check(c, scan, upperBound, true) - } -} diff --git a/store/tikv/tests/snapshot_fail_test.go b/store/tikv/tests/snapshot_fail_test.go deleted file mode 100644 index eb0625566e7df..0000000000000 --- a/store/tikv/tests/snapshot_fail_test.go +++ /dev/null @@ -1,245 +0,0 @@ -// Copyright 2016 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -package tikv_test - -import ( - "context" - "math" - "time" - - . 
"github.com/pingcap/check" - "github.com/pingcap/failpoint" - "github.com/pingcap/tidb/store/mockstore/unistore" - tikverr "github.com/tikv/client-go/v2/error" - "github.com/tikv/client-go/v2/mockstore" - "github.com/tikv/client-go/v2/tikv" -) - -type testSnapshotFailSuite struct { - OneByOneSuite - store tikv.StoreProbe -} - -var _ = SerialSuites(&testSnapshotFailSuite{}) - -func (s *testSnapshotFailSuite) SetUpSuite(c *C) { - s.OneByOneSuite.SetUpSuite(c) - client, pdClient, cluster, err := unistore.New("") - c.Assert(err, IsNil) - unistore.BootstrapWithSingleStore(cluster) - store, err := tikv.NewTestTiKVStore(fpClient{Client: client}, pdClient, nil, nil, 0) - c.Assert(err, IsNil) - s.store = tikv.StoreProbe{KVStore: store} -} - -func (s *testSnapshotFailSuite) TearDownSuite(c *C) { - s.OneByOneSuite.TearDownSuite(c) -} - -func (s *testSnapshotFailSuite) cleanup(c *C) { - txn, err := s.store.Begin() - c.Assert(err, IsNil) - iter, err := txn.Iter([]byte(""), []byte("")) - c.Assert(err, IsNil) - for iter.Valid() { - err = txn.Delete(iter.Key()) - c.Assert(err, IsNil) - err = iter.Next() - c.Assert(err, IsNil) - } - c.Assert(txn.Commit(context.TODO()), IsNil) -} - -func (s *testSnapshotFailSuite) TestBatchGetResponseKeyError(c *C) { - // Meaningless to test with tikv because it has a mock key error - if *mockstore.WithTiKV { - return - } - defer s.cleanup(c) - - // Put two KV pairs - txn, err := s.store.Begin() - c.Assert(err, IsNil) - err = txn.Set([]byte("k1"), []byte("v1")) - c.Assert(err, IsNil) - err = txn.Set([]byte("k2"), []byte("v2")) - c.Assert(err, IsNil) - err = txn.Commit(context.Background()) - c.Assert(err, IsNil) - - c.Assert(failpoint.Enable("tikvclient/rpcBatchGetResult", `1*return("keyError")`), IsNil) - defer func() { - c.Assert(failpoint.Disable("tikvclient/rpcBatchGetResult"), IsNil) - }() - - txn, err = s.store.Begin() - c.Assert(err, IsNil) - res, err := toTiDBTxn(&txn).BatchGet(context.Background(), toTiDBKeys([][]byte{[]byte("k1"), []byte("k2")})) - c.Assert(err, IsNil) - c.Assert(res, DeepEquals, map[string][]byte{"k1": []byte("v1"), "k2": []byte("v2")}) -} - -func (s *testSnapshotFailSuite) TestScanResponseKeyError(c *C) { - // Meaningless to test with tikv because it has a mock key error - if *mockstore.WithTiKV { - return - } - defer s.cleanup(c) - - // Put two KV pairs - txn, err := s.store.Begin() - c.Assert(err, IsNil) - err = txn.Set([]byte("k1"), []byte("v1")) - c.Assert(err, IsNil) - err = txn.Set([]byte("k2"), []byte("v2")) - c.Assert(err, IsNil) - err = txn.Set([]byte("k3"), []byte("v3")) - c.Assert(err, IsNil) - err = txn.Commit(context.Background()) - c.Assert(err, IsNil) - - c.Assert(failpoint.Enable("tikvclient/rpcScanResult", `1*return("keyError")`), IsNil) - txn, err = s.store.Begin() - c.Assert(err, IsNil) - iter, err := txn.Iter([]byte("a"), []byte("z")) - c.Assert(err, IsNil) - c.Assert(iter.Key(), DeepEquals, []byte("k1")) - c.Assert(iter.Value(), DeepEquals, []byte("v1")) - c.Assert(iter.Next(), IsNil) - c.Assert(iter.Key(), DeepEquals, []byte("k2")) - c.Assert(iter.Value(), DeepEquals, []byte("v2")) - c.Assert(iter.Next(), IsNil) - c.Assert(iter.Key(), DeepEquals, []byte("k3")) - c.Assert(iter.Value(), DeepEquals, []byte("v3")) - c.Assert(iter.Next(), IsNil) - c.Assert(iter.Valid(), IsFalse) - c.Assert(failpoint.Disable("tikvclient/rpcScanResult"), IsNil) - - c.Assert(failpoint.Enable("tikvclient/rpcScanResult", `1*return("keyError")`), IsNil) - txn, err = s.store.Begin() - c.Assert(err, IsNil) - iter, err = txn.Iter([]byte("k2"), 
[]byte("k4")) - c.Assert(err, IsNil) - c.Assert(iter.Key(), DeepEquals, []byte("k2")) - c.Assert(iter.Value(), DeepEquals, []byte("v2")) - c.Assert(iter.Next(), IsNil) - c.Assert(iter.Key(), DeepEquals, []byte("k3")) - c.Assert(iter.Value(), DeepEquals, []byte("v3")) - c.Assert(iter.Next(), IsNil) - c.Assert(iter.Valid(), IsFalse) - c.Assert(failpoint.Disable("tikvclient/rpcScanResult"), IsNil) -} - -func (s *testSnapshotFailSuite) TestRetryMaxTsPointGetSkipLock(c *C) { - defer s.cleanup(c) - - // Prewrite k1 and k2 with async commit but don't commit them - txn, err := s.store.Begin() - c.Assert(err, IsNil) - err = txn.Set([]byte("k1"), []byte("v1")) - c.Assert(err, IsNil) - err = txn.Set([]byte("k2"), []byte("v2")) - c.Assert(err, IsNil) - txn.SetEnableAsyncCommit(true) - - c.Assert(failpoint.Enable("tikvclient/asyncCommitDoNothing", "return"), IsNil) - c.Assert(failpoint.Enable("tikvclient/twoPCShortLockTTL", "return"), IsNil) - committer, err := txn.NewCommitter(1) - c.Assert(err, IsNil) - err = committer.Execute(context.Background()) - c.Assert(err, IsNil) - c.Assert(failpoint.Disable("tikvclient/twoPCShortLockTTL"), IsNil) - - snapshot := s.store.GetSnapshot(math.MaxUint64) - getCh := make(chan []byte) - go func() { - // Sleep a while to make the TTL of the first txn expire, then we make sure we resolve lock by this get - time.Sleep(200 * time.Millisecond) - c.Assert(failpoint.Enable("tikvclient/beforeSendPointGet", "1*off->pause"), IsNil) - res, err := snapshot.Get(context.Background(), []byte("k2")) - c.Assert(err, IsNil) - getCh <- res - }() - // The get should be blocked by the failpoint. But the lock should have been resolved. - select { - case res := <-getCh: - c.Errorf("too early %s", string(res)) - case <-time.After(1 * time.Second): - } - - // Prewrite k1 and k2 again without committing them - txn, err = s.store.Begin() - c.Assert(err, IsNil) - txn.SetEnableAsyncCommit(true) - err = txn.Set([]byte("k1"), []byte("v3")) - c.Assert(err, IsNil) - err = txn.Set([]byte("k2"), []byte("v4")) - c.Assert(err, IsNil) - committer, err = txn.NewCommitter(1) - c.Assert(err, IsNil) - err = committer.Execute(context.Background()) - c.Assert(err, IsNil) - - c.Assert(failpoint.Disable("tikvclient/beforeSendPointGet"), IsNil) - - // After disabling the failpoint, the get request should bypass the new locks and read the old result - select { - case res := <-getCh: - c.Assert(res, DeepEquals, []byte("v2")) - case <-time.After(1 * time.Second): - c.Errorf("get timeout") - } -} - -func (s *testSnapshotFailSuite) TestRetryPointGetResolveTS(c *C) { - defer s.cleanup(c) - - txn, err := s.store.Begin() - c.Assert(err, IsNil) - c.Assert(txn.Set([]byte("k1"), []byte("v1")), IsNil) - err = txn.Set([]byte("k2"), []byte("v2")) - c.Assert(err, IsNil) - txn.SetEnableAsyncCommit(false) - txn.SetEnable1PC(false) - txn.SetCausalConsistency(true) - - // Prewrite the lock without committing it - c.Assert(failpoint.Enable("tikvclient/beforeCommit", `pause`), IsNil) - ch := make(chan struct{}) - committer, err := txn.NewCommitter(1) - c.Assert(committer.GetPrimaryKey(), DeepEquals, []byte("k1")) - go func() { - c.Assert(err, IsNil) - err = committer.Execute(context.Background()) - c.Assert(err, IsNil) - ch <- struct{}{} - }() - - // Wait until prewrite finishes - time.Sleep(200 * time.Millisecond) - // Should get nothing with max version, and **not pushing forward minCommitTS** of the primary lock - snapshot := s.store.GetSnapshot(math.MaxUint64) - _, err = snapshot.Get(context.Background(), []byte("k2")) - 
c.Assert(tikverr.IsErrNotFound(err), IsTrue) - - initialCommitTS := committer.GetCommitTS() - c.Assert(failpoint.Disable("tikvclient/beforeCommit"), IsNil) - - <-ch - // check the minCommitTS is not pushed forward - snapshot = s.store.GetSnapshot(initialCommitTS) - v, err := snapshot.Get(context.Background(), []byte("k2")) - c.Assert(err, IsNil) - c.Assert(v, DeepEquals, []byte("v2")) -} diff --git a/store/tikv/tests/snapshot_test.go b/store/tikv/tests/snapshot_test.go deleted file mode 100644 index 8d9c3554b67c1..0000000000000 --- a/store/tikv/tests/snapshot_test.go +++ /dev/null @@ -1,315 +0,0 @@ -// Copyright 2016 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -package tikv_test - -import ( - "context" - "fmt" - "math" - "sync" - "time" - - . "github.com/pingcap/check" - "github.com/pingcap/errors" - "github.com/pingcap/failpoint" - "github.com/pingcap/kvproto/pkg/kvrpcpb" - tikverr "github.com/tikv/client-go/v2/error" - "github.com/tikv/client-go/v2/logutil" - "github.com/tikv/client-go/v2/tikv" - "github.com/tikv/client-go/v2/tikvrpc" - "go.uber.org/zap" -) - -type testSnapshotSuite struct { - OneByOneSuite - store tikv.StoreProbe - prefix string - rowNums []int -} - -var _ = Suite(&testSnapshotSuite{}) - -func (s *testSnapshotSuite) SetUpSuite(c *C) { - s.OneByOneSuite.SetUpSuite(c) - s.store = tikv.StoreProbe{KVStore: NewTestStore(c)} - s.prefix = fmt.Sprintf("snapshot_%d", time.Now().Unix()) - s.rowNums = append(s.rowNums, 1, 100, 191) -} - -func (s *testSnapshotSuite) TearDownSuite(c *C) { - txn := s.beginTxn(c) - scanner, err := txn.Iter(encodeKey(s.prefix, ""), nil) - c.Assert(err, IsNil) - c.Assert(scanner, NotNil) - for scanner.Valid() { - k := scanner.Key() - err = txn.Delete(k) - c.Assert(err, IsNil) - scanner.Next() - } - err = txn.Commit(context.Background()) - c.Assert(err, IsNil) - err = s.store.Close() - c.Assert(err, IsNil) - s.OneByOneSuite.TearDownSuite(c) -} - -func (s *testSnapshotSuite) beginTxn(c *C) tikv.TxnProbe { - txn, err := s.store.Begin() - c.Assert(err, IsNil) - return txn -} - -func (s *testSnapshotSuite) checkAll(keys [][]byte, c *C) { - txn := s.beginTxn(c) - snapshot := txn.GetSnapshot() - m, err := snapshot.BatchGet(context.Background(), keys) - c.Assert(err, IsNil) - - scan, err := txn.Iter(encodeKey(s.prefix, ""), nil) - c.Assert(err, IsNil) - cnt := 0 - for scan.Valid() { - cnt++ - k := scan.Key() - v := scan.Value() - v2, ok := m[string(k)] - c.Assert(ok, IsTrue, Commentf("key: %q", k)) - c.Assert(v, BytesEquals, v2) - scan.Next() - } - err = txn.Commit(context.Background()) - c.Assert(err, IsNil) - c.Assert(m, HasLen, cnt) -} - -func (s *testSnapshotSuite) deleteKeys(keys [][]byte, c *C) { - txn := s.beginTxn(c) - for _, k := range keys { - err := txn.Delete(k) - c.Assert(err, IsNil) - } - err := txn.Commit(context.Background()) - c.Assert(err, IsNil) -} - -func (s *testSnapshotSuite) TestBatchGet(c *C) { - for _, rowNum := range s.rowNums { - logutil.BgLogger().Debug("test BatchGet", - zap.Int("length", rowNum)) - txn := s.beginTxn(c) - for i := 0; i < rowNum; i++ { - k := encodeKey(s.prefix, 
s08d("key", i)) - err := txn.Set(k, valueBytes(i)) - c.Assert(err, IsNil) - } - err := txn.Commit(context.Background()) - c.Assert(err, IsNil) - - keys := makeKeys(rowNum, s.prefix) - s.checkAll(keys, c) - s.deleteKeys(keys, c) - } -} - -type contextKey string - -func (s *testSnapshotSuite) TestSnapshotCache(c *C) { - txn := s.beginTxn(c) - c.Assert(txn.Set([]byte("x"), []byte("x")), IsNil) - c.Assert(txn.Delete([]byte("y")), IsNil) // store data is affected by other tests. - c.Assert(txn.Commit(context.Background()), IsNil) - - txn = s.beginTxn(c) - snapshot := txn.GetSnapshot() - _, err := snapshot.BatchGet(context.Background(), [][]byte{[]byte("x"), []byte("y")}) - c.Assert(err, IsNil) - - c.Assert(failpoint.Enable("tikvclient/snapshot-get-cache-fail", `return(true)`), IsNil) - ctx := context.WithValue(context.Background(), contextKey("TestSnapshotCache"), true) - _, err = snapshot.Get(ctx, []byte("x")) - c.Assert(err, IsNil) - - _, err = snapshot.Get(ctx, []byte("y")) - c.Assert(tikverr.IsErrNotFound(err), IsTrue) - - c.Assert(failpoint.Disable("tikvclient/snapshot-get-cache-fail"), IsNil) -} - -func (s *testSnapshotSuite) TestBatchGetNotExist(c *C) { - for _, rowNum := range s.rowNums { - logutil.BgLogger().Debug("test BatchGetNotExist", - zap.Int("length", rowNum)) - txn := s.beginTxn(c) - for i := 0; i < rowNum; i++ { - k := encodeKey(s.prefix, s08d("key", i)) - err := txn.Set(k, valueBytes(i)) - c.Assert(err, IsNil) - } - err := txn.Commit(context.Background()) - c.Assert(err, IsNil) - - keys := makeKeys(rowNum, s.prefix) - keys = append(keys, []byte("noSuchKey")) - s.checkAll(keys, c) - s.deleteKeys(keys, c) - } -} - -func makeKeys(rowNum int, prefix string) [][]byte { - keys := make([][]byte, 0, rowNum) - for i := 0; i < rowNum; i++ { - k := encodeKey(prefix, s08d("key", i)) - keys = append(keys, k) - } - return keys -} - -func (s *testSnapshotSuite) TestSkipLargeTxnLock(c *C) { - x := []byte("x_key_TestSkipLargeTxnLock") - y := []byte("y_key_TestSkipLargeTxnLock") - txn := s.beginTxn(c) - c.Assert(txn.Set(x, []byte("x")), IsNil) - c.Assert(txn.Set(y, []byte("y")), IsNil) - ctx := context.Background() - committer, err := txn.NewCommitter(0) - c.Assert(err, IsNil) - committer.SetLockTTL(3000) - c.Assert(committer.PrewriteAllMutations(ctx), IsNil) - - txn1 := s.beginTxn(c) - // txn1 is not blocked by txn in the large txn protocol. - _, err = txn1.Get(ctx, x) - c.Assert(tikverr.IsErrNotFound(errors.Trace(err)), IsTrue) - - res, err := toTiDBTxn(&txn1).BatchGet(ctx, toTiDBKeys([][]byte{x, y, []byte("z")})) - c.Assert(err, IsNil) - c.Assert(res, HasLen, 0) - - // Commit txn, check the final commit ts is pushed. 
- committer.SetCommitTS(txn.StartTS() + 1) - c.Assert(committer.CommitMutations(ctx), IsNil) - status, err := s.store.GetLockResolver().GetTxnStatus(txn.StartTS(), 0, x) - c.Assert(err, IsNil) - c.Assert(status.IsCommitted(), IsTrue) - c.Assert(status.CommitTS(), Greater, txn1.StartTS()) -} - -func (s *testSnapshotSuite) TestPointGetSkipTxnLock(c *C) { - x := []byte("x_key_TestPointGetSkipTxnLock") - y := []byte("y_key_TestPointGetSkipTxnLock") - txn := s.beginTxn(c) - c.Assert(txn.Set(x, []byte("x")), IsNil) - c.Assert(txn.Set(y, []byte("y")), IsNil) - ctx := context.Background() - committer, err := txn.NewCommitter(0) - c.Assert(err, IsNil) - committer.SetLockTTL(3000) - c.Assert(committer.PrewriteAllMutations(ctx), IsNil) - - snapshot := s.store.GetSnapshot(math.MaxUint64) - start := time.Now() - c.Assert(committer.GetPrimaryKey(), BytesEquals, x) - // Point get secondary key. Shouldn't be blocked by the lock and read old data. - _, err = snapshot.Get(ctx, y) - c.Assert(tikverr.IsErrNotFound(errors.Trace(err)), IsTrue) - c.Assert(time.Since(start), Less, 500*time.Millisecond) - - // Commit the primary key - committer.SetCommitTS(txn.StartTS() + 1) - committer.CommitMutations(ctx) - - snapshot = s.store.GetSnapshot(math.MaxUint64) - start = time.Now() - // Point get secondary key. Should read committed data. - value, err := snapshot.Get(ctx, y) - c.Assert(err, IsNil) - c.Assert(value, BytesEquals, []byte("y")) - c.Assert(time.Since(start), Less, 500*time.Millisecond) -} - -func (s *testSnapshotSuite) TestSnapshotThreadSafe(c *C) { - txn := s.beginTxn(c) - key := []byte("key_test_snapshot_threadsafe") - c.Assert(txn.Set(key, []byte("x")), IsNil) - ctx := context.Background() - err := txn.Commit(context.Background()) - c.Assert(err, IsNil) - - snapshot := s.store.GetSnapshot(math.MaxUint64) - var wg sync.WaitGroup - wg.Add(5) - for i := 0; i < 5; i++ { - go func() { - for i := 0; i < 30; i++ { - _, err := snapshot.Get(ctx, key) - c.Assert(err, IsNil) - _, err = snapshot.BatchGet(ctx, [][]byte{key, []byte("key_not_exist")}) - c.Assert(err, IsNil) - } - wg.Done() - }() - } - wg.Wait() -} - -func (s *testSnapshotSuite) TestSnapshotRuntimeStats(c *C) { - reqStats := tikv.NewRegionRequestRuntimeStats() - tikv.RecordRegionRequestRuntimeStats(reqStats.Stats, tikvrpc.CmdGet, time.Second) - tikv.RecordRegionRequestRuntimeStats(reqStats.Stats, tikvrpc.CmdGet, time.Millisecond) - snapshot := s.store.GetSnapshot(0) - snapshot.SetRuntimeStats(&tikv.SnapshotRuntimeStats{}) - snapshot.MergeRegionRequestStats(reqStats.Stats) - snapshot.MergeRegionRequestStats(reqStats.Stats) - bo := tikv.NewBackofferWithVars(context.Background(), 2000, nil) - err := bo.BackoffWithMaxSleepTxnLockFast(30, errors.New("test")) - c.Assert(err, IsNil) - snapshot.RecordBackoffInfo(bo) - snapshot.RecordBackoffInfo(bo) - expect := "Get:{num_rpc:4, total_time:2s},txnLockFast_backoff:{num:2, total_time:60ms}" - c.Assert(snapshot.FormatStats(), Equals, expect) - detail := &kvrpcpb.ExecDetailsV2{ - TimeDetail: &kvrpcpb.TimeDetail{ - WaitWallTimeMs: 100, - ProcessWallTimeMs: 100, - }, - ScanDetailV2: &kvrpcpb.ScanDetailV2{ - ProcessedVersions: 10, - TotalVersions: 15, - RocksdbBlockReadCount: 20, - RocksdbBlockReadByte: 15, - RocksdbDeleteSkippedCount: 5, - RocksdbKeySkippedCount: 1, - RocksdbBlockCacheHitCount: 10, - }, - } - snapshot.MergeExecDetail(detail) - expect = "Get:{num_rpc:4, total_time:2s},txnLockFast_backoff:{num:2, total_time:60ms}, " + - "total_process_time: 100ms, total_wait_time: 100ms, " + - "scan_detail: 
{total_process_keys: 10, " + - "total_keys: 15, " + - "rocksdb: {delete_skipped_count: 5, " + - "key_skipped_count: 1, " + - "block: {cache_hit_count: 10, read_count: 20, read_byte: 15 Bytes}}}" - c.Assert(snapshot.FormatStats(), Equals, expect) - snapshot.MergeExecDetail(detail) - expect = "Get:{num_rpc:4, total_time:2s},txnLockFast_backoff:{num:2, total_time:60ms}, " + - "total_process_time: 200ms, total_wait_time: 200ms, " + - "scan_detail: {total_process_keys: 20, " + - "total_keys: 30, " + - "rocksdb: {delete_skipped_count: 10, " + - "key_skipped_count: 2, " + - "block: {cache_hit_count: 20, read_count: 40, read_byte: 30 Bytes}}}" - c.Assert(snapshot.FormatStats(), Equals, expect) -} diff --git a/store/tikv/tests/split_test.go b/store/tikv/tests/split_test.go deleted file mode 100644 index 09c3a5c1b6250..0000000000000 --- a/store/tikv/tests/split_test.go +++ /dev/null @@ -1,250 +0,0 @@ -// Copyright 2016 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -package tikv_test - -import ( - "context" - "sync" - - . "github.com/pingcap/check" - "github.com/pingcap/errors" - "github.com/pingcap/kvproto/pkg/metapb" - "github.com/pingcap/kvproto/pkg/pdpb" - "github.com/pingcap/tidb/store/mockstore/mockcopr" - "github.com/tikv/client-go/v2/mockstore/cluster" - "github.com/tikv/client-go/v2/mockstore/mocktikv" - "github.com/tikv/client-go/v2/tikv" - pd "github.com/tikv/pd/client" -) - -type testSplitSuite struct { - OneByOneSuite - cluster cluster.Cluster - store tikv.StoreProbe - bo *tikv.Backoffer -} - -var _ = Suite(&testSplitSuite{}) - -func (s *testSplitSuite) SetUpTest(c *C) { - client, cluster, pdClient, err := mocktikv.NewTiKVAndPDClient("", mockcopr.NewCoprRPCHandler()) - c.Assert(err, IsNil) - mocktikv.BootstrapWithSingleStore(cluster) - s.cluster = cluster - store, err := tikv.NewTestTiKVStore(client, pdClient, nil, nil, 0) - c.Assert(err, IsNil) - - // TODO: make this possible - // store, err := mockstore.NewMockStore( - // mockstore.WithClusterInspector(func(c cluster.Cluster) { - // mockstore.BootstrapWithSingleStore(c) - // s.cluster = c - // }), - // ) - // c.Assert(err, IsNil) - s.store = tikv.StoreProbe{KVStore: store} - s.bo = tikv.NewBackofferWithVars(context.Background(), 5000, nil) -} - -func (s *testSplitSuite) begin(c *C) tikv.TxnProbe { - txn, err := s.store.Begin() - c.Assert(err, IsNil) - return txn -} - -func (s *testSplitSuite) split(c *C, regionID uint64, key []byte) { - newRegionID, peerID := s.cluster.AllocID(), s.cluster.AllocID() - s.cluster.Split(regionID, newRegionID, key, []uint64{peerID}, peerID) -} - -func (s *testSplitSuite) TestSplitBatchGet(c *C) { - loc, err := s.store.GetRegionCache().LocateKey(s.bo, []byte("a")) - c.Assert(err, IsNil) - - txn := s.begin(c) - - keys := [][]byte{{'a'}, {'b'}, {'c'}} - _, region, err := s.store.GetRegionCache().GroupKeysByRegion(s.bo, keys, nil) - c.Assert(err, IsNil) - - s.split(c, loc.Region.GetID(), []byte("b")) - s.store.GetRegionCache().InvalidateCachedRegion(loc.Region) - - // mocktikv will panic if it meets a not-in-region key. 
- err = txn.BatchGetSingleRegion(s.bo, region, keys, func([]byte, []byte) {}) - c.Assert(err, IsNil) -} - -func (s *testSplitSuite) TestStaleEpoch(c *C) { - mockPDClient := &mockPDClient{client: s.store.GetRegionCache().PDClient()} - s.store.SetRegionCachePDClient(mockPDClient) - - loc, err := s.store.GetRegionCache().LocateKey(s.bo, []byte("a")) - c.Assert(err, IsNil) - - txn := s.begin(c) - err = txn.Set([]byte("a"), []byte("a")) - c.Assert(err, IsNil) - err = txn.Set([]byte("c"), []byte("c")) - c.Assert(err, IsNil) - err = txn.Commit(context.Background()) - c.Assert(err, IsNil) - - // Initiate a split and disable the PD client. If it still works, the - // new region is updated from kvrpc. - s.split(c, loc.Region.GetID(), []byte("b")) - mockPDClient.disable() - - txn = s.begin(c) - _, err = txn.Get(context.TODO(), []byte("a")) - c.Assert(err, IsNil) - _, err = txn.Get(context.TODO(), []byte("c")) - c.Assert(err, IsNil) -} - -var errStopped = errors.New("stopped") - -type mockPDClient struct { - sync.RWMutex - client pd.Client - stop bool -} - -func (c *mockPDClient) disable() { - c.Lock() - defer c.Unlock() - c.stop = true -} - -func (c *mockPDClient) GetAllMembers(ctx context.Context) ([]*pdpb.Member, error) { - return nil, nil -} - -func (c *mockPDClient) GetClusterID(context.Context) uint64 { - return 1 -} - -func (c *mockPDClient) GetTS(ctx context.Context) (int64, int64, error) { - c.RLock() - defer c.RUnlock() - - if c.stop { - return 0, 0, errors.Trace(errStopped) - } - return c.client.GetTS(ctx) -} - -func (c *mockPDClient) GetLocalTS(ctx context.Context, dcLocation string) (int64, int64, error) { - return c.GetTS(ctx) -} - -func (c *mockPDClient) GetTSAsync(ctx context.Context) pd.TSFuture { - return nil -} - -func (c *mockPDClient) GetLocalTSAsync(ctx context.Context, dcLocation string) pd.TSFuture { - return nil -} - -func (c *mockPDClient) GetRegion(ctx context.Context, key []byte) (*pd.Region, error) { - c.RLock() - defer c.RUnlock() - - if c.stop { - return nil, errors.Trace(errStopped) - } - return c.client.GetRegion(ctx, key) -} - -func (c *mockPDClient) GetRegionFromMember(ctx context.Context, key []byte, memberURLs []string) (*pd.Region, error) { - return nil, nil -} - -func (c *mockPDClient) GetPrevRegion(ctx context.Context, key []byte) (*pd.Region, error) { - c.RLock() - defer c.RUnlock() - - if c.stop { - return nil, errors.Trace(errStopped) - } - return c.client.GetPrevRegion(ctx, key) -} - -func (c *mockPDClient) GetRegionByID(ctx context.Context, regionID uint64) (*pd.Region, error) { - c.RLock() - defer c.RUnlock() - - if c.stop { - return nil, errors.Trace(errStopped) - } - return c.client.GetRegionByID(ctx, regionID) -} - -func (c *mockPDClient) ScanRegions(ctx context.Context, startKey []byte, endKey []byte, limit int) ([]*pd.Region, error) { - c.RLock() - defer c.RUnlock() - - if c.stop { - return nil, errors.Trace(errStopped) - } - return c.client.ScanRegions(ctx, startKey, endKey, limit) -} - -func (c *mockPDClient) GetStore(ctx context.Context, storeID uint64) (*metapb.Store, error) { - c.RLock() - defer c.RUnlock() - - if c.stop { - return nil, errors.Trace(errStopped) - } - return c.client.GetStore(ctx, storeID) -} - -func (c *mockPDClient) GetAllStores(ctx context.Context, opts ...pd.GetStoreOption) ([]*metapb.Store, error) { - c.RLock() - defer c.Unlock() - - if c.stop { - return nil, errors.Trace(errStopped) - } - return c.client.GetAllStores(ctx) -} - -func (c *mockPDClient) UpdateGCSafePoint(ctx context.Context, safePoint uint64) (uint64, error) { 
- panic("unimplemented") -} - -func (c *mockPDClient) UpdateServiceGCSafePoint(ctx context.Context, serviceID string, ttl int64, safePoint uint64) (uint64, error) { - panic("unimplemented") -} - -func (c *mockPDClient) Close() {} - -func (c *mockPDClient) ScatterRegion(ctx context.Context, regionID uint64) error { - return nil -} - -func (c *mockPDClient) ScatterRegions(ctx context.Context, regionsID []uint64, opts ...pd.RegionsOption) (*pdpb.ScatterRegionResponse, error) { - return nil, nil -} - -func (c *mockPDClient) SplitRegions(ctx context.Context, splitKeys [][]byte, opts ...pd.RegionsOption) (*pdpb.SplitRegionsResponse, error) { - return nil, nil -} - -func (c *mockPDClient) GetOperator(ctx context.Context, regionID uint64) (*pdpb.GetOperatorResponse, error) { - return &pdpb.GetOperatorResponse{Status: pdpb.OperatorStatus_SUCCESS}, nil -} - -func (c *mockPDClient) GetLeaderAddr() string { return "mockpd" } diff --git a/store/tikv/tests/store_fail_test.go b/store/tikv/tests/store_fail_test.go deleted file mode 100644 index c04380c1abf29..0000000000000 --- a/store/tikv/tests/store_fail_test.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2017 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -package tikv_test - -import ( - "context" - "sync" - "time" - - . "github.com/pingcap/check" - "github.com/pingcap/failpoint" -) - -func (s *testStoreSerialSuite) TestFailBusyServerKV(c *C) { - txn, err := s.store.Begin() - c.Assert(err, IsNil) - err = txn.Set([]byte("key"), []byte("value")) - c.Assert(err, IsNil) - err = txn.Commit(context.Background()) - c.Assert(err, IsNil) - - var wg sync.WaitGroup - wg.Add(2) - - c.Assert(failpoint.Enable("tikvclient/rpcServerBusy", `return(true)`), IsNil) - go func() { - defer wg.Done() - time.Sleep(time.Millisecond * 100) - c.Assert(failpoint.Disable("tikvclient/rpcServerBusy"), IsNil) - }() - - go func() { - defer wg.Done() - txn, err := s.store.Begin() - c.Assert(err, IsNil) - val, err := txn.Get(context.TODO(), []byte("key")) - c.Assert(err, IsNil) - c.Assert(val, BytesEquals, []byte("value")) - }() - - wg.Wait() -} diff --git a/store/tikv/tests/store_test.go b/store/tikv/tests/store_test.go deleted file mode 100644 index 16f67ead98022..0000000000000 --- a/store/tikv/tests/store_test.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2016 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -package tikv_test - -import ( - "context" - "sync" - "time" - - . 
"github.com/pingcap/check" - "github.com/pingcap/kvproto/pkg/kvrpcpb" - "github.com/tikv/client-go/v2/oracle" - "github.com/tikv/client-go/v2/oracle/oracles" - "github.com/tikv/client-go/v2/tikv" - "github.com/tikv/client-go/v2/tikvrpc" -) - -type testStoreSuite struct { - testStoreSuiteBase -} - -type testStoreSerialSuite struct { - testStoreSuiteBase -} - -type testStoreSuiteBase struct { - OneByOneSuite - store tikv.StoreProbe -} - -var _ = Suite(&testStoreSuite{}) -var _ = SerialSuites(&testStoreSerialSuite{}) - -func (s *testStoreSuiteBase) SetUpTest(c *C) { - s.store = tikv.StoreProbe{KVStore: NewTestStore(c)} -} - -func (s *testStoreSuiteBase) TearDownTest(c *C) { - c.Assert(s.store.Close(), IsNil) -} - -func (s *testStoreSuite) TestOracle(c *C) { - o := &oracles.MockOracle{} - s.store.SetOracle(o) - - ctx := context.Background() - t1, err := s.store.GetTimestampWithRetry(tikv.NewBackofferWithVars(ctx, 100, nil), oracle.GlobalTxnScope) - c.Assert(err, IsNil) - t2, err := s.store.GetTimestampWithRetry(tikv.NewBackofferWithVars(ctx, 100, nil), oracle.GlobalTxnScope) - c.Assert(err, IsNil) - c.Assert(t1, Less, t2) - - t1, err = o.GetLowResolutionTimestamp(ctx, &oracle.Option{}) - c.Assert(err, IsNil) - t2, err = o.GetLowResolutionTimestamp(ctx, &oracle.Option{}) - c.Assert(err, IsNil) - c.Assert(t1, Less, t2) - f := o.GetLowResolutionTimestampAsync(ctx, &oracle.Option{}) - c.Assert(f, NotNil) - _ = o.UntilExpired(0, 0, &oracle.Option{}) - - // Check retry. - var wg sync.WaitGroup - wg.Add(2) - - o.Disable() - go func() { - defer wg.Done() - time.Sleep(time.Millisecond * 100) - o.Enable() - }() - - go func() { - defer wg.Done() - t3, err := s.store.GetTimestampWithRetry(tikv.NewBackofferWithVars(ctx, 5000, nil), oracle.GlobalTxnScope) - c.Assert(err, IsNil) - c.Assert(t2, Less, t3) - expired := s.store.GetOracle().IsExpired(t2, 50, &oracle.Option{}) - c.Assert(expired, IsTrue) - }() - - wg.Wait() -} - -type checkRequestClient struct { - tikv.Client - priority kvrpcpb.CommandPri -} - -func (c *checkRequestClient) SendRequest(ctx context.Context, addr string, req *tikvrpc.Request, timeout time.Duration) (*tikvrpc.Response, error) { - resp, err := c.Client.SendRequest(ctx, addr, req, timeout) - if c.priority != req.Priority { - if resp.Resp != nil { - if getResp, ok := resp.Resp.(*kvrpcpb.GetResponse); ok { - getResp.Error = &kvrpcpb.KeyError{ - Abort: "request check error", - } - } - } - } - return resp, err -} - -func (s *testStoreSuite) TestRequestPriority(c *C) { - client := &checkRequestClient{ - Client: s.store.GetTiKVClient(), - } - s.store.SetTiKVClient(client) - - // Cover 2PC commit. - txn, err := s.store.Begin() - c.Assert(err, IsNil) - client.priority = kvrpcpb.CommandPri_High - txn.SetPriority(tikv.PriorityHigh) - err = txn.Set([]byte("key"), []byte("value")) - c.Assert(err, IsNil) - err = txn.Commit(context.Background()) - c.Assert(err, IsNil) - - // Cover the basic Get request. - txn, err = s.store.Begin() - c.Assert(err, IsNil) - client.priority = kvrpcpb.CommandPri_Low - txn.SetPriority(tikv.PriorityLow) - _, err = txn.Get(context.TODO(), []byte("key")) - c.Assert(err, IsNil) - - // A counter example. - client.priority = kvrpcpb.CommandPri_Low - txn.SetPriority(tikv.PriorityNormal) - _, err = txn.Get(context.TODO(), []byte("key")) - // err is translated to "try again later" by backoffer, so doesn't check error value here. - c.Assert(err, NotNil) - - // Cover Seek request. 
- client.priority = kvrpcpb.CommandPri_High - txn.SetPriority(tikv.PriorityHigh) - iter, err := txn.Iter([]byte("key"), nil) - c.Assert(err, IsNil) - for iter.Valid() { - c.Assert(iter.Next(), IsNil) - } - iter.Close() -} diff --git a/store/tikv/tests/ticlient_slow_test.go b/store/tikv/tests/ticlient_slow_test.go deleted file mode 100644 index 0f7afe8f4bb0b..0000000000000 --- a/store/tikv/tests/ticlient_slow_test.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2016 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !race - -package tikv_test - -import ( - "context" - - . "github.com/pingcap/check" - "github.com/tikv/client-go/v2/kv" - "github.com/tikv/client-go/v2/mockstore" - "github.com/tikv/client-go/v2/tikv" -) - -func (s *testTiclientSuite) TestSplitRegionIn2PC(c *C) { - if *mockstore.WithTiKV { - c.Skip("scatter will timeout with single node TiKV") - } - config := tikv.ConfigProbe{} - const preSplitThresholdInTest = 500 - old := config.LoadPreSplitDetectThreshold() - defer config.StorePreSplitDetectThreshold(old) - config.StorePreSplitDetectThreshold(preSplitThresholdInTest) - - old = config.LoadPreSplitSizeThreshold() - defer config.StorePreSplitSizeThreshold(old) - config.StorePreSplitSizeThreshold(5000) - - bo := tikv.NewBackofferWithVars(context.Background(), 1, nil) - checkKeyRegion := func(bo *tikv.Backoffer, start, end []byte, checker Checker) { - // Check regions after split. - loc1, err := s.store.GetRegionCache().LocateKey(bo, start) - c.Assert(err, IsNil) - loc2, err := s.store.GetRegionCache().LocateKey(bo, end) - c.Assert(err, IsNil) - c.Assert(loc1.Region.GetID(), checker, loc2.Region.GetID()) - } - mode := []string{"optimistic", "pessimistic"} - var ( - startKey []byte - endKey []byte - ) - ctx := context.Background() - for _, m := range mode { - if m == "optimistic" { - startKey = encodeKey(s.prefix, s08d("key", 0)) - endKey = encodeKey(s.prefix, s08d("key", preSplitThresholdInTest)) - } else { - startKey = encodeKey(s.prefix, s08d("pkey", 0)) - endKey = encodeKey(s.prefix, s08d("pkey", preSplitThresholdInTest)) - } - // Check before test. - checkKeyRegion(bo, startKey, endKey, Equals) - txn := s.beginTxn(c) - if m == "pessimistic" { - txn.SetPessimistic(true) - lockCtx := &kv.LockCtx{} - lockCtx.ForUpdateTS = txn.StartTS() - keys := make([][]byte, 0, preSplitThresholdInTest) - for i := 0; i < preSplitThresholdInTest; i++ { - keys = append(keys, encodeKey(s.prefix, s08d("pkey", i))) - } - err := txn.LockKeys(ctx, lockCtx, keys...) - c.Assert(err, IsNil) - checkKeyRegion(bo, startKey, endKey, Not(Equals)) - } - var err error - for i := 0; i < preSplitThresholdInTest; i++ { - if m == "optimistic" { - err = txn.Set(encodeKey(s.prefix, s08d("key", i)), valueBytes(i)) - } else { - err = txn.Set(encodeKey(s.prefix, s08d("pkey", i)), valueBytes(i)) - } - c.Assert(err, IsNil) - } - err = txn.Commit(context.Background()) - c.Assert(err, IsNil) - // Check region split after test. 
- checkKeyRegion(bo, startKey, endKey, Not(Equals)) - } -} diff --git a/store/tikv/tests/ticlient_test.go b/store/tikv/tests/ticlient_test.go deleted file mode 100644 index ad227a6c7961c..0000000000000 --- a/store/tikv/tests/ticlient_test.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright 2021 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -package tikv_test - -import ( - "context" - "fmt" - "time" - - . "github.com/pingcap/check" - "github.com/pingcap/tidb/kv" - tikvstore "github.com/tikv/client-go/v2/kv" - "github.com/tikv/client-go/v2/tikv" -) - -type testTiclientSuite struct { - OneByOneSuite - store *tikv.KVStore - // prefix is prefix of each key in this test. It is used for table isolation, - // or it may pollute other data. - prefix string -} - -var _ = Suite(&testTiclientSuite{}) - -func (s *testTiclientSuite) SetUpSuite(c *C) { - s.OneByOneSuite.SetUpSuite(c) - s.store = NewTestStore(c) - s.prefix = fmt.Sprintf("ticlient_%d", time.Now().Unix()) -} - -func (s *testTiclientSuite) TearDownSuite(c *C) { - // Clean all data, or it may pollute other data. - txn := s.beginTxn(c) - scanner, err := txn.Iter(encodeKey(s.prefix, ""), nil) - c.Assert(err, IsNil) - c.Assert(scanner, NotNil) - for scanner.Valid() { - k := scanner.Key() - err = txn.Delete(k) - c.Assert(err, IsNil) - scanner.Next() - } - err = txn.Commit(context.Background()) - c.Assert(err, IsNil) - err = s.store.Close() - c.Assert(err, IsNil) - s.OneByOneSuite.TearDownSuite(c) -} - -func (s *testTiclientSuite) beginTxn(c *C) *tikv.KVTxn { - txn, err := s.store.Begin() - c.Assert(err, IsNil) - return txn -} - -func (s *testTiclientSuite) TestSingleKey(c *C) { - txn := s.beginTxn(c) - err := txn.Set(encodeKey(s.prefix, "key"), []byte("value")) - c.Assert(err, IsNil) - err = txn.LockKeys(context.Background(), new(tikvstore.LockCtx), encodeKey(s.prefix, "key")) - c.Assert(err, IsNil) - err = txn.Commit(context.Background()) - c.Assert(err, IsNil) - - txn = s.beginTxn(c) - val, err := txn.Get(context.TODO(), encodeKey(s.prefix, "key")) - c.Assert(err, IsNil) - c.Assert(val, BytesEquals, []byte("value")) - - txn = s.beginTxn(c) - err = txn.Delete(encodeKey(s.prefix, "key")) - c.Assert(err, IsNil) - err = txn.Commit(context.Background()) - c.Assert(err, IsNil) -} - -func (s *testTiclientSuite) TestMultiKeys(c *C) { - const keyNum = 100 - - txn := s.beginTxn(c) - for i := 0; i < keyNum; i++ { - err := txn.Set(encodeKey(s.prefix, s08d("key", i)), valueBytes(i)) - c.Assert(err, IsNil) - } - err := txn.Commit(context.Background()) - c.Assert(err, IsNil) - - txn = s.beginTxn(c) - for i := 0; i < keyNum; i++ { - val, err1 := txn.Get(context.TODO(), encodeKey(s.prefix, s08d("key", i))) - c.Assert(err1, IsNil) - c.Assert(val, BytesEquals, valueBytes(i)) - } - - txn = s.beginTxn(c) - for i := 0; i < keyNum; i++ { - err = txn.Delete(encodeKey(s.prefix, s08d("key", i))) - c.Assert(err, IsNil) - } - err = txn.Commit(context.Background()) - c.Assert(err, IsNil) -} - -func (s *testTiclientSuite) TestNotExist(c *C) { - txn := s.beginTxn(c) - _, err := txn.Get(context.TODO(), encodeKey(s.prefix, 
"noSuchKey")) - c.Assert(err, NotNil) -} - -func (s *testTiclientSuite) TestLargeRequest(c *C) { - largeValue := make([]byte, 9*1024*1024) // 9M value. - txn := s.beginTxn(c) - txn.GetUnionStore().SetEntrySizeLimit(1024*1024, 100*1024*1024) - err := txn.Set([]byte("key"), largeValue) - c.Assert(err, NotNil) - err = txn.Commit(context.Background()) - c.Assert(err, IsNil) - c.Assert(kv.IsTxnRetryableError(err), IsFalse) -} diff --git a/store/tikv/tests/util_test.go b/store/tikv/tests/util_test.go deleted file mode 100644 index 9eab98ffabb41..0000000000000 --- a/store/tikv/tests/util_test.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2021 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -package tikv_test - -import ( - "context" - "flag" - "fmt" - "strings" - "unsafe" - - . "github.com/pingcap/check" - "github.com/pingcap/errors" - "github.com/pingcap/tidb/kv" - txndriver "github.com/pingcap/tidb/store/driver/txn" - "github.com/pingcap/tidb/store/mockstore/unistore" - "github.com/tikv/client-go/v2/config" - "github.com/tikv/client-go/v2/mockstore" - "github.com/tikv/client-go/v2/tikv" - "github.com/tikv/client-go/v2/util/codec" - pd "github.com/tikv/pd/client" -) - -var ( - pdAddrs = flag.String("pd-addrs", "127.0.0.1:2379", "pd addrs") -) - -// NewTestStore creates a KVStore for testing purpose. -func NewTestStore(c *C) *tikv.KVStore { - if !flag.Parsed() { - flag.Parse() - } - - if *mockstore.WithTiKV { - addrs := strings.Split(*pdAddrs, ",") - pdClient, err := pd.NewClient(addrs, pd.SecurityOption{}) - c.Assert(err, IsNil) - var securityConfig config.Security - tlsConfig, err := securityConfig.ToTLSConfig() - c.Assert(err, IsNil) - spKV, err := tikv.NewEtcdSafePointKV(addrs, tlsConfig) - c.Assert(err, IsNil) - store, err := tikv.NewKVStore("test-store", &tikv.CodecPDClient{Client: pdClient}, spKV, tikv.NewRPCClient(securityConfig)) - c.Assert(err, IsNil) - err = clearStorage(store) - c.Assert(err, IsNil) - return store - } - client, pdClient, cluster, err := unistore.New("") - c.Assert(err, IsNil) - unistore.BootstrapWithSingleStore(cluster) - store, err := tikv.NewTestTiKVStore(client, pdClient, nil, nil, 0) - c.Assert(err, IsNil) - return store -} - -func clearStorage(store *tikv.KVStore) error { - txn, err := store.Begin() - if err != nil { - return errors.Trace(err) - } - iter, err := txn.Iter(nil, nil) - if err != nil { - return errors.Trace(err) - } - for iter.Valid() { - txn.Delete(iter.Key()) - if err := iter.Next(); err != nil { - return errors.Trace(err) - } - } - return txn.Commit(context.Background()) -} - -// OneByOneSuite is a suite, When with-tikv flag is true, there is only one storage, so the test suite have to run one by one. -type OneByOneSuite = mockstore.OneByOneSuite - -func encodeKey(prefix, s string) []byte { - return codec.EncodeBytes(nil, []byte(fmt.Sprintf("%s_%s", prefix, s))) -} - -func valueBytes(n int) []byte { - return []byte(fmt.Sprintf("value%d", n)) -} - -// s08d is for returning format string "%s%08d" to keep string sorted. 
-// e.g.: "0002" < "0011", otherwise "2" > "11" -func s08d(prefix string, n int) string { - return fmt.Sprintf("%s%08d", prefix, n) -} - -func toTiDBTxn(txn *tikv.TxnProbe) kv.Transaction { - return txndriver.NewTiKVTxn(txn.KVTxn) -} - -func toTiDBKeys(keys [][]byte) []kv.Key { - kvKeys := *(*[]kv.Key)(unsafe.Pointer(&keys)) - return kvKeys -} From 03847a8ded0cce027e90f1c3f8a389ea7e392232 Mon Sep 17 00:00:00 2001 From: ClSlaid Date: Thu, 17 Jun 2021 21:02:38 +0800 Subject: [PATCH 03/25] executor, infoschema: Add cluster_statements_summary_evicted table to TiDB (#25418) --- executor/builder.go | 1 + executor/infoschema_reader.go | 16 ++++++++-------- infoschema/cluster.go | 3 +++ infoschema/tables.go | 1 + infoschema/tables_test.go | 34 ++++++++++++++++++++++++++++++++-- 5 files changed, 45 insertions(+), 10 deletions(-) diff --git a/executor/builder.go b/executor/builder.go index f9a3c0746ebe0..b3bbdc579d6cb 100644 --- a/executor/builder.go +++ b/executor/builder.go @@ -1563,6 +1563,7 @@ func (b *executorBuilder) buildMemTable(v *plannercore.PhysicalMemTable) Executo strings.ToLower(infoschema.TableStatementsSummaryEvicted), strings.ToLower(infoschema.ClusterTableStatementsSummary), strings.ToLower(infoschema.ClusterTableStatementsSummaryHistory), + strings.ToLower(infoschema.ClusterTableStatementsSummaryEvicted), strings.ToLower(infoschema.TablePlacementPolicy), strings.ToLower(infoschema.TableClientErrorsSummaryGlobal), strings.ToLower(infoschema.TableClientErrorsSummaryByUser), diff --git a/executor/infoschema_reader.go b/executor/infoschema_reader.go index b01972726991a..ef39c9cc96d35 100644 --- a/executor/infoschema_reader.go +++ b/executor/infoschema_reader.go @@ -145,11 +145,11 @@ func (e *memtableRetriever) retrieve(ctx context.Context, sctx sessionctx.Contex err = e.dataForTiKVStoreStatus(sctx) case infoschema.TableStatementsSummary, infoschema.TableStatementsSummaryHistory, + infoschema.TableStatementsSummaryEvicted, infoschema.ClusterTableStatementsSummary, - infoschema.ClusterTableStatementsSummaryHistory: + infoschema.ClusterTableStatementsSummaryHistory, + infoschema.ClusterTableStatementsSummaryEvicted: err = e.setDataForStatementsSummary(sctx, e.table.Name.O) - case infoschema.TableStatementsSummaryEvicted: - e.setDataForStatementsSummaryEvicted(sctx) case infoschema.TablePlacementPolicy: err = e.setDataForPlacementPolicy(sctx) case infoschema.TableClientErrorsSummaryGlobal, @@ -1912,10 +1912,14 @@ func (e *memtableRetriever) setDataForStatementsSummary(ctx sessionctx.Context, case infoschema.TableStatementsSummaryHistory, infoschema.ClusterTableStatementsSummaryHistory: e.rows = stmtsummary.StmtSummaryByDigestMap.ToHistoryDatum(user, isSuper) + case infoschema.TableStatementsSummaryEvicted, + infoschema.ClusterTableStatementsSummaryEvicted: + e.rows = stmtsummary.StmtSummaryByDigestMap.ToEvictedCountDatum() } switch tableName { case infoschema.ClusterTableStatementsSummary, - infoschema.ClusterTableStatementsSummaryHistory: + infoschema.ClusterTableStatementsSummaryHistory, + infoschema.ClusterTableStatementsSummaryEvicted: rows, err := infoschema.AppendHostInfoToRows(ctx, e.rows) if err != nil { return err @@ -2101,10 +2105,6 @@ func (e *memtableRetriever) setDataForClusterDeadlock(ctx sessionctx.Context) er return nil } -func (e *memtableRetriever) setDataForStatementsSummaryEvicted(ctx sessionctx.Context) { - e.rows = stmtsummary.StmtSummaryByDigestMap.ToEvictedCountDatum() -} - type hugeMemTableRetriever struct { dummyCloser table *model.TableInfo diff --git 
a/infoschema/cluster.go b/infoschema/cluster.go index 20589ad7a0c67..e4e14e195826f 100644 --- a/infoschema/cluster.go +++ b/infoschema/cluster.go @@ -37,6 +37,8 @@ const ( ClusterTableStatementsSummary = "CLUSTER_STATEMENTS_SUMMARY" // ClusterTableStatementsSummaryHistory is the string constant of cluster statement summary history table. ClusterTableStatementsSummaryHistory = "CLUSTER_STATEMENTS_SUMMARY_HISTORY" + // ClusterTableStatementsSummaryEvicted is the string constant of cluster statement summary evict table. + ClusterTableStatementsSummaryEvicted = "CLUSTER_STATEMENTS_SUMMARY_EVICTED" // ClusterTableTiDBTrx is the string constant of cluster transaction running table. ClusterTableTiDBTrx = "CLUSTER_TIDB_TRX" // ClusterTableDeadlocks is the string constant of cluster dead lock table. @@ -49,6 +51,7 @@ var memTableToClusterTables = map[string]string{ TableProcesslist: ClusterTableProcesslist, TableStatementsSummary: ClusterTableStatementsSummary, TableStatementsSummaryHistory: ClusterTableStatementsSummaryHistory, + TableStatementsSummaryEvicted: ClusterTableStatementsSummaryEvicted, TableTiDBTrx: ClusterTableTiDBTrx, TableDeadlocks: ClusterTableDeadlocks, } diff --git a/infoschema/tables.go b/infoschema/tables.go index a1404ac8c717b..cc66f0afd6cd0 100644 --- a/infoschema/tables.go +++ b/infoschema/tables.go @@ -249,6 +249,7 @@ var tableIDMap = map[string]int64{ ClusterTableDeadlocks: autoid.InformationSchemaDBID + 73, TableDataLockWaits: autoid.InformationSchemaDBID + 74, TableStatementsSummaryEvicted: autoid.InformationSchemaDBID + 75, + ClusterTableStatementsSummaryEvicted: autoid.InformationSchemaDBID + 76, } type columnInfo struct { diff --git a/infoschema/tables_test.go b/infoschema/tables_test.go index 6cf0acfb68921..38857e40b9563 100644 --- a/infoschema/tables_test.go +++ b/infoschema/tables_test.go @@ -1374,12 +1374,18 @@ func (s *testTableSuite) TestStmtSummarySensitiveQuery(c *C) { )) } +// test stmtSummaryEvictedCount func (s *testTableSuite) TestSimpleStmtSummaryEvictedCount(c *C) { now := time.Now().Unix() interval := int64(1800) beginTimeForCurInterval := now - now%interval tk := s.newTestKitWithPlanCache(c) tk.MustExec(fmt.Sprintf("set global tidb_stmt_summary_refresh_interval = %v", interval)) + + // clean up side effects + defer tk.MustExec("set global tidb_stmt_summary_max_stmt_count = 100") + defer tk.MustExec("set global tidb_stmt_summary_refresh_interval = 1800") + tk.MustExec("set global tidb_enable_stmt_summary = 0") tk.MustExec("set global tidb_enable_stmt_summary = 1") // first sql @@ -1395,10 +1401,34 @@ func (s *testTableSuite) TestSimpleStmtSummaryEvictedCount(c *C) { int64(2)), )) // TODO: Add more tests. 
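+	// cleanup of the modified global variables is handled by the defer statements at the top of this test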
+}
+
+// test stmtSummaryEvictedCount cluster table
+func (s *testClusterTableSuite) TestStmtSummaryEvictedCountTable(c *C) {
+	tk := s.newTestKitWithRoot(c)
+	// disable refreshing
+	tk.MustExec("set global tidb_stmt_summary_refresh_interval=9999")
+	// set information_schema.statements_summary's size to 1
+	tk.MustExec("set global tidb_stmt_summary_max_stmt_count = 1")
 	// clean up side effects
-	tk.MustExec("set global tidb_stmt_summary_max_stmt_count = 100")
-	tk.MustExec("set global tidb_stmt_summary_refresh_interval = 1800")
+	defer tk.MustExec("set global tidb_stmt_summary_max_stmt_count = 100")
+	defer tk.MustExec("set global tidb_stmt_summary_refresh_interval = 1800")
+	// clear information_schema.statements_summary
+	tk.MustExec("set global tidb_enable_stmt_summary=0")
+	tk.MustExec("set global tidb_enable_stmt_summary=1")
+
+	// make a new session for the test...
+	tk = s.newTestKitWithRoot(c)
+	// first sql
+	tk.MustExec("show databases;")
+	// second sql; running this query evicts the former sql from stmt_summary
+	tk.MustQuery("select evicted_count from information_schema.cluster_statements_summary_evicted;").
+		Check(testkit.Rows("1"))
+	// after executing the sql above
+	tk.MustQuery("select evicted_count from information_schema.cluster_statements_summary_evicted;").
+		Check(testkit.Rows("2"))
+	// TODO: Add more tests.
+
 }
 
 func (s *testTableSuite) TestStmtSummaryTableOther(c *C) {
From 2f510fbb41250ab81e710f14b74d07b731d60107 Mon Sep 17 00:00:00 2001
From: Morgan Tocker
Date: Thu, 17 Jun 2021 19:04:38 -0600
Subject: [PATCH 04/25] docs: update dynamic privileges proposal for SEM changes (#25503)

---
 docs/design/2021-03-09-dynamic-privileges.md | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/docs/design/2021-03-09-dynamic-privileges.md b/docs/design/2021-03-09-dynamic-privileges.md
index c85c0dc0c5305..ea7bf9836ef7e 100644
--- a/docs/design/2021-03-09-dynamic-privileges.md
+++ b/docs/design/2021-03-09-dynamic-privileges.md
@@ -1,7 +1,7 @@
 # Proposal:
 
 - Author(s):     [morgo](https://github.com/morgo)
-- Last updated:  May 04, 2021
+- Last updated:  June 16, 2021
 - Discussion at: N/A
 
 ## Table of Contents
@@ -233,14 +233,16 @@ No change
 | `SYSTEM_VARIABLES_ADMIN` | Allows changing any GLOBAL system variable. | Currently this required `SUPER`. It will now require `SYSTEM_VARIABLES_ADMIN` or `SUPER`. |
 | `ROLE_ADMIN` | Allows granting and revoking roles. | Won’t allow revoking on restricted_users (see below). |
 | `CONNECTION_ADMIN` | Allows killing connections. | Like `PROCESS` static privilege, but slightly more restrictive (no show processlist). |
+| `SYSTEM_USER` | The user can't be altered or dropped by users holding only the `CREATE USER` privilege. | Helps prevent security escalations. |
 
 #### TiDB Extensions
 
 | Privilege Name | Description | Notes |
 | --------------- | --------------- | --------------- |
+| `RESTORE_ADMIN` | Restoring should require more permissions than backup because it is a higher risk operation. | It is inspired by MySQL's BACKUP_ADMIN/CLONE_ADMIN, but is not applicable to MySQL because MySQL has no online restore. |
 | `RESTRICTED_VARIABLES_ADMIN` | Allows changing a restricted `GLOBAL` system variable. | Currently in SEM all high risk variables are unloaded. TBD, it might be required in future that they are only visible/settable to those with this privilege and not SUPER. |
 | `RESTRICTED_STATUS_ADMIN` | Allows observing restricted status variables. | i.e. `SHOW GLOBAL STATUS` by default hides some statistics when `SEM` is enabled.
| -| `RESTRICTED_CONNECTION_ADMIN` | A special privilege to say that their connections, etc. can’t be killed by SUPER users AND they can kill connections by all other users. Affects `KILL`, `KILL TIDB` commands. | It is intended for the CloudAdmin user in DBaaS. | +| `RESTRICTED_CONNECTION_ADMIN` | Killing connections that belong to your own user is always permitted. Killing connections that belong to other users requires `CONNECTION_ADMIN` or `SUPER`; but there is an exception for `RESTRICTED_USER_ADMIN` users. To kill these connections also requires `RESTRICTED_CONNECTION_ADMIN`. This affects `KILL`, `KILL TIDB` commands. | It is intended for the CloudAdmin user in DBaaS. | | `RESTRICTED_USER_ADMIN` | A special privilege to say that their access can’t be changed by `SUPER` users. Statements `DROP USER`, `SET PASSWORD`, `ALTER USER`, `REVOKE` are all limited. | It is intended for the CloudAdmin user in DbaaS. | | `RESTRICTED_TABLES_ADMIN` | A special privilege which means that the SEM hidden table semantic doesn’t apply. | It is intended for the CloudAdmin user in DbaaS. | From 507ee3e1ce16790d3782c76daf56eb87dcd237c1 Mon Sep 17 00:00:00 2001 From: xhe Date: Fri, 18 Jun 2021 10:44:38 +0800 Subject: [PATCH 05/25] *: hide txn scope setting (#25519) --- sessionctx/variable/sysvar.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/sessionctx/variable/sysvar.go b/sessionctx/variable/sysvar.go index 2ac1e0b403d7d..6fc0275ac3841 100644 --- a/sessionctx/variable/sysvar.go +++ b/sessionctx/variable/sysvar.go @@ -810,7 +810,8 @@ var defaultSysVars = []*SysVar{ {Scope: ScopeGlobal, Name: InitConnect, Value: ""}, /* TiDB specific variables */ - {Scope: ScopeSession, Name: TiDBTxnScope, skipInit: true, Value: func() string { + // TODO: TiDBTxnScope is hidden because local txn feature is not done. 
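+	// A hidden variable is omitted from the output of SHOW VARIABLES, but it can still be read and set by name.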
+ {Scope: ScopeSession, Name: TiDBTxnScope, skipInit: true, Hidden: true, Value: func() string { if isGlobal, _ := config.GetTxnScopeFromConfig(); isGlobal { return kv.GlobalTxnScope } From 2653b128edf243f3275745d7c57bc65b2103be04 Mon Sep 17 00:00:00 2001 From: Yuanjia Zhang Date: Fri, 18 Jun 2021 11:02:38 +0800 Subject: [PATCH 06/25] planner: forbid BatchPointGet on tables partitioned by compound expressions (#25538) --- executor/partition_table_test.go | 39 ++++++++++++++++++++++++++++++++ planner/core/point_get_plan.go | 7 ++++++ table/tables/partition.go | 11 +++++---- 3 files changed, 53 insertions(+), 4 deletions(-) diff --git a/executor/partition_table_test.go b/executor/partition_table_test.go index 469808f572329..3d4ade1776e54 100644 --- a/executor/partition_table_test.go +++ b/executor/partition_table_test.go @@ -684,6 +684,45 @@ func (s *partitionTableSuite) TestDynamicPruningUnderIndexJoin(c *C) { tk.MustQuery(`select /*+ INL_JOIN(touter, tnormal) */ tnormal.* from touter join tnormal use index(idx_b) on touter.b = tnormal.b`).Sort().Rows()) } +func (s *partitionTableSuite) TestIssue25527(c *C) { + if israce.RaceEnabled { + c.Skip("exhaustive types test, skip race test") + } + + tk := testkit.NewTestKitWithInit(c, s.store) + tk.MustExec("create database test_issue_25527") + tk.MustExec("use test_issue_25527") + tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic'") + tk.MustExec("set @@session.tidb_enable_list_partition = ON") + + // the original case + tk.MustExec(`CREATE TABLE t ( + col1 tinyint(4) primary key + ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin PARTITION BY HASH( COL1 DIV 80 ) + PARTITIONS 6`) + tk.MustExec(`insert into t values(-128), (107)`) + tk.MustExec(`prepare stmt from 'select col1 from t where col1 in (?, ?, ?)'`) + tk.MustExec(`set @a=-128, @b=107, @c=-128`) + tk.MustQuery(`execute stmt using @a,@b,@c`).Sort().Check(testkit.Rows("-128", "107")) + + // the minimal reproducible case for hash partitioning + tk.MustExec(`CREATE TABLE t0 (a int primary key) PARTITION BY HASH( a DIV 80 ) PARTITIONS 2`) + tk.MustExec(`insert into t0 values (1)`) + tk.MustQuery(`select a from t0 where a in (1)`).Check(testkit.Rows("1")) + + // the minimal reproducible case for range partitioning + tk.MustExec(`create table t1 (a int primary key) partition by range (a+5) ( + partition p0 values less than(10), partition p1 values less than(20))`) + tk.MustExec(`insert into t1 values (5)`) + tk.MustQuery(`select a from t1 where a in (5)`).Check(testkit.Rows("5")) + + // the minimal reproducible case for list partitioning + tk.MustExec(`create table t2 (a int primary key) partition by list (a+5) ( + partition p0 values in (5, 6, 7, 8), partition p1 values in (9, 10, 11, 12))`) + tk.MustExec(`insert into t2 values (5)`) + tk.MustQuery(`select a from t2 where a in (5)`).Check(testkit.Rows("5")) +} + func (s *partitionTableSuite) TestBatchGetforRangeandListPartitionTable(c *C) { if israce.RaceEnabled { c.Skip("exhaustive types test, skip race test") diff --git a/planner/core/point_get_plan.go b/planner/core/point_get_plan.go index a1e0f7184dbc8..12c385e6f4500 100644 --- a/planner/core/point_get_plan.go +++ b/planner/core/point_get_plan.go @@ -539,6 +539,13 @@ func newBatchPointGetPlan( if partitionExpr == nil { return nil } + + if partitionExpr.Expr == nil { + return nil + } + if _, ok := partitionExpr.Expr.(*expression.Column); !ok { + return nil + } } if handleCol != nil { var handles = make([]kv.Handle, len(patternInExpr.List)) diff --git 
a/table/tables/partition.go b/table/tables/partition.go index 165f188866550..74c473268dec4 100644 --- a/table/tables/partition.go +++ b/table/tables/partition.go @@ -558,15 +558,17 @@ func findIdxByColUniqueID(cols []*expression.Column, col *expression.Column) int return -1 } -func extractListPartitionExprColumns(ctx sessionctx.Context, pi *model.PartitionInfo, columns []*expression.Column, names types.NameSlice) ([]*expression.Column, []int, error) { +func extractListPartitionExprColumns(ctx sessionctx.Context, pi *model.PartitionInfo, columns []*expression.Column, names types.NameSlice) (expression.Expression, []*expression.Column, []int, error) { var cols []*expression.Column + var partExpr expression.Expression if len(pi.Columns) == 0 { schema := expression.NewSchema(columns...) exprs, err := expression.ParseSimpleExprsWithNames(ctx, pi.Expr, schema, names) if err != nil { - return nil, nil, err + return nil, nil, nil, err } cols = expression.ExtractColumns(exprs[0]) + partExpr = exprs[0] } else { for _, col := range pi.Columns { idx := expression.FindFieldNameIdxByColName(names, col.L) @@ -584,14 +586,14 @@ func extractListPartitionExprColumns(ctx sessionctx.Context, pi *model.Partition deDupCols = append(deDupCols, c) } } - return deDupCols, offset, nil + return partExpr, deDupCols, offset, nil } func generateListPartitionExpr(ctx sessionctx.Context, tblInfo *model.TableInfo, columns []*expression.Column, names types.NameSlice) (*PartitionExpr, error) { // The caller should assure partition info is not nil. pi := tblInfo.GetPartitionInfo() - exprCols, offset, err := extractListPartitionExprColumns(ctx, pi, columns, names) + partExpr, exprCols, offset, err := extractListPartitionExprColumns(ctx, pi, columns, names) if err != nil { return nil, err } @@ -607,6 +609,7 @@ func generateListPartitionExpr(ctx sessionctx.Context, tblInfo *model.TableInfo, ret := &PartitionExpr{ ForListPruning: listPrune, ColumnOffset: offset, + Expr: partExpr, } return ret, nil } From 0e32561000eb19615f91c235858077a73174b662 Mon Sep 17 00:00:00 2001 From: ZhuoZhi <517770911@qq.com> Date: Fri, 18 Jun 2021 11:30:38 +0800 Subject: [PATCH 07/25] planner: fix getPartitionColumnPos panic (#25524) --- executor/point_get_test.go | 49 ++++++++++++++++++++++++++++++++++ planner/core/point_get_plan.go | 17 +++++------- 2 files changed, 55 insertions(+), 11 deletions(-) diff --git a/executor/point_get_test.go b/executor/point_get_test.go index 8a9dbdbe135f3..0d6798b9d4449 100644 --- a/executor/point_get_test.go +++ b/executor/point_get_test.go @@ -159,6 +159,55 @@ func (s *testPointGetSuite) TestPointGetDataTooLong(c *C) { tk.MustExec("drop table if exists PK_1389;") } +// issue #25489 +func (s *testPointGetSuite) TestIssue25489(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic'") + tk.MustExec("set @@session.tidb_enable_list_partition = ON") + tk.MustExec("use test") + tk.MustExec("drop table if exists UK_RP16939;") + // range partition + tk.MustExec(`CREATE TABLE UK_RP16939 ( + COL1 tinyint(16) DEFAULT '108' COMMENT 'NUMERIC UNIQUE INDEX', + COL2 varchar(20) DEFAULT NULL, + COL4 datetime DEFAULT NULL, + COL3 bigint(20) DEFAULT NULL, + COL5 float DEFAULT NULL, + UNIQUE KEY UK_COL1 (COL1) + ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin + PARTITION BY RANGE ( COL1+13 ) ( + PARTITION P0 VALUES LESS THAN (-44), + PARTITION P1 VALUES LESS THAN (-23), + PARTITION P2 VALUES LESS THAN (-22), + PARTITION P3 VALUES LESS THAN (63), + PARTITION P4 VALUES LESS THAN 
(75),
+		PARTITION P5 VALUES LESS THAN (90),
+		PARTITION PMX VALUES LESS THAN (MAXVALUE)
+	) ;`)
+	query := "select col1, col2 from UK_RP16939 where col1 in (116, 48, -30);"
+	c.Assert(tk.HasPlan(query, "Batch_Point_Get"), IsFalse)
+	tk.MustQuery(query).Check(testkit.Rows())
+	tk.MustExec("drop table if exists UK_RP16939;")
+
+	// list partition
+	tk.MustExec(`CREATE TABLE UK_RP16939 (
+		COL1 tinyint(16) DEFAULT '108' COMMENT 'NUMERIC UNIQUE INDEX',
+		COL2 varchar(20) DEFAULT NULL,
+		COL4 datetime DEFAULT NULL,
+		COL3 bigint(20) DEFAULT NULL,
+		COL5 float DEFAULT NULL,
+		UNIQUE KEY UK_COL1 (COL1)
+	) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin
+	PARTITION BY LIST ( COL1+13 ) (
+		PARTITION P0 VALUES IN (-44, -23),
+		PARTITION P1 VALUES IN (-22, 63),
+		PARTITION P2 VALUES IN (75, 90)
+	) ;`)
+	c.Assert(tk.HasPlan(query, "Batch_Point_Get"), IsFalse)
+	tk.MustQuery(query).Check(testkit.Rows())
+	tk.MustExec("drop table if exists UK_RP16939;")
+}
+
 // issue #25320
 func (s *testPointGetSuite) TestDistinctPlan(c *C) {
 	tk := testkit.NewTestKit(c, s.store)
diff --git a/planner/core/point_get_plan.go b/planner/core/point_get_plan.go
index 12c385e6f4500..d53d66f85b0f7 100644
--- a/planner/core/point_get_plan.go
+++ b/planner/core/point_get_plan.go
@@ -1589,22 +1589,17 @@ func getPartitionColumnPos(idx *model.IndexInfo, partitionExpr *tables.Partition
 	}
 	case model.PartitionTypeRange:
 		// left range columns partition for future development
-		if len(pi.Columns) == 0 {
-			if col, ok := partitionExpr.Expr.(*expression.Column); ok {
-				colInfo := findColNameByColID(tbl.Columns, col)
-				partitionName = colInfo.Name
-			}
+		if col, ok := partitionExpr.Expr.(*expression.Column); ok && len(pi.Columns) == 0 {
+			colInfo := findColNameByColID(tbl.Columns, col)
+			partitionName = colInfo.Name
 		} else {
 			return 0, errors.Errorf("unsupported partition type in BatchGet")
 		}
 	case model.PartitionTypeList:
 		// left list columns partition for future development
-		if partitionExpr.ForListPruning.ColPrunes == nil {
-			locateExpr := partitionExpr.ForListPruning.LocateExpr
-			if locateExpr, ok := locateExpr.(*expression.Column); ok {
-				colInfo := findColNameByColID(tbl.Columns, locateExpr)
-				partitionName = colInfo.Name
-			}
+		if locateExpr, ok := partitionExpr.ForListPruning.LocateExpr.(*expression.Column); ok && partitionExpr.ForListPruning.ColPrunes == nil {
+			colInfo := findColNameByColID(tbl.Columns, locateExpr)
+			partitionName = colInfo.Name
 		} else {
 			return 0, errors.Errorf("unsupported partition type in BatchGet")
 		}
From 793517dfafc32ecfbf9a9dabad1f72e26ad1c3de Mon Sep 17 00:00:00 2001
From: MyonKeminta <9948422+MyonKeminta@users.noreply.github.com>
Date: Fri, 18 Jun 2021 11:38:38 +0800
Subject: [PATCH 08/25] config: Add Lock View's config to config example and test (#25470)

---
 config/config.go           | 3 ++-
 config/config.toml.example | 3 +++
 config/config_test.go      | 3 +++
 3 files changed, 8 insertions(+), 1 deletion(-)

diff --git a/config/config.go b/config/config.go
index 4e759cb5c6632..8b214f694ffcc 100644
--- a/config/config.go
+++ b/config/config.go
@@ -494,7 +494,8 @@ type Binlog struct {
 // PessimisticTxn is the config for pessimistic transaction.
 type PessimisticTxn struct {
 	// The max count of retry for a single statement in a pessimistic transaction.
-	MaxRetryCount uint `toml:"max-retry-count" json:"max-retry-count"`
+	MaxRetryCount uint `toml:"max-retry-count" json:"max-retry-count"`
+	// The max count of deadlock events that will be recorded in the information_schema.deadlocks table.
DeadlockHistoryCapacity uint `toml:"deadlock-history-capacity" json:"deadlock-history-capacity"` } diff --git a/config/config.toml.example b/config/config.toml.example index 03ac86c205235..909c40138055a 100644 --- a/config/config.toml.example +++ b/config/config.toml.example @@ -443,6 +443,9 @@ strategy = "range" # max retry count for a statement in a pessimistic transaction. max-retry-count = 256 +# The max count of deadlock events that will be recorded in the information_schema.deadlocks table. +deadlock-history-capacity = 10 + [stmt-summary] # enable statement summary. enable = true diff --git a/config/config_test.go b/config/config_test.go index 9533b8bf52975..f4092e1c7584c 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -230,6 +230,8 @@ group= "abc" zone= "dc-1" [security] spilled-file-encryption-method = "plaintext" +[pessimistic-txn] +deadlock-history-capacity = 123 `) c.Assert(err, IsNil) @@ -284,6 +286,7 @@ spilled-file-encryption-method = "plaintext" c.Assert(conf.EnableEnumLengthLimit, Equals, false) c.Assert(conf.EnableForwarding, Equals, true) c.Assert(conf.StoresRefreshInterval, Equals, uint64(30)) + c.Assert(conf.PessimisticTxn.DeadlockHistoryCapacity, Equals, uint(123)) _, err = f.WriteString(` [log.file] From 9c947dffe4efd452d35a118751a319efc4ed9c38 Mon Sep 17 00:00:00 2001 From: ZhuoZhi <517770911@qq.com> Date: Fri, 18 Jun 2021 11:50:38 +0800 Subject: [PATCH 09/25] ddl: fix create partition table error under NO_UNSIGNED_SUBTRACTION (#25435) --- ddl/ddl_api.go | 14 +++++++------ ddl/partition.go | 5 +++-- table/tables/partition_test.go | 37 ++++++++++++++++++++++++++++++++++ 3 files changed, 48 insertions(+), 8 deletions(-) diff --git a/ddl/ddl_api.go b/ddl/ddl_api.go index 63cc9ae5da432..a2cb983d533ac 100644 --- a/ddl/ddl_api.go +++ b/ddl/ddl_api.go @@ -1590,15 +1590,17 @@ func checkTableInfoValidWithStmt(ctx sessionctx.Context, tbInfo *model.TableInfo if err := checkGeneratedColumn(s.Cols); err != nil { return errors.Trace(err) } - if tbInfo.Partition != nil && s.Partition != nil { + if tbInfo.Partition != nil { if err := checkPartitionDefinitionConstraints(ctx, tbInfo); err != nil { return errors.Trace(err) } - if err := checkPartitionFuncType(ctx, s.Partition.Expr, tbInfo); err != nil { - return errors.Trace(err) - } - if err := checkPartitioningKeysConstraints(ctx, s, tbInfo); err != nil { - return errors.Trace(err) + if s.Partition != nil { + if err := checkPartitionFuncType(ctx, s.Partition.Expr, tbInfo); err != nil { + return errors.Trace(err) + } + if err := checkPartitioningKeysConstraints(ctx, s, tbInfo); err != nil { + return errors.Trace(err) + } } } return nil diff --git a/ddl/partition.go b/ddl/partition.go index d3db56b9c702e..43e37a2ee27ad 100644 --- a/ddl/partition.go +++ b/ddl/partition.go @@ -524,7 +524,7 @@ func checkPartitionValuesIsInt(ctx sessionctx.Context, def *ast.PartitionDefinit switch val.Kind() { case types.KindUint64, types.KindNull: case types.KindInt64: - if mysql.HasUnsignedFlag(tp.Flag) && val.GetInt64() < 0 { + if !ctx.GetSessionVars().SQLMode.HasNoUnsignedSubtractionMode() && mysql.HasUnsignedFlag(tp.Flag) && val.GetInt64() < 0 { return ErrPartitionConstDomain.GenWithStackByArgs() } default: @@ -666,7 +666,8 @@ func checkRangePartitionValue(ctx sessionctx.Context, tblInfo *model.TableInfo) if strings.EqualFold(defs[len(defs)-1].LessThan[0], partitionMaxValue) { defs = defs[:len(defs)-1] } - isUnsigned := isColUnsigned(cols, pi) + // treat partition value under NoUnsignedSubtractionMode as signed + isUnsigned := 
isColUnsigned(cols, pi) && !ctx.GetSessionVars().SQLMode.HasNoUnsignedSubtractionMode() var prevRangeValue interface{} for i := 0; i < len(defs); i++ { if strings.EqualFold(defs[i].LessThan[0], partitionMaxValue) { diff --git a/table/tables/partition_test.go b/table/tables/partition_test.go index dc7d9956f0c60..b8a14dc5e997d 100644 --- a/table/tables/partition_test.go +++ b/table/tables/partition_test.go @@ -447,6 +447,43 @@ func (ts *testSuite) TestCreatePartitionTableNotSupport(c *C) { c.Assert(ddl.ErrPartitionFunctionIsNotAllowed.Equal(err), IsTrue) } +// issue 24880 +func (ts *testSuite) TestRangePartitionUnderNoUnsignedSub(c *C) { + tk := testkit.NewTestKitWithInit(c, ts.store) + tk.MustExec("use test") + tk.MustExec("drop table if exists tu;") + tk.MustExec("SET @@sql_mode='NO_UNSIGNED_SUBTRACTION';") + tk.MustExec(`CREATE TABLE tu (c1 BIGINT UNSIGNED) PARTITION BY RANGE(c1 - 10) ( + PARTITION p0 VALUES LESS THAN (-5), + PARTITION p1 VALUES LESS THAN (0), + PARTITION p2 VALUES LESS THAN (5), + PARTITION p3 VALUES LESS THAN (10), + PARTITION p4 VALUES LESS THAN (MAXVALUE) + );`) + // currently we do not support inserting records whose partition value is negative + ErrMsg1 := "[types:1690]BIGINT UNSIGNED value is out of range in '(tu.c1 - 10)'" + tk.MustGetErrMsg("insert into tu values (0);", ErrMsg1) + tk.MustGetErrMsg("insert into tu values (cast(1 as unsigned));", ErrMsg1) + tk.MustExec("insert into tu values (cast(9223372036854775807 as unsigned));") + // MySQL does not support c1 values bigger than 9223372036854775817 in this case + tk.MustExec("insert into tu values (cast(18446744073709551615 as unsigned));") + + // test `create table like` + ErrMsg2 := "[types:1690]BIGINT UNSIGNED value is out of range in '(tu2.c1 - 10)'" + tk.MustExec(`CREATE TABLE tu2 like tu;`) + // currently we do not support inserting records whose partition value is negative + tk.MustGetErrMsg("insert into tu2 values (0);", ErrMsg2) + tk.MustGetErrMsg("insert into tu2 values (cast(1 as unsigned));", ErrMsg2) + tk.MustExec("insert into tu2 values (cast(9223372036854775807 as unsigned));") + // MySQL does not support c1 values bigger than 9223372036854775817 in this case + tk.MustExec("insert into tu2 values (cast(18446744073709551615 as unsigned));") + + // compatible with MySQL + ErrMsg3 := "[ddl:1493]VALUES LESS THAN value must be strictly increasing for each partition" + tk.MustExec("SET @@sql_mode='';") + tk.MustGetErrMsg(`CREATE TABLE tu3 like tu;`, ErrMsg3) +} + func (ts *testSuite) TestIntUint(c *C) { tk := testkit.NewTestKitWithInit(c, ts.store) tk.MustExec("use test") From 8945d64362731102bbb063ea6ecf67205db34ff5 Mon Sep 17 00:00:00 2001 From: Yiding Cui Date: Fri, 18 Jun 2021 12:04:38 +0800 Subject: [PATCH 10/25] planner: fix the panic that the index's range length may exceed its original column count (#25267) --- planner/core/cbo_test.go | 16 ++++++++++++++++ planner/core/logical_plans.go | 4 ++++ planner/core/stats.go | 6 +++++- statistics/selectivity.go | 7 ++++++- 4 files changed, 31 insertions(+), 2 deletions(-) diff --git a/planner/core/cbo_test.go b/planner/core/cbo_test.go index 84a42702e945e..1235fcd01ec37 100644 --- a/planner/core/cbo_test.go +++ b/planner/core/cbo_test.go @@ -1242,3 +1242,19 @@ func (s *testAnalyzeSuite) TestBatchPointGetTablePartition(c *C) { "3 0", )) } + +// TestAppendIntPkToIndexTailForRangeBuilding tests for issue25219 https://github.com/pingcap/tidb/issues/25219.
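+// A non-unique index's key in the KV layer is (idx_col1, ..., idx_coln, handle_col), so once the int handle is appended the range built for the index can be longer than its original column list.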
+func (s *testAnalyzeSuite) TestAppendIntPkToIndexTailForRangeBuilding(c *C) { + store, dom, err := newStoreWithBootstrap() + c.Assert(err, IsNil) + tk := testkit.NewTestKit(c, store) + defer func() { + dom.Close() + store.Close() + }() + tk.MustExec("use test") + tk.MustExec("create table t25219(a int primary key, col3 int, col1 int, index idx(col3))") + tk.MustExec("insert into t25219 values(1, 1, 1)") + tk.MustExec("analyze table t25219") + tk.MustQuery("select * from t25219 WHERE (col3 IS NULL OR col1 IS NOT NULL AND col3 <= 6659) AND col3 = 1;").Check(testkit.Rows("1 1 1")) +} diff --git a/planner/core/logical_plans.go b/planner/core/logical_plans.go index c50d155520d8e..a657f18d420e4 100644 --- a/planner/core/logical_plans.go +++ b/planner/core/logical_plans.go @@ -835,6 +835,10 @@ func (ds *DataSource) fillIndexPath(path *util.AccessPath, conds []expression.Ex if !alreadyHandle { path.IdxCols = append(path.IdxCols, handleCol) path.IdxColLens = append(path.IdxColLens, types.UnspecifiedLength) + // Also updates the map that maps the index id to its prefix column ids. + if len(ds.tableStats.HistColl.Idx2ColumnIDs[path.Index.ID]) == len(path.Index.Columns) { + ds.tableStats.HistColl.Idx2ColumnIDs[path.Index.ID] = append(ds.tableStats.HistColl.Idx2ColumnIDs[path.Index.ID], handleCol.UniqueID) + } } } } diff --git a/planner/core/stats.go b/planner/core/stats.go index 0d81105d46c8b..2fc0e73f5c704 100644 --- a/planner/core/stats.go +++ b/planner/core/stats.go @@ -176,8 +176,12 @@ func (ds *DataSource) getGroupNDVs(colGroups [][]*expression.Column) []property. for idxID, idx := range tbl.Indices { colsLen := len(tbl.Idx2ColumnIDs[idxID]) // tbl.Idx2ColumnIDs may only contain the prefix of index columns. - if colsLen != len(idx.Info.Columns) { + // But it may exceed the index's column count, since the index contains the handle column if it's not a unique index. + // We append the handle in fillIndexPath. + if colsLen < len(idx.Info.Columns) { continue + } else if colsLen > len(idx.Info.Columns) { + colsLen-- } idxCols := make([]int64, colsLen) copy(idxCols, tbl.Idx2ColumnIDs[idxID]) diff --git a/statistics/selectivity.go b/statistics/selectivity.go index 8f1606e167ebc..ea3eb58f4ce28 100644 --- a/statistics/selectivity.go +++ b/statistics/selectivity.go @@ -249,9 +249,14 @@ func (coll *HistColl) Selectivity(ctx sessionctx.Context, exprs []expression.Exp idxCols := expression.FindPrefixOfIndex(extractedCols, coll.Idx2ColumnIDs[id]) if len(idxCols) > 0 { lengths := make([]int, 0, len(idxCols)) - for i := 0; i < len(idxCols); i++ { + for i := 0; i < len(idxCols) && i < len(idxInfo.Info.Columns); i++ { + lengths = append(lengths, idxInfo.Info.Columns[i].Length) } + // If more columns are found than the columns held by the index, the int pk has been appended to the tail of the index. + // When storing index data to the key-value store, we use (idx_col1, ..., idx_coln, handle_col) as its key. + if len(idxCols) > len(idxInfo.Info.Columns) { + lengths = append(lengths, types.UnspecifiedLength) + } maskCovered, ranges, partCover, err := getMaskAndRanges(ctx, remainedExprs, ranger.IndexRangeType, lengths, id2Paths[idxInfo.ID], idxCols...)
if err != nil { return 0, nil, errors.Trace(err) From fce508702f1f0284ec8779021b31a8aa7238ecf1 Mon Sep 17 00:00:00 2001 From: JmPotato Date: Fri, 18 Jun 2021 12:14:38 +0800 Subject: [PATCH 11/25] metrics: refine the SafeTS gap metric and panel (#25530) --- go.mod | 2 +- go.sum | 4 ++-- metrics/grafana/tidb.json | 32 ++++++++++++++++---------------- 3 files changed, 19 insertions(+), 19 deletions(-) diff --git a/go.mod b/go.mod index ba30be44a7f50..e3970584da222 100644 --- a/go.mod +++ b/go.mod @@ -55,7 +55,7 @@ require ( github.com/shirou/gopsutil v3.21.2+incompatible github.com/soheilhy/cmux v0.1.4 github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2 - github.com/tikv/client-go/v2 v2.0.0-20210616060203-94f269a0f96a + github.com/tikv/client-go/v2 v2.0.0-20210617115813-8d4847a86878 github.com/tikv/pd v1.1.0-beta.0.20210323121136-78679e5e209d github.com/twmb/murmur3 v1.1.3 github.com/uber-go/atomic v1.4.0 diff --git a/go.sum b/go.sum index b2fbb66fa0136..0f275a1ae6b1b 100644 --- a/go.sum +++ b/go.sum @@ -558,8 +558,8 @@ github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2/go.mod h1:2PfK github.com/tidwall/gjson v1.3.5/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tikv/client-go/v2 v2.0.0-20210616060203-94f269a0f96a h1:SQwO+ZJSvNGqHtpczt7rHD5tQci/XRWwH0BL4Uefhf0= -github.com/tikv/client-go/v2 v2.0.0-20210616060203-94f269a0f96a/go.mod h1:2ZWVKSfm6BWBY0dgH0h6o+vAzNuXWD/z0ihkMNv3J6E= +github.com/tikv/client-go/v2 v2.0.0-20210617115813-8d4847a86878 h1:gAg9PiWK0srOvdrJ49tV2AzbTJnarRm/56+xzvGDLUs= +github.com/tikv/client-go/v2 v2.0.0-20210617115813-8d4847a86878/go.mod h1:2ZWVKSfm6BWBY0dgH0h6o+vAzNuXWD/z0ihkMNv3J6E= github.com/tikv/pd v1.1.0-beta.0.20210323121136-78679e5e209d h1:K0XnvsnT6ofLDuM8Rt3PuFQO4p8bNraeHYstspD316g= github.com/tikv/pd v1.1.0-beta.0.20210323121136-78679e5e209d/go.mod h1:Jw9KG11C/23Rr7DW4XWQ7H5xOgGZo6DFL1OKAF4+Igw= github.com/tklauser/go-sysconf v0.3.4 h1:HT8SVixZd3IzLdfs/xlpq0jeSfTX57g1v6wB1EuzV7M= diff --git a/metrics/grafana/tidb.json b/metrics/grafana/tidb.json index 911bacf1e09ed..1524e632c2d1c 100644 --- a/metrics/grafana/tidb.json +++ b/metrics/grafana/tidb.json @@ -5386,11 +5386,10 @@ "dashLength": 10, "dashes": false, "datasource": "${DS_TEST-CLUSTER}", - "description": "Bucketed histogram of the gap between SafeTS and current time.", + "description": "The gap between SafeTS and current time", "editable": true, "error": false, "fill": 1, - "fillGradient": 0, "grid": {}, "gridPos": { "h": 7, @@ -5402,21 +5401,21 @@ "legend": { "alignAsTable": true, "avg": false, - "current": false, - "max": false, + "current": true, + "hideEmpty": true, + "max": true, "min": false, "rightSide": true, "show": true, + "sort": "current", + "sortDesc": true, "total": false, - "values": false + "values": true }, "lines": true, "linewidth": 1, "links": [], "nullPointMode": "null as zero", - "options": { - "dataLinks": [] - }, "percentage": false, "pointradius": 5, "points": false, @@ -5427,23 +5426,24 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.99, sum(rate(tidb_tikvclient_safets_gap_seconds_bucket{tidb_cluster=\"$tidb_cluster\"}[1m])) by (le, instance))", + "expr": "sum(tidb_tikvclient_safets_gap_seconds{tidb_cluster=\"$tidb_cluster\", instance=~\"$instance\"}) by (instance, store)", "format": "time_series", - "intervalFactor": 1, - 
"legendFormat": "{{instance}}", - "refId": "A" + "intervalFactor": 2, + "legendFormat": "{{instance}}-store-{{store}}", + "refId": "A", + "step": 60 } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "99% SafeTS Gap Seconds", + "title": "Max SafeTS gap", "tooltip": { "msResolution": false, "shared": true, "sort": 0, - "value_type": "individual" + "value_type": "cumulative" }, "type": "graph", "xaxis": { @@ -5455,11 +5455,11 @@ }, "yaxes": [ { - "format": "short", + "format": "s", "label": null, "logBase": 1, "max": null, - "min": "0", + "min": null, "show": true }, { From 5645edeec639b68f521e12db2d6d49a77b5a9d6e Mon Sep 17 00:00:00 2001 From: tiancaiamao Date: Fri, 18 Jun 2021 12:22:38 +0800 Subject: [PATCH 12/25] planner,executor: fix point get for update read panic on partition table (#25537) --- executor/partition_table_test.go | 10 ++++++++++ planner/core/find_best_task.go | 9 +++++++++ 2 files changed, 19 insertions(+) diff --git a/executor/partition_table_test.go b/executor/partition_table_test.go index 3d4ade1776e54..9b864309f1e8c 100644 --- a/executor/partition_table_test.go +++ b/executor/partition_table_test.go @@ -3018,3 +3018,13 @@ PARTITION BY RANGE (a) ( s.testData.GetTestCases(c, &input, &output) s.verifyPartitionResult(tk, input, output) } + +func (s *partitionTableSuite) TestIssue25528(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("set @@tidb_partition_prune_mode = 'static'") + tk.MustExec("use test") + tk.MustExec("create table issue25528 (id int primary key, balance DECIMAL(10, 2), balance2 DECIMAL(10, 2) GENERATED ALWAYS AS (-balance) VIRTUAL, created_at TIMESTAMP) PARTITION BY HASH(id) PARTITIONS 8") + tk.MustExec("insert into issue25528 (id, balance, created_at) values(1, 100, '2021-06-17 22:35:20')") + tk.MustExec("begin pessimistic") + tk.MustQuery("select * from issue25528 where id = 1 for update").Check(testkit.Rows("1 100.00 -100.00 2021-06-17 22:35:20")) +} diff --git a/planner/core/find_best_task.go b/planner/core/find_best_task.go index 8a2e41ec85a02..8802e1456021a 100644 --- a/planner/core/find_best_task.go +++ b/planner/core/find_best_task.go @@ -732,6 +732,15 @@ func (ds *DataSource) findBestTask(prop *property.PhysicalProperty, planCounter if hashPartColName == nil { canConvertPointGet = false } + } else { + // If the schema contains ExtraPidColID, do not convert to point get. + // Because the point get executor can not handle the extra partition ID column now. 
+ for _, col := range ds.schema.Columns { + if col.ID == model.ExtraPidColID { + canConvertPointGet = false + break + } + } } } if canConvertPointGet { From 979f5356d36143332fc8d1500ac3c9d3a7366e4f Mon Sep 17 00:00:00 2001 From: xhe Date: Fri, 18 Jun 2021 12:46:38 +0800 Subject: [PATCH 13/25] infoschema: improve performance (#25536) --- infoschema/cache.go | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/infoschema/cache.go b/infoschema/cache.go index 34dba0cf95e42..d7c17f65716e3 100644 --- a/infoschema/cache.go +++ b/infoschema/cache.go @@ -20,6 +20,16 @@ import ( "github.com/pingcap/tidb/metrics" ) +var ( + getLatestCounter = metrics.InfoCacheCounters.WithLabelValues("get", "latest") + getTSCounter = metrics.InfoCacheCounters.WithLabelValues("get", "ts") + getVersionCounter = metrics.InfoCacheCounters.WithLabelValues("get", "version") + + hitLatestCounter = metrics.InfoCacheCounters.WithLabelValues("hit", "latest") + hitTSCounter = metrics.InfoCacheCounters.WithLabelValues("hit", "ts") + hitVersionCounter = metrics.InfoCacheCounters.WithLabelValues("hit", "version") +) + // InfoCache handles information schema, including getting and setting. // The cache behavior, however, is transparent and under automatic management. // It only promised to cache the infoschema, if it is newer than all the cached. @@ -40,9 +50,9 @@ func NewCache(capcity int) *InfoCache { func (h *InfoCache) GetLatest() InfoSchema { h.mu.RLock() defer h.mu.RUnlock() - metrics.InfoCacheCounters.WithLabelValues("get", "latest").Inc() + getLatestCounter.Inc() if len(h.cache) > 0 { - metrics.InfoCacheCounters.WithLabelValues("hit", "latest").Inc() + hitLatestCounter.Inc() return h.cache[0] } return nil @@ -52,12 +62,12 @@ func (h *InfoCache) GetLatest() InfoSchema { func (h *InfoCache) GetByVersion(version int64) InfoSchema { h.mu.RLock() defer h.mu.RUnlock() - metrics.InfoCacheCounters.WithLabelValues("get", "version").Inc() + getVersionCounter.Inc() i := sort.Search(len(h.cache), func(i int) bool { return h.cache[i].SchemaMetaVersion() <= version }) if i < len(h.cache) && h.cache[i].SchemaMetaVersion() == version { - metrics.InfoCacheCounters.WithLabelValues("hit", "version").Inc() + hitVersionCounter.Inc() return h.cache[i] } return nil @@ -70,10 +80,10 @@ func (h *InfoCache) GetBySnapshotTS(snapshotTS uint64) InfoSchema { h.mu.RLock() defer h.mu.RUnlock() - metrics.InfoCacheCounters.WithLabelValues("get", "ts").Inc() + getTSCounter.Inc() if snapshotTS >= h.maxUpdatedSnapshotTS { if len(h.cache) > 0 { - metrics.InfoCacheCounters.WithLabelValues("hit", "ts").Inc() + hitTSCounter.Inc() return h.cache[0] } } From 1314b08c8b34462767ce92bfbe9cf1fc2b12b7ee Mon Sep 17 00:00:00 2001 From: YinWeiling <30956512+YinWeiling@users.noreply.github.com> Date: Fri, 18 Jun 2021 13:26:38 +0800 Subject: [PATCH 14/25] telemetry: fix the bug about unnecessary error when run tidb only (#25264) --- telemetry/data_slow_query.go | 2 +- telemetry/data_window.go | 10 ++++------ 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/telemetry/data_slow_query.go b/telemetry/data_slow_query.go index dd73097293e6c..74526f96a18f0 100644 --- a/telemetry/data_slow_query.go +++ b/telemetry/data_slow_query.go @@ -143,7 +143,7 @@ func init() { currentSQBInfo["+Inf"] = 0 if mysql.TiDBReleaseVersion != "None" { - logutil.BgLogger().Info("Telemetry slow query stats initialized", zap.String("currentSQBInfo", currentSQBInfo.String()), zap.String("lastSQBInfo", lastSQBInfo.String())) + 
logutil.BgLogger().Debug("Telemetry slow query stats initialized", zap.String("currentSQBInfo", currentSQBInfo.String()), zap.String("lastSQBInfo", lastSQBInfo.String())) } } diff --git a/telemetry/data_window.go b/telemetry/data_window.go index 24ebed9ea80bc..e8a429a3134ba 100644 --- a/telemetry/data_window.go +++ b/telemetry/data_window.go @@ -18,7 +18,6 @@ import ( "sync" "time" - "github.com/pingcap/errors" "github.com/pingcap/tidb/domain/infosync" "github.com/prometheus/client_golang/api" promv1 "github.com/prometheus/client_golang/api/prometheus/v1" @@ -110,12 +109,8 @@ func readSQLMetric(timepoint time.Time, SQLResult *sqlUsageData) error { promQL := "sum(tidb_executor_statement_total{}) by (instance,type)" result, err := querySQLMetric(ctx, timepoint, promQL) if err != nil { - if err1, ok := err.(*promv1.Error); ok { - return errors.Errorf("query metric error, msg: %v, detail: %v", err1.Msg, err1.Detail) - } - return errors.Errorf("query metric error: %v", err.Error()) + logutil.BgLogger().Warn("querySQLMetric got error") } - anylisSQLUsage(result, SQLResult) return nil } @@ -155,6 +150,9 @@ func querySQLMetric(ctx context.Context, queryTime time.Time, promQL string) (re } func anylisSQLUsage(promResult pmodel.Value, SQLResult *sqlUsageData) { + if promResult == nil { + return + } switch promResult.Type() { case pmodel.ValVector: matrix := promResult.(pmodel.Vector) From 5b392334c6d943a102ea792d382a3b732f676aa4 Mon Sep 17 00:00:00 2001 From: bb7133 Date: Fri, 18 Jun 2021 14:24:38 +0800 Subject: [PATCH 15/25] *: temporarily skip some unstable test cases. (#25548) --- ddl/db_test.go | 1 + executor/stale_txn_test.go | 1 + planner/core/prepare_test.go | 3 +++ server/tidb_test.go | 1 + 4 files changed, 6 insertions(+) diff --git a/ddl/db_test.go b/ddl/db_test.go index 7467a02e6894b..ed9b619e7aa3c 100644 --- a/ddl/db_test.go +++ b/ddl/db_test.go @@ -310,6 +310,7 @@ func (s *testDBSuite8) TestAddPrimaryKeyRollback2(c *C) { } func (s *testDBSuite2) TestAddUniqueIndexRollback(c *C) { + c.Skip("unstable, skip it and fix it before 20210702") hasNullValsInKey := false idxName := "c3_index" addIdxSQL := "create unique index c3_index on t1 (c3)" diff --git a/executor/stale_txn_test.go b/executor/stale_txn_test.go index 35d65a9942a86..b02311f08fd58 100644 --- a/executor/stale_txn_test.go +++ b/executor/stale_txn_test.go @@ -828,6 +828,7 @@ func (s *testStaleTxnSuite) TestSetTransactionInfoSchema(c *C) { } func (s *testStaleTxnSuite) TestStaleSelect(c *C) { + c.Skip("unstable, skip it and fix it before 20210702") tk := testkit.NewTestKit(c, s.store) tk.MustExec("use test") tk.MustExec("drop table if exists t") diff --git a/planner/core/prepare_test.go b/planner/core/prepare_test.go index 1a8d8a496d38b..cb5fcf31de8b3 100644 --- a/planner/core/prepare_test.go +++ b/planner/core/prepare_test.go @@ -138,6 +138,7 @@ func (s *testPrepareSerialSuite) TestPrepareCache(c *C) { } func (s *testPrepareSerialSuite) TestPrepareCacheIndexScan(c *C) { + c.Skip("unstable, skip it and fix it before 20210702") defer testleak.AfterTest(c)() store, dom, err := newStoreWithBootstrap() c.Assert(err, IsNil) @@ -302,6 +303,7 @@ func (s *testPrepareSerialSuite) TestPrepareOverMaxPreparedStmtCount(c *C) { // unit test for issue https://github.com/pingcap/tidb/issues/8518 func (s *testPrepareSerialSuite) TestPrepareTableAsNameOnGroupByWithCache(c *C) { + c.Skip("unstable, skip it and fix it before 20210702") defer testleak.AfterTest(c)() store, dom, err := newStoreWithBootstrap() c.Assert(err, IsNil) @@ -993,6 +995,7 @@ func 
(s *testPrepareSuite) TestInvisibleIndex(c *C) { // Test for issue https://github.com/pingcap/tidb/issues/22167 func (s *testPrepareSerialSuite) TestPrepareCacheWithJoinTable(c *C) { + c.Skip("unstable, skip it and fix it before 20210702") defer testleak.AfterTest(c)() store, dom, err := newStoreWithBootstrap() c.Assert(err, IsNil) diff --git a/server/tidb_test.go b/server/tidb_test.go index a28bce7a52bbd..0953d4815d16e 100644 --- a/server/tidb_test.go +++ b/server/tidb_test.go @@ -1377,6 +1377,7 @@ func (ts *tidbTestTopSQLSuite) TestTopSQLCPUProfile(c *C) { } func (ts *tidbTestTopSQLSuite) TestTopSQLAgent(c *C) { + c.Skip("unstable, skip it and fix it before 20210702") db, err := sql.Open("mysql", ts.getDSN()) c.Assert(err, IsNil, Commentf("Error connecting")) defer func() { From 79db8cb3a3e26837a880373eb2a432032f2a01a1 Mon Sep 17 00:00:00 2001 From: guo-shaoge Date: Fri, 18 Jun 2021 16:02:38 +0800 Subject: [PATCH 16/25] executor: make analyze test be stable (#25547) --- executor/analyze_test.go | 2 +- executor/join_test.go | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/executor/analyze_test.go b/executor/analyze_test.go index b7e015b94c892..b025cfc98937e 100644 --- a/executor/analyze_test.go +++ b/executor/analyze_test.go @@ -1038,7 +1038,7 @@ func (s *testSuite1) TestAnalyzeFullSamplingOnIndexWithVirtualColumnOrPrefixColu tk.MustQuery("show stats_topn where table_name = 'sampling_index_prefix_col' and column_name = 'idx'").Check(testkit.Rows("test sampling_index_prefix_col idx 1 a 3")) } -func (s *testSuite2) TestAnalyzeSamplingWorkPanic(c *C) { +func (s *testSerialSuite2) TestAnalyzeSamplingWorkPanic(c *C) { tk := testkit.NewTestKit(c, s.store) tk.MustExec("use test") tk.MustExec("set @@session.tidb_analyze_version = 2") diff --git a/executor/join_test.go b/executor/join_test.go index 3519b14893491..bfd0048a63b3d 100644 --- a/executor/join_test.go +++ b/executor/join_test.go @@ -1182,7 +1182,6 @@ func (s *testSuiteJoin1) TestIssue15850JoinNullValue(c *C) { } func (s *testSuiteJoin1) TestIndexLookupJoin(c *C) { - c.Skip("unstable, skip it and fix it before 20210622") tk := testkit.NewTestKit(c, s.store) tk.MustExec("use test") tk.MustExec("set @@tidb_init_chunk_size=2") From 9a3ed706cc282ebbbb6e28aab887e2c89945ab5a Mon Sep 17 00:00:00 2001 From: Arenatlx Date: Fri, 18 Jun 2021 16:32:38 +0800 Subject: [PATCH 17/25] ddl: fix cast datetime timestamp under column type change (#25560) --- ddl/column_type_change_test.go | 23 +++++++++++++++++++++++ ddl/db_test.go | 4 ++-- types/datum.go | 12 +++++++++++- 3 files changed, 36 insertions(+), 3 deletions(-) diff --git a/ddl/column_type_change_test.go b/ddl/column_type_change_test.go index 05709080f6d54..53bec672c52e2 100644 --- a/ddl/column_type_change_test.go +++ b/ddl/column_type_change_test.go @@ -2106,3 +2106,26 @@ func (s *testColumnTypeChangeSuite) TestChangePrefixedIndexColumnToNonPrefixOne( tk.MustExec("create table t(a varchar(700), key(a(700)));") tk.MustGetErrCode("alter table t change column a a tinytext;", mysql.ErrBlobKeyWithoutLength) } + +// Fix issue https://github.com/pingcap/tidb/issues/25469 +func (s *testColumnTypeChangeSuite) TestCastToTimeStampDecodeError(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test;") + + tk.MustExec("CREATE TABLE `t` (" + + " `a` datetime DEFAULT '1764-06-11 02:46:14'" + + ") ENGINE=InnoDB DEFAULT CHARSET=latin1 COLLATE=latin1_bin COMMENT='7b84832e-f857-4116-8872-82fc9dcc4ab3'") + tk.MustExec("insert into `t` values();") + tk.MustGetErrCode("alter table `t` 
change column `a` `b` TIMESTAMP NULL DEFAULT '2015-11-14 07:12:24';", mysql.ErrTruncatedWrongValue) + + tk.MustExec("drop table if exists t") + tk.MustExec("CREATE TABLE `t` (" + + " `a` date DEFAULT '1764-06-11 02:46:14'" + + ") ENGINE=InnoDB DEFAULT CHARSET=latin1 COLLATE=latin1_bin COMMENT='7b84832e-f857-4116-8872-82fc9dcc4ab3'") + tk.MustExec("insert into `t` values();") + tk.MustGetErrCode("alter table `t` change column `a` `b` TIMESTAMP NULL DEFAULT '2015-11-14 07:12:24';", mysql.ErrTruncatedWrongValue) + tk.MustExec("drop table if exists t") + + // Normal cast datetime to timestamp can succeed. + tk.MustQuery("select timestamp(cast('1000-11-11 12-3-1' as date));").Check(testkit.Rows("1000-11-11 00:00:00")) +} diff --git a/ddl/db_test.go b/ddl/db_test.go index ed9b619e7aa3c..6ca3c398b12aa 100644 --- a/ddl/db_test.go +++ b/ddl/db_test.go @@ -5110,8 +5110,8 @@ func (s *testDBSuite1) TestModifyColumnTime_DatetimeToTimestamp(c *C) { {"datetime", `20060102150405`, "timestamp", "2006-01-02 15:04:05", 0}, {"datetime", `060102150405`, "timestamp", "2006-01-02 15:04:05", 0}, {"datetime", `"2006-01-02 23:59:59.506"`, "timestamp", "2006-01-03 00:00:00", 0}, - {"datetime", `"1000-01-02 23:59:59"`, "timestamp", "1000-01-02 23:59:59", 0}, - {"datetime", `"9999-01-02 23:59:59"`, "timestamp", "9999-01-02 23:59:59", 0}, + {"datetime", `"1971-01-02 23:59:59"`, "timestamp", "1971-01-02 23:59:59", 0}, + {"datetime", `"2009-01-02 23:59:59"`, "timestamp", "2009-01-02 23:59:59", 0}, } testModifyColumnTime(c, s.store, tests) } diff --git a/types/datum.go b/types/datum.go index 95e20e1d1b2f3..8071139e0571f 100644 --- a/types/datum.go +++ b/types/datum.go @@ -1128,7 +1128,17 @@ func (d *Datum) convertToMysqlTimestamp(sc *stmtctx.StatementContext, target *Fi } switch d.k { case KindMysqlTime: + // A cast usage such as `select timestamp(cast("1000-01-02 23:59:59" as date));` will succeed, + // while altering a datetime("1000-01-02 23:59:59") column to timestamp will report an error.
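+ // (A TIMESTAMP column only covers 1970-01-01 00:00:01 to 2038-01-19 03:14:07 UTC, so datetimes outside that range must be rejected while the DDL job converts existing rows.)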
+ if sc.IsDDLJobInQueue { + t, err = d.GetMysqlTime().Convert(sc, target.Tp) + if err != nil { + ret.SetMysqlTime(t) + return ret, errors.Trace(ErrWrongValue.GenWithStackByArgs(DateTimeStr, t.String())) + } + } else { + t = d.GetMysqlTime() + } t, err = t.RoundFrac(sc, fsp) case KindMysqlDuration: t, err = d.GetMysqlDuration().ConvertToTime(sc, mysql.TypeTimestamp) From 799591a06fad765b6b6a5675c37c11c2981a848f Mon Sep 17 00:00:00 2001 From: Song Gao Date: Fri, 18 Jun 2021 17:13:38 +0800 Subject: [PATCH 18/25] session: read local dc replicas automatically for stale read (#25525) * fix select Signed-off-by: yisaer --- distsql/request_builder.go | 61 +++++++++++------ distsql/request_builder_test.go | 9 +++ executor/adapter.go | 7 +- executor/batch_point_get.go | 20 ++++-- executor/benchmark_test.go | 11 +-- executor/builder.go | 12 +++- executor/compiler.go | 1 + executor/coprocessor.go | 3 +- executor/distsql.go | 15 +++-- executor/executor_required_rows_test.go | 3 +- executor/index_merge_reader.go | 6 ++ executor/point_get.go | 26 +++++-- executor/stale_txn_test.go | 90 ++++++++++++++++++------- executor/table_reader.go | 11 ++- expression/builtin_time.go | 4 +- expression/integration_test.go | 5 +- kv/txn_scope_var.go | 4 ++ planner/core/preprocess.go | 39 ++++++++--- session/session.go | 2 +- session/session_test.go | 6 ++ 20 files changed, 251 insertions(+), 84 deletions(-) diff --git a/distsql/request_builder.go b/distsql/request_builder.go index 0fd44b044ae3b..423d0a2b1e785 100644 --- a/distsql/request_builder.go +++ b/distsql/request_builder.go @@ -19,6 +19,7 @@ import ( "sort" "github.com/pingcap/errors" + "github.com/pingcap/failpoint" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/parser/mysql" "github.com/pingcap/tidb/ddl/placement" @@ -33,20 +34,38 @@ import ( "github.com/pingcap/tidb/util/memory" "github.com/pingcap/tidb/util/ranger" "github.com/pingcap/tipb/go-tipb" + "github.com/tikv/client-go/v2/oracle" ) // RequestBuilder is used to build a "kv.Request". // It is called before we issue a kv request by "Select". type RequestBuilder struct { kv.Request - // txnScope indicates the value of txn_scope - txnScope string - is infoschema.InfoSchema - err error + is infoschema.InfoSchema + err error } // Build builds a "kv.Request". 
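+// Build also fills in the default TxnScope and, for a stale read in a non-global scope, attaches the matching store label so the request is served by that DC's replicas.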
func (builder *RequestBuilder) Build() (*kv.Request, error) { + if builder.TxnScope == "" { + builder.TxnScope = oracle.GlobalTxnScope + } + if builder.IsStaleness && builder.TxnScope != kv.GlobalTxnScope { + builder.MatchStoreLabels = []*metapb.StoreLabel{ + { + Key: placement.DCLabelKey, + Value: builder.TxnScope, + }, + } + } + failpoint.Inject("assertRequestBuilderStalenessOption", func(val failpoint.Value) { + assertScope := val.(string) + if len(assertScope) > 0 { + if builder.IsStaleness && assertScope != builder.TxnScope { + panic("request builder staleness option fail") + } + } + }) err := builder.verifyTxnScope() if err != nil { builder.err = err @@ -229,16 +248,6 @@ func (builder *RequestBuilder) SetFromSessionVars(sv *variable.SessionVars) *Req builder.Request.TaskID = sv.StmtCtx.TaskID builder.Request.Priority = builder.getKVPriority(sv) builder.Request.ReplicaRead = sv.GetReplicaRead() - builder.txnScope = sv.TxnCtx.TxnScope - builder.IsStaleness = sv.TxnCtx.IsStaleness - if builder.IsStaleness && builder.txnScope != kv.GlobalTxnScope { - builder.MatchStoreLabels = []*metapb.StoreLabel{ - { - Key: placement.DCLabelKey, - Value: builder.txnScope, - }, - } - } builder.SetResourceGroupTag(sv.StmtCtx) return builder } @@ -284,10 +293,10 @@ func (builder *RequestBuilder) SetResourceGroupTag(sc *stmtctx.StatementContext) } func (builder *RequestBuilder) verifyTxnScope() error { - if builder.txnScope == "" { - builder.txnScope = kv.GlobalTxnScope + if builder.TxnScope == "" { + builder.TxnScope = kv.GlobalTxnScope } - if builder.txnScope == kv.GlobalTxnScope || builder.is == nil { + if builder.TxnScope == kv.GlobalTxnScope || builder.is == nil { return nil } visitPhysicalTableID := make(map[int64]struct{}) @@ -301,7 +310,7 @@ func (builder *RequestBuilder) verifyTxnScope() error { } for phyTableID := range visitPhysicalTableID { - valid := VerifyTxnScope(builder.txnScope, phyTableID, builder.is) + valid := VerifyTxnScope(builder.TxnScope, phyTableID, builder.is) if !valid { var tblName string var partName string @@ -313,10 +322,10 @@ tblInfo, _ = builder.is.TableByID(phyTableID) tblName = tblInfo.Meta().Name.String() } - err := fmt.Errorf("table %v can not be read by %v txn_scope", tblName, builder.txnScope) + err := fmt.Errorf("table %v can not be read by %v txn_scope", tblName, builder.TxnScope) if len(partName) > 0 { err = fmt.Errorf("table %v's partition %v can not be read by %v txn_scope", - tblName, partName, builder.txnScope) + tblName, partName, builder.TxnScope) } return err } @@ -324,6 +333,18 @@ return nil } +// SetTxnScope sets request TxnScope +func (builder *RequestBuilder) SetTxnScope(scope string) *RequestBuilder { + builder.TxnScope = scope + return builder +} + +// SetIsStaleness sets request IsStaleness +func (builder *RequestBuilder) SetIsStaleness(is bool) *RequestBuilder { + builder.IsStaleness = is + return builder +} + // TableHandleRangesToKVRanges convert table handle ranges to "KeyRanges" for multiple tables.
func TableHandleRangesToKVRanges(sc *stmtctx.StatementContext, tid []int64, isCommonHandle bool, ranges []*ranger.Range, fb *statistics.QueryFeedback) ([]kv.KeyRange, error) { if !isCommonHandle { diff --git a/distsql/request_builder_test.go b/distsql/request_builder_test.go index b9837f77c1ff1..d6a258a6067d7 100644 --- a/distsql/request_builder_test.go +++ b/distsql/request_builder_test.go @@ -35,6 +35,7 @@ import ( "github.com/pingcap/tidb/util/ranger" "github.com/pingcap/tidb/util/testleak" "github.com/pingcap/tipb/go-tipb" + "github.com/tikv/client-go/v2/oracle" ) var _ = Suite(&testSuite{}) @@ -323,6 +324,7 @@ func (s *testSuite) TestRequestBuilder1(c *C) { SyncLog: false, Streaming: false, ReplicaRead: kv.ReplicaReadLeader, + TxnScope: oracle.GlobalTxnScope, } c.Assert(actual, DeepEquals, expect) } @@ -399,6 +401,7 @@ func (s *testSuite) TestRequestBuilder2(c *C) { SyncLog: false, Streaming: false, ReplicaRead: kv.ReplicaReadLeader, + TxnScope: oracle.GlobalTxnScope, } c.Assert(actual, DeepEquals, expect) } @@ -446,6 +449,7 @@ func (s *testSuite) TestRequestBuilder3(c *C) { SyncLog: false, Streaming: false, ReplicaRead: kv.ReplicaReadLeader, + TxnScope: oracle.GlobalTxnScope, } c.Assert(actual, DeepEquals, expect) } @@ -493,6 +497,7 @@ func (s *testSuite) TestRequestBuilder4(c *C) { NotFillCache: false, SyncLog: false, ReplicaRead: kv.ReplicaReadLeader, + TxnScope: oracle.GlobalTxnScope, } c.Assert(actual, DeepEquals, expect) } @@ -536,6 +541,7 @@ func (s *testSuite) TestRequestBuilder5(c *C) { NotFillCache: true, SyncLog: false, Streaming: false, + TxnScope: oracle.GlobalTxnScope, } c.Assert(actual, DeepEquals, expect) } @@ -569,6 +575,7 @@ func (s *testSuite) TestRequestBuilder6(c *C) { NotFillCache: true, SyncLog: false, Streaming: false, + TxnScope: oracle.GlobalTxnScope, } c.Assert(actual, DeepEquals, expect) @@ -603,6 +610,7 @@ func (s *testSuite) TestRequestBuilder7(c *C) { SyncLog: false, Streaming: false, ReplicaRead: replicaRead, + TxnScope: oracle.GlobalTxnScope, } c.Assert(actual, DeepEquals, expect) @@ -624,6 +632,7 @@ func (s *testSuite) TestRequestBuilder8(c *C) { Priority: 0, MemTracker: (*memory.Tracker)(nil), SchemaVar: 0, + TxnScope: oracle.GlobalTxnScope, } c.Assert(actual, DeepEquals, expect) } diff --git a/executor/adapter.go b/executor/adapter.go index 55e135093782e..ed2eaef17db30 100644 --- a/executor/adapter.go +++ b/executor/adapter.go @@ -191,6 +191,8 @@ type ExecStmt struct { SnapshotTS uint64 // ExplicitStaleness means whether the 'SELECT' clause are using 'AS OF TIMESTAMP' to perform stale read explicitly. ExplicitStaleness bool + // TxnScope indicates the store selector scope that the request will visit + TxnScope string // InfoSchema stores a reference to the schema information. InfoSchema infoschema.InfoSchema // Plan stores a reference to the final physical plan.
@@ -245,7 +247,7 @@ func (a *ExecStmt) PointGet(ctx context.Context, is infoschema.InfoSchema) (*rec } } if a.PsStmt.Executor == nil { - b := newExecutorBuilder(a.Ctx, is, a.Ti, a.SnapshotTS, a.ExplicitStaleness) + b := newExecutorBuilder(a.Ctx, is, a.Ti, a.SnapshotTS, a.ExplicitStaleness, a.TxnScope) newExecutor := b.build(a.Plan) if b.err != nil { return nil, b.err @@ -291,6 +293,7 @@ func (a *ExecStmt) RebuildPlan(ctx context.Context) (int64, error) { a.InfoSchema = ret.InfoSchema a.SnapshotTS = ret.LastSnapshotTS a.ExplicitStaleness = ret.ExplicitStaleness + a.TxnScope = ret.TxnScope p, names, err := planner.Optimize(ctx, a.Ctx, a.StmtNode, a.InfoSchema) if err != nil { return 0, err @@ -792,7 +795,7 @@ func (a *ExecStmt) buildExecutor() (Executor, error) { ctx.GetSessionVars().StmtCtx.Priority = kv.PriorityLow } - b := newExecutorBuilder(ctx, a.InfoSchema, a.Ti, a.SnapshotTS, a.ExplicitStaleness) + b := newExecutorBuilder(ctx, a.InfoSchema, a.Ti, a.SnapshotTS, a.ExplicitStaleness, a.TxnScope) e := b.build(a.Plan) if b.err != nil { return nil, errors.Trace(b.err) diff --git a/executor/batch_point_get.go b/executor/batch_point_get.go index 96e63453c803d..93778d76bc1ff 100644 --- a/executor/batch_point_get.go +++ b/executor/batch_point_get.go @@ -55,6 +55,8 @@ type BatchPointGetExec struct { partTblID int64 idxVals [][]types.Datum startTS uint64 + txnScope string + isStaleness bool snapshotTS uint64 txn kv.Transaction lock bool @@ -124,14 +126,22 @@ func (e *BatchPointGetExec) Open(context.Context) error { snapshot.SetOption(kv.ReplicaRead, kv.ReplicaReadFollower) } snapshot.SetOption(kv.TaskID, stmtCtx.TaskID) - snapshot.SetOption(kv.TxnScope, e.ctx.GetSessionVars().TxnCtx.TxnScope) - isStaleness := e.ctx.GetSessionVars().TxnCtx.IsStaleness - snapshot.SetOption(kv.IsStalenessReadOnly, isStaleness) - if isStaleness && e.ctx.GetSessionVars().TxnCtx.TxnScope != kv.GlobalTxnScope { + snapshot.SetOption(kv.TxnScope, e.txnScope) + snapshot.SetOption(kv.IsStalenessReadOnly, e.isStaleness) + failpoint.Inject("assertBatchPointStalenessOption", func(val failpoint.Value) { + assertScope := val.(string) + if len(assertScope) > 0 { + if e.isStaleness && assertScope != e.txnScope { + panic("batch point get staleness option fail") + } + } + }) + + if e.isStaleness && e.txnScope != kv.GlobalTxnScope { snapshot.SetOption(kv.MatchStoreLabels, []*metapb.StoreLabel{ { Key: placement.DCLabelKey, - Value: e.ctx.GetSessionVars().TxnCtx.TxnScope, + Value: e.txnScope, }, }) } diff --git a/executor/benchmark_test.go b/executor/benchmark_test.go index 4cd7bf09fcee9..6ff4022c5a8c8 100644 --- a/executor/benchmark_test.go +++ b/executor/benchmark_test.go @@ -44,6 +44,7 @@ import ( "github.com/pingcap/tidb/util/memory" "github.com/pingcap/tidb/util/mock" "github.com/pingcap/tidb/util/stringutil" + "github.com/tikv/client-go/v2/oracle" "go.uber.org/zap/zapcore" ) @@ -290,7 +291,7 @@ func buildHashAggExecutor(ctx sessionctx.Context, src Executor, schema *expressi plan.SetSchema(schema) plan.Init(ctx, nil, 0) plan.SetChildren(nil) - b := newExecutorBuilder(ctx, nil, nil, 0, false) + b := newExecutorBuilder(ctx, nil, nil, 0, false, oracle.GlobalTxnScope) exec := b.build(plan) hashAgg := exec.(*HashAggExec) hashAgg.children[0] = src @@ -342,7 +343,7 @@ func buildStreamAggExecutor(ctx sessionctx.Context, srcExec Executor, schema *ex plan = sg } - b := newExecutorBuilder(ctx, nil, nil, 0, false) + b := newExecutorBuilder(ctx, nil, nil, 0, false, oracle.GlobalTxnScope) return b.build(plan) } @@ -575,7 +576,7 @@ func 
buildWindowExecutor(ctx sessionctx.Context, windowFunc string, funcs int, f plan = win } - b := newExecutorBuilder(ctx, nil, nil, 0, false) + b := newExecutorBuilder(ctx, nil, nil, 0, false, oracle.GlobalTxnScope) exec := b.build(plan) return exec } @@ -1322,7 +1323,7 @@ func prepare4IndexInnerHashJoin(tc *indexJoinTestCase, outerDS *mockDataSource, hashCols: tc.outerHashKeyIdx, }, innerCtx: innerCtx{ - readerBuilder: &dataReaderBuilder{Plan: &mockPhysicalIndexReader{e: innerDS}, executorBuilder: newExecutorBuilder(tc.ctx, nil, nil, 0, false)}, + readerBuilder: &dataReaderBuilder{Plan: &mockPhysicalIndexReader{e: innerDS}, executorBuilder: newExecutorBuilder(tc.ctx, nil, nil, 0, false, oracle.GlobalTxnScope)}, rowTypes: rightTypes, colLens: colLens, keyCols: tc.innerJoinKeyIdx, @@ -1388,7 +1389,7 @@ func prepare4IndexMergeJoin(tc *indexJoinTestCase, outerDS *mockDataSource, inne compareFuncs: outerCompareFuncs, }, innerMergeCtx: innerMergeCtx{ - readerBuilder: &dataReaderBuilder{Plan: &mockPhysicalIndexReader{e: innerDS}, executorBuilder: newExecutorBuilder(tc.ctx, nil, nil, 0, false)}, + readerBuilder: &dataReaderBuilder{Plan: &mockPhysicalIndexReader{e: innerDS}, executorBuilder: newExecutorBuilder(tc.ctx, nil, nil, 0, false, oracle.GlobalTxnScope)}, rowTypes: rightTypes, joinKeys: innerJoinKeys, colLens: colLens, diff --git a/executor/builder.go b/executor/builder.go index b3bbdc579d6cb..c72a66e74c801 100644 --- a/executor/builder.go +++ b/executor/builder.go @@ -85,6 +85,7 @@ type executorBuilder struct { Ti *TelemetryInfo // ExplicitStaleness means whether the 'SELECT' clause are using 'AS OF TIMESTAMP' to perform stale read explicitly. explicitStaleness bool + txnScope string } // CTEStorages stores resTbl and iterInTbl for CTEExec. @@ -95,13 +96,14 @@ type CTEStorages struct { IterInTbl cteutil.Storage } -func newExecutorBuilder(ctx sessionctx.Context, is infoschema.InfoSchema, ti *TelemetryInfo, snapshotTS uint64, explicitStaleness bool) *executorBuilder { +func newExecutorBuilder(ctx sessionctx.Context, is infoschema.InfoSchema, ti *TelemetryInfo, snapshotTS uint64, explicitStaleness bool, txnScope string) *executorBuilder { return &executorBuilder{ ctx: ctx, is: is, Ti: ti, snapshotTS: snapshotTS, explicitStaleness: explicitStaleness, + txnScope: txnScope, } } @@ -2679,6 +2681,8 @@ func buildNoRangeTableReader(b *executorBuilder, v *plannercore.PhysicalTableRea baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), dagPB: dagReq, startTS: startTS, + txnScope: b.txnScope, + isStaleness: b.explicitStaleness, table: tbl, keepOrder: ts.KeepOrder, desc: ts.Desc, @@ -2950,6 +2954,8 @@ func buildNoRangeIndexReader(b *executorBuilder, v *plannercore.PhysicalIndexRea baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), dagPB: dagReq, startTS: startTS, + txnScope: b.txnScope, + isStaleness: b.explicitStaleness, physicalTableID: physicalTableID, table: tbl, index: is.Index, @@ -3566,6 +3572,8 @@ func (builder *dataReaderBuilder) buildTableReaderBase(ctx context.Context, e *T SetDesc(e.desc). SetKeepOrder(e.keepOrder). SetStreaming(e.streaming). + SetTxnScope(e.txnScope). + SetIsStaleness(e.isStaleness). SetFromSessionVars(e.ctx.GetSessionVars()). SetFromInfoSchema(e.ctx.GetInfoSchema()). 
Build() @@ -4078,6 +4086,8 @@ func (b *executorBuilder) buildBatchPointGet(plan *plannercore.BatchPointGetPlan idxInfo: plan.IndexInfo, rowDecoder: decoder, startTS: startTS, + txnScope: b.txnScope, + isStaleness: b.explicitStaleness, keepOrder: plan.KeepOrder, desc: plan.Desc, lock: plan.Lock, diff --git a/executor/compiler.go b/executor/compiler.go index 18b5b32979296..e5a6f9b0c6d69 100644 --- a/executor/compiler.go +++ b/executor/compiler.go @@ -72,6 +72,7 @@ func (c *Compiler) Compile(ctx context.Context, stmtNode ast.StmtNode) (*ExecStm GoCtx: ctx, SnapshotTS: ret.LastSnapshotTS, ExplicitStaleness: ret.ExplicitStaleness, + TxnScope: ret.TxnScope, InfoSchema: ret.InfoSchema, Plan: finalPlan, LowerPriority: lowerPriority, diff --git a/executor/coprocessor.go b/executor/coprocessor.go index 5eebd868cfb34..93f7dbcf40366 100644 --- a/executor/coprocessor.go +++ b/executor/coprocessor.go @@ -31,6 +31,7 @@ import ( "github.com/pingcap/tidb/util/codec" "github.com/pingcap/tidb/util/timeutil" "github.com/pingcap/tipb/go-tipb" + "github.com/tikv/client-go/v2/oracle" ) // CoprocessorDAGHandler uses to handle cop dag request. @@ -168,7 +169,7 @@ func (h *CoprocessorDAGHandler) buildDAGExecutor(req *coprocessor.Request) (Exec } plan = core.InjectExtraProjection(plan) // Build executor. - b := newExecutorBuilder(h.sctx, is, nil, 0, false) + b := newExecutorBuilder(h.sctx, is, nil, 0, false, oracle.GlobalTxnScope) return b.build(plan), nil } diff --git a/executor/distsql.go b/executor/distsql.go index b27a62c320c71..b6be537114d15 100644 --- a/executor/distsql.go +++ b/executor/distsql.go @@ -166,10 +166,11 @@ type IndexReaderExecutor struct { partRangeMap map[int64][]*ranger.Range // each partition may have different ranges // kvRanges are only used for union scan. - kvRanges []kv.KeyRange - dagPB *tipb.DAGRequest - startTS uint64 - + kvRanges []kv.KeyRange + dagPB *tipb.DAGRequest + startTS uint64 + txnScope string + isStaleness bool // result returns one or more distsql.PartialResult and each PartialResult is returned by one region. result distsql.SelectResult // columns are only required by union scan. @@ -290,6 +291,8 @@ func (e *IndexReaderExecutor) open(ctx context.Context, kvRanges []kv.KeyRange) SetDesc(e.desc). SetKeepOrder(e.keepOrder). SetStreaming(e.streaming). + SetTxnScope(e.txnScope). + SetIsStaleness(e.isStaleness). SetFromSessionVars(e.ctx.GetSessionVars()). SetFromInfoSchema(e.ctx.GetInfoSchema()). SetMemTracker(e.memTracker) @@ -548,6 +551,8 @@ func (e *IndexLookUpExecutor) startIndexWorker(ctx context.Context, workCh chan< SetDesc(e.desc). SetKeepOrder(e.keepOrder). SetStreaming(e.indexStreaming). + SetTxnScope(e.txnScope). + SetIsStaleness(e.explicitStaleness). SetFromSessionVars(e.ctx.GetSessionVars()). SetFromInfoSchema(e.ctx.GetInfoSchema()). 
SetMemTracker(tracker) @@ -645,6 +650,8 @@ func (e *IndexLookUpExecutor) buildTableReader(ctx context.Context, task *lookup table: table, dagPB: e.tableRequest, startTS: e.startTS, + txnScope: e.txnScope, + isStaleness: e.explicitStaleness, columns: e.columns, streaming: e.tableStreaming, feedback: statistics.NewQueryFeedback(0, nil, 0, false), diff --git a/executor/executor_required_rows_test.go b/executor/executor_required_rows_test.go index bb2766189ed52..44c9a5d910dfd 100644 --- a/executor/executor_required_rows_test.go +++ b/executor/executor_required_rows_test.go @@ -36,6 +36,7 @@ import ( "github.com/pingcap/tidb/util/disk" "github.com/pingcap/tidb/util/memory" "github.com/pingcap/tidb/util/mock" + "github.com/tikv/client-go/v2/oracle" ) type requiredRowsDataSource struct { @@ -843,7 +844,7 @@ func buildMergeJoinExec(ctx sessionctx.Context, joinType plannercore.JoinType, i j.CompareFuncs = append(j.CompareFuncs, expression.GetCmpFunction(nil, j.LeftJoinKeys[i], j.RightJoinKeys[i])) } - b := newExecutorBuilder(ctx, nil, nil, 0, false) + b := newExecutorBuilder(ctx, nil, nil, 0, false, oracle.GlobalTxnScope) return b.build(j) } diff --git a/executor/index_merge_reader.go b/executor/index_merge_reader.go index 343ee4cb06105..fd7f052779c06 100644 --- a/executor/index_merge_reader.go +++ b/executor/index_merge_reader.go @@ -242,6 +242,8 @@ func (e *IndexMergeReaderExecutor) startPartialIndexWorker(ctx context.Context, SetDesc(e.descs[workID]). SetKeepOrder(false). SetStreaming(e.partialStreamings[workID]). + SetTxnScope(e.txnScope). + SetIsStaleness(e.explicitStaleness). SetFromSessionVars(e.ctx.GetSessionVars()). SetMemTracker(e.memTracker). SetFromInfoSchema(e.ctx.GetInfoSchema()) @@ -326,6 +328,8 @@ func (e *IndexMergeReaderExecutor) startPartialTableWorker(ctx context.Context, baseExecutor: newBaseExecutor(e.ctx, ts.Schema(), e.getPartitalPlanID(workID)), dagPB: e.dagPBs[workID], startTS: e.startTS, + txnScope: e.txnScope, + isStaleness: e.explicitStaleness, streaming: e.partialStreamings[workID], feedback: statistics.NewQueryFeedback(0, nil, 0, false), plans: e.partialPlans[workID], @@ -537,6 +541,8 @@ func (e *IndexMergeReaderExecutor) buildFinalTableReader(ctx context.Context, tb table: tbl, dagPB: e.tableRequest, startTS: e.startTS, + txnScope: e.txnScope, + isStaleness: e.explicitStaleness, streaming: e.tableStreaming, columns: e.columns, feedback: statistics.NewQueryFeedback(0, nil, 0, false), diff --git a/executor/point_get.go b/executor/point_get.go index f290408b0d4d0..8f9ad76275f6a 100644 --- a/executor/point_get.go +++ b/executor/point_get.go @@ -54,6 +54,8 @@ func (b *executorBuilder) buildPointGet(p *plannercore.PointGetPlan) Executor { } e := &PointGetExecutor{ baseExecutor: newBaseExecutor(b.ctx, p.Schema(), p.ID()), + txnScope: b.txnScope, + isStaleness: b.explicitStaleness, } e.base().initCap = 1 e.base().maxChunkSize = 1 @@ -76,6 +78,8 @@ type PointGetExecutor struct { handleVal []byte idxVals []types.Datum startTS uint64 + txnScope string + isStaleness bool txn kv.Transaction snapshot kv.Snapshot done bool @@ -154,17 +158,24 @@ func (e *PointGetExecutor) Open(context.Context) error { e.snapshot.SetOption(kv.ReplicaRead, kv.ReplicaReadFollower) } e.snapshot.SetOption(kv.TaskID, e.ctx.GetSessionVars().StmtCtx.TaskID) - e.snapshot.SetOption(kv.TxnScope, e.ctx.GetSessionVars().TxnCtx.TxnScope) - isStaleness := e.ctx.GetSessionVars().TxnCtx.IsStaleness - e.snapshot.SetOption(kv.IsStalenessReadOnly, isStaleness) - if isStaleness && e.ctx.GetSessionVars().TxnCtx.TxnScope 
!= kv.GlobalTxnScope { + if e.isStaleness && e.txnScope != kv.GlobalTxnScope { e.snapshot.SetOption(kv.MatchStoreLabels, []*metapb.StoreLabel{ { Key: placement.DCLabelKey, - Value: e.ctx.GetSessionVars().TxnCtx.TxnScope, + Value: e.txnScope, }, }) } + failpoint.Inject("assertPointStalenessOption", func(val failpoint.Value) { + assertScope := val.(string) + if len(assertScope) > 0 { + if e.isStaleness && assertScope != e.txnScope { + panic("point get staleness option fail") + } + } + }) setResourceGroupTagForTxn(e.ctx.GetSessionVars().StmtCtx, e.snapshot) return nil } @@ -395,7 +406,10 @@ func (e *PointGetExecutor) get(ctx context.Context, key kv.Key) ([]byte, error) } func (e *PointGetExecutor) verifyTxnScope() error { - txnScope := e.txn.GetOption(kv.TxnScope).(string) + if e.isStaleness { + return nil + } + txnScope := e.txnScope if txnScope == "" || txnScope == kv.GlobalTxnScope { return nil } diff --git a/executor/stale_txn_test.go b/executor/stale_txn_test.go index b02311f08fd58..e7cc5c91470c3 100644 --- a/executor/stale_txn_test.go +++ b/executor/stale_txn_test.go @@ -20,6 +20,7 @@ import ( . "github.com/pingcap/check" "github.com/pingcap/errors" "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/ddl/placement" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/testkit" ) @@ -235,49 +236,88 @@ func (s *testStaleTxnSerialSuite) TestSelectAsOf(c *C) { func (s *testStaleTxnSerialSuite) TestStaleReadKVRequest(c *C) { tk := testkit.NewTestKit(c, s.store) + safePointName := "tikv_gc_safe_point" + safePointValue := "20160102-15:04:05 -0700" + safePointComment := "All versions after safe point can be accessed.
(DO NOT EDIT)" + updateSafePoint := fmt.Sprintf(`INSERT INTO mysql.tidb VALUES ('%[1]s', '%[2]s', '%[3]s') + ON DUPLICATE KEY + UPDATE variable_value = '%[2]s', comment = '%[3]s'`, safePointName, safePointValue, safePointComment) + tk.MustExec(updateSafePoint) tk.MustExec("use test") tk.MustExec("drop table if exists t") + tk.MustExec(`drop table if exists t1`) + tk.MustExec(`drop table if exists t2`) tk.MustExec("create table t (id int primary key);") + tk.MustExec(`create table t1 (c int primary key, d int,e int,index idx_d(d),index idx_e(e))`) defer tk.MustExec(`drop table if exists t`) + defer tk.MustExec(`drop table if exists t1`) + conf := *config.GetGlobalConfig() + oldConf := conf + defer config.StoreGlobalConfig(&oldConf) + conf.Labels = map[string]string{ + placement.DCLabelKey: "sh", + } + config.StoreGlobalConfig(&conf) testcases := []struct { - name string - sql string - txnScope string - zone string + name string + sql string + assert string }{ { - name: "coprocessor read", - sql: "select * from t", - txnScope: "local", - zone: "sh", + name: "coprocessor read", + sql: "select * from t", + assert: "github.com/pingcap/tidb/distsql/assertRequestBuilderStalenessOption", }, { - name: "point get read", - sql: "select * from t where id = 1", - txnScope: "local", - zone: "bj", + name: "point get read", + sql: "select * from t where id = 1", + assert: "github.com/pingcap/tidb/executor/assertPointStalenessOption", }, { - name: "batch point get read", - sql: "select * from t where id in (1,2,3)", - txnScope: "local", - zone: "hz", + name: "batch point get read", + sql: "select * from t where id in (1,2,3)", + assert: "github.com/pingcap/tidb/executor/assertBatchPointStalenessOption", }, } for _, testcase := range testcases { - c.Log(testcase.name) - tk.MustExec(fmt.Sprintf("set @@txn_scope=%v", testcase.txnScope)) - failpoint.Enable("github.com/pingcap/tidb/config/injectTxnScope", fmt.Sprintf(`return("%v")`, testcase.zone)) - failpoint.Enable("tikvclient/assertStoreLabels", fmt.Sprintf(`return("%v_%v")`, placement.DCLabelKey, testcase.txnScope)) - failpoint.Enable("tikvclient/assertStaleReadFlag", `return(true)`) - // Using NOW() will cause the loss of fsp precision, so we use NOW(3) to be accurate to the millisecond.
+ failpoint.Enable(testcase.assert, `return("sh")`) tk.MustExec(`START TRANSACTION READ ONLY AS OF TIMESTAMP NOW(3);`) tk.MustQuery(testcase.sql) tk.MustExec(`commit`) + failpoint.Disable(testcase.assert) } - failpoint.Disable("github.com/pingcap/tidb/config/injectTxnScope") - failpoint.Disable("tikvclient/assertStoreLabels") - failpoint.Disable("tikvclient/assertStaleReadFlag") + for _, testcase := range testcases { + failpoint.Enable(testcase.assert, `return("sh")`) + tk.MustExec(`SET TRANSACTION READ ONLY AS OF TIMESTAMP NOW(3)`) + tk.MustExec(`begin;`) + tk.MustQuery(testcase.sql) + tk.MustExec(`commit`) + failpoint.Disable(testcase.assert) + } + tk.MustExec(`insert into t1 (c,d,e) values (1,1,1);`) + tk.MustExec(`insert into t1 (c,d,e) values (2,3,5);`) + time.Sleep(2 * time.Second) + tsv := time.Now().Format("2006-1-2 15:04:05.000") + tk.MustExec(`insert into t1 (c,d,e) values (3,3,7);`) + tk.MustExec(`insert into t1 (c,d,e) values (4,0,5);`) + tk.MustExec(`insert into t1 (c,d,e) values (5,0,5);`) + // IndexLookUp Reader Executor + rows1 := tk.MustQuery(fmt.Sprintf("select * from t1 AS OF TIMESTAMP '%v' use index (idx_d) where c < 5 and d < 5", tsv)).Rows() + c.Assert(rows1, HasLen, 2) + // IndexMerge Reader Executor + rows2 := tk.MustQuery(fmt.Sprintf("select /*+ USE_INDEX_MERGE(t1, idx_d, idx_e) */ * from t1 AS OF TIMESTAMP '%v' where c <5 and (d =5 or e=5)", tsv)).Rows() + c.Assert(rows2, HasLen, 1) + // TableReader Executor + rows3 := tk.MustQuery(fmt.Sprintf("select * from t1 AS OF TIMESTAMP '%v' where c < 6", tsv)).Rows() + c.Assert(rows3, HasLen, 2) + // IndexReader Executor + rows4 := tk.MustQuery(fmt.Sprintf("select /*+ USE_INDEX(t1, idx_d) */ d from t1 AS OF TIMESTAMP '%v' where c < 5 and d < 1;", tsv)).Rows() + c.Assert(rows4, HasLen, 0) + // point get executor + rows5 := tk.MustQuery(fmt.Sprintf("select * from t1 AS OF TIMESTAMP '%v' where c = 3;", tsv)).Rows() + c.Assert(rows5, HasLen, 0) + rows6 := tk.MustQuery(fmt.Sprintf("select * from t1 AS OF TIMESTAMP '%v' where c in (3,4,5);", tsv)).Rows() + c.Assert(rows6, HasLen, 0) } func (s *testStaleTxnSuite) TestStalenessAndHistoryRead(c *C) { diff --git a/executor/table_reader.go b/executor/table_reader.go index 6c8f417a7f02e..0ab10a6f829dd 100644 --- a/executor/table_reader.go +++ b/executor/table_reader.go @@ -74,9 +74,11 @@ type TableReaderExecutor struct { ranges []*ranger.Range // kvRanges are only use for union scan. - kvRanges []kv.KeyRange - dagPB *tipb.DAGRequest - startTS uint64 + kvRanges []kv.KeyRange + dagPB *tipb.DAGRequest + startTS uint64 + txnScope string + isStaleness bool // columns are only required by union scan and virtual column. columns []*model.ColumnInfo @@ -324,6 +326,7 @@ func (e *TableReaderExecutor) buildKVReqSeparately(ctx context.Context, ranges [ SetDesc(e.desc). SetKeepOrder(e.keepOrder). SetStreaming(e.streaming). + SetTxnScope(e.txnScope). SetFromSessionVars(e.ctx.GetSessionVars()). SetFromInfoSchema(e.ctx.GetInfoSchema()). SetMemTracker(e.memTracker). @@ -355,6 +358,8 @@ func (e *TableReaderExecutor) buildKVReq(ctx context.Context, ranges []*ranger.R SetDesc(e.desc). SetKeepOrder(e.keepOrder). SetStreaming(e.streaming). + SetTxnScope(e.txnScope). + SetIsStaleness(e.isStaleness). SetFromSessionVars(e.ctx.GetSessionVars()). SetFromInfoSchema(e.ctx.GetInfoSchema()). SetMemTracker(e.memTracker). 
diff --git a/expression/builtin_time.go b/expression/builtin_time.go index aa734b7553674..acf9ce5e219a6 100644 --- a/expression/builtin_time.go +++ b/expression/builtin_time.go @@ -31,6 +31,7 @@ import ( "github.com/pingcap/parser/ast" "github.com/pingcap/parser/mysql" "github.com/pingcap/parser/terror" + "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/sessionctx/stmtctx" "github.com/pingcap/tidb/sessionctx/variable" @@ -7180,8 +7181,9 @@ func (b *builtinTiDBBoundedStalenessSig) evalTime(row chunk.Row) (types.Time, bo func getMinSafeTime(sessionCtx sessionctx.Context, timeZone *time.Location) time.Time { var minSafeTS uint64 + _, txnScope := config.GetTxnScopeFromConfig() if store := sessionCtx.GetStore(); store != nil { - minSafeTS = store.GetMinSafeTS(sessionCtx.GetSessionVars().CheckAndGetTxnScope()) + minSafeTS = store.GetMinSafeTS(txnScope) } // Inject mocked SafeTS for test. failpoint.Inject("injectSafeTS", func(val failpoint.Value) { diff --git a/expression/integration_test.go b/expression/integration_test.go index c0d729263166e..41e04af3b0c9e 100644 --- a/expression/integration_test.go +++ b/expression/integration_test.go @@ -8925,6 +8925,7 @@ PARTITION BY RANGE (c) ( PARTITION p0 VALUES LESS THAN (6), PARTITION p1 VALUES LESS THAN (11) );`) + defer tk.MustExec("drop table if exists t1") tk.MustExec(`insert into t1 (c,d,e) values (1,1,1);`) tk.MustExec(`insert into t1 (c,d,e) values (2,3,5);`) @@ -9035,6 +9036,7 @@ PARTITION BY RANGE (c) ( failpoint.Enable("tikvclient/injectTxnScope", fmt.Sprintf(`return("%v")`, testcase.zone)) tk.MustExec(fmt.Sprintf("set @@txn_scope='%v'", testcase.txnScope)) + tk.Exec("begin") res, err := tk.Exec(testcase.sql) _, resErr := session.GetRows4Test(context.Background(), tk.Se, res) var checkErr error @@ -9052,8 +9054,9 @@ PARTITION BY RANGE (c) ( if res != nil { res.Close() } - failpoint.Disable("tikvclient/injectTxnScope") + tk.Exec("commit") } + failpoint.Disable("tikvclient/injectTxnScope") } func (s *testIntegrationSerialSuite) TestCollationUnion(c *C) { diff --git a/kv/txn_scope_var.go b/kv/txn_scope_var.go index 8139430e432de..df1070c7a33e6 100644 --- a/kv/txn_scope_var.go +++ b/kv/txn_scope_var.go @@ -51,7 +51,11 @@ func (t TxnScopeVar) GetVarValue() string { } // GetTxnScope returns the value of the tidb-server holds to request tso to pd. +// When varValue is 'global`, directly return global here func (t TxnScopeVar) GetTxnScope() string { + if t.varValue == GlobalTxnScope { + return GlobalTxnScope + } return t.txnScope } diff --git a/planner/core/preprocess.go b/planner/core/preprocess.go index 15da5da84d4eb..9c3b053cff5e6 100644 --- a/planner/core/preprocess.go +++ b/planner/core/preprocess.go @@ -25,6 +25,7 @@ import ( "github.com/pingcap/parser/model" "github.com/pingcap/parser/mysql" "github.com/pingcap/parser/terror" + "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/ddl" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/expression" @@ -38,6 +39,7 @@ import ( "github.com/pingcap/tidb/util" "github.com/pingcap/tidb/util/domainutil" utilparser "github.com/pingcap/tidb/util/parser" + "github.com/tikv/client-go/v2/oracle" ) // PreprocessOpt presents optional parameters to `Preprocess` method. 
diff --git a/planner/core/preprocess.go b/planner/core/preprocess.go
index 15da5da84d4eb..9c3b053cff5e6 100644
--- a/planner/core/preprocess.go
+++ b/planner/core/preprocess.go
@@ -25,6 +25,7 @@ import (
 	"github.com/pingcap/parser/model"
 	"github.com/pingcap/parser/mysql"
 	"github.com/pingcap/parser/terror"
+	"github.com/pingcap/tidb/config"
 	"github.com/pingcap/tidb/ddl"
 	"github.com/pingcap/tidb/domain"
 	"github.com/pingcap/tidb/expression"
@@ -38,6 +39,7 @@ import (
 	"github.com/pingcap/tidb/util"
 	"github.com/pingcap/tidb/util/domainutil"
 	utilparser "github.com/pingcap/tidb/util/parser"
+	"github.com/tikv/client-go/v2/oracle"
 )
 
 // PreprocessOpt presents optional parameters to `Preprocess` method.
@@ -134,6 +136,7 @@ type PreprocessorReturn struct {
 	// otherwise it defaults to zero
 	LastSnapshotTS uint64
 	InfoSchema     infoschema.InfoSchema
+	TxnScope       string
 }
 
 // preprocessor is an ast.Visitor that preprocess
@@ -1456,9 +1459,27 @@ func (p *preprocessor) checkFuncCastExpr(node *ast.FuncCastExpr) {
 }
 
 // handleAsOfAndReadTS tries to handle as of closure, or possibly read_ts.
-// If read_ts is not nil, it will be consumed.
-// If as of is not nil, timestamp is used to get the history infoschema from the infocache.
 func (p *preprocessor) handleAsOfAndReadTS(node *ast.AsOfClause) {
+	// When the statement runs inside an explicit transaction, an AsOfClause is not allowed
+	// and we return an error; otherwise the return params are set directly from TxnCtx.
+	p.TxnScope = oracle.GlobalTxnScope
+	if p.ctx.GetSessionVars().InTxn() {
+		if node != nil {
+			p.err = ErrAsOf.FastGenWithCause("as of timestamp can't be set in transaction.")
+			return
+		}
+		txnCtx := p.ctx.GetSessionVars().TxnCtx
+		p.TxnScope = txnCtx.TxnScope
+		if txnCtx.IsStaleness {
+			p.LastSnapshotTS = txnCtx.StartTS
+			p.ExplicitStaleness = txnCtx.IsStaleness
+			p.initedLastSnapshotTS = true
+			return
+		}
+	}
+	// If the statement is auto-committed, check whether a read_ts exists; if so, use it directly.
+	// The txnScope is derived from the zone label; when the label is not set, the global
+	// txnScope is used.
 	ts := p.ctx.GetSessionVars().TxnReadTS.UseTxnReadTS()
 	if ts > 0 {
 		if node != nil {
@@ -1470,14 +1491,10 @@ func (p *preprocessor) handleAsOfAndReadTS(node *ast.AsOfClause) {
 			return ts, nil
 		}
 		p.LastSnapshotTS = ts
-		p.ExplicitStaleness = true
+		p.setStalenessReturn()
 	}
 	if node != nil {
-		if p.ctx.GetSessionVars().InTxn() {
-			p.err = ErrAsOf.FastGenWithCause("as of timestamp can't be set in transaction.")
-			return
-		}
 		ts, p.err = calculateTsExpr(p.ctx, node)
 		if p.err != nil {
 			return
@@ -1487,7 +1504,7 @@ func (p *preprocessor) handleAsOfAndReadTS(node *ast.AsOfClause) {
 			return calculateTsExpr(ctx, node)
 		}
 		p.LastSnapshotTS = ts
-		p.ExplicitStaleness = true
+		p.setStalenessReturn()
 	}
 	if p.LastSnapshotTS != ts {
@@ -1515,3 +1532,9 @@ func (p *preprocessor) ensureInfoSchema() infoschema.InfoSchema {
 	}
 	return p.InfoSchema
 }
+
+func (p *preprocessor) setStalenessReturn() {
+	_, txnScope := config.GetTxnScopeFromConfig()
+	p.ExplicitStaleness = true
+	p.TxnScope = txnScope
+}
diff --git a/session/session.go b/session/session.go
index b504392f02174..0ff311d53c052 100644
--- a/session/session.go
+++ b/session/session.go
@@ -2083,7 +2083,7 @@ func (s *session) NewStaleTxnWithStartTS(ctx context.Context, startTS uint64) er
 	if err := s.checkBeforeNewTxn(ctx); err != nil {
 		return err
 	}
-	txnScope := s.GetSessionVars().CheckAndGetTxnScope()
+	_, txnScope := config.GetTxnScopeFromConfig()
 	txn, err := s.store.BeginWithOption(tikv.DefaultStartTSOption().SetTxnScope(txnScope).SetStartTS(startTS))
 	if err != nil {
 		return err
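
The reworked handleAsOfAndReadTS above resolves staleness from three sources in a fixed order. A simplified, runnable model of that precedence follows; the types are stand-ins for the real preprocessor state, which works on ast nodes and session variables.

package main

import (
	"errors"
	"fmt"
)

type staleness struct {
	snapshotTS uint64
	explicit   bool
}

// resolve mirrors the order of checks in the patched function:
// 1. inside a transaction, AS OF is an error; a stale txn reuses its start ts;
// 2. otherwise a pending read_ts (SET TRANSACTION ... AS OF) is consumed;
// 3. otherwise an AS OF clause on the statement itself is evaluated.
func resolve(inTxn, txnIsStale bool, txnStartTS, readTS, asOfTS uint64) (staleness, error) {
	if inTxn {
		if asOfTS != 0 {
			return staleness{}, errors.New("as of timestamp can't be set in transaction")
		}
		if txnIsStale {
			return staleness{snapshotTS: txnStartTS, explicit: true}, nil
		}
		return staleness{}, nil
	}
	if readTS != 0 {
		return staleness{snapshotTS: readTS, explicit: true}, nil
	}
	if asOfTS != 0 {
		return staleness{snapshotTS: asOfTS, explicit: true}, nil
	}
	return staleness{}, nil
}

func main() {
	fmt.Println(resolve(true, false, 0, 0, 42))  // error: AS OF inside a transaction
	fmt.Println(resolve(false, false, 0, 10, 0)) // pending read_ts wins
	fmt.Println(resolve(false, false, 0, 0, 42)) // AS OF clause is used
}
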
by.*") + tk.MustExec("commit") // begin and commit reading & writing the data in dc-2 with dc-1 txn scope tk.MustExec("begin") @@ -3520,12 +3524,14 @@ PARTITION BY RANGE (c) ( c.Assert(txn.Valid(), IsTrue) tk.MustExec("insert into t1 (c) values (101)") // write dc-2 with dc-1 scope err = tk.ExecToErr("select * from t1 where c > 100") // read dc-2 with dc-1 scope + c.Assert(err, NotNil) c.Assert(err.Error(), Matches, ".*can not be read by.*") tk.MustExec("insert into t1 (c) values (99)") // write dc-1 with dc-1 scope result = tk.MustQuery("select * from t1 where c < 100") // read dc-1 with dc-1 scope c.Assert(len(result.Rows()), Equals, 5) c.Assert(txn.Valid(), IsTrue) _, err = tk.Exec("commit") + c.Assert(err, NotNil) c.Assert(err.Error(), Matches, ".*out of txn_scope.*") // Won't read the value 99 because the previous commit failed result = tk.MustQuery("select * from t1 where c < 100") // read dc-1 with dc-1 scope From 5794e8ccd0ba2df0d5545d0694d1042343717f02 Mon Sep 17 00:00:00 2001 From: xhe Date: Fri, 18 Jun 2021 18:02:38 +0800 Subject: [PATCH 19/25] executor: forbid stale read table with tiflash (#25561) --- executor/builder.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/executor/builder.go b/executor/builder.go index c72a66e74c801..b3fa9e5c7c084 100644 --- a/executor/builder.go +++ b/executor/builder.go @@ -2755,6 +2755,10 @@ func (b *executorBuilder) buildMPPGather(v *plannercore.PhysicalTableReader) Exe // buildTableReader builds a table reader executor. It first build a no range table reader, // and then update it ranges from table scan plan. func (b *executorBuilder) buildTableReader(v *plannercore.PhysicalTableReader) Executor { + if v.StoreType != kv.TiKV && b.explicitStaleness { + b.err = errors.New("stale requests require tikv backend") + return nil + } failpoint.Inject("checkUseMPP", func(val failpoint.Value) { if val.(bool) != useMPPExecution(b.ctx, v) { if val.(bool) { From 712fb1c55b4870f376fff932f6a90bae80c3f68f Mon Sep 17 00:00:00 2001 From: bb7133 Date: Sat, 19 Jun 2021 21:54:52 +0800 Subject: [PATCH 20/25] *: update parser to fix ODBC-styled literal (#25570) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index e3970584da222..3d65c5ae20cb0 100644 --- a/go.mod +++ b/go.mod @@ -44,7 +44,7 @@ require ( github.com/pingcap/fn v0.0.0-20200306044125-d5540d389059 github.com/pingcap/kvproto v0.0.0-20210611081648-a215b4e61d2f github.com/pingcap/log v0.0.0-20210317133921-96f4fcab92a4 - github.com/pingcap/parser v0.0.0-20210610080504-cb77169bfed9 + github.com/pingcap/parser v0.0.0-20210618053735-57843e8185c4 github.com/pingcap/sysutil v0.0.0-20210315073920-cc0985d983a3 github.com/pingcap/tidb-tools v4.0.9-0.20201127090955-2707c97b3853+incompatible github.com/pingcap/tipb v0.0.0-20210603161937-cfb5a9225f95 diff --git a/go.sum b/go.sum index 0f275a1ae6b1b..1f35fe974ffa0 100644 --- a/go.sum +++ b/go.sum @@ -443,8 +443,8 @@ github.com/pingcap/log v0.0.0-20201112100606-8f1e84a3abc8/go.mod h1:4rbK1p9ILyIf github.com/pingcap/log v0.0.0-20210317133921-96f4fcab92a4 h1:ERrF0fTuIOnwfGbt71Ji3DKbOEaP189tjym50u8gpC8= github.com/pingcap/log v0.0.0-20210317133921-96f4fcab92a4/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= github.com/pingcap/parser v0.0.0-20210525032559-c37778aff307/go.mod h1:xZC8I7bug4GJ5KtHhgAikjTfU4kBv1Sbo3Pf1MZ6lVw= -github.com/pingcap/parser v0.0.0-20210610080504-cb77169bfed9 h1:Y6kdGSXaL2cW2qkRjOyCSSaijy1FNryMn50RX6I5R2o= -github.com/pingcap/parser v0.0.0-20210610080504-cb77169bfed9/go.mod 
From 712fb1c55b4870f376fff932f6a90bae80c3f68f Mon Sep 17 00:00:00 2001
From: bb7133
Date: Sat, 19 Jun 2021 21:54:52 +0800
Subject: [PATCH 20/25] *: update parser to fix ODBC-styled literal (#25570)

---
 go.mod | 2 +-
 go.sum | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/go.mod b/go.mod
index e3970584da222..3d65c5ae20cb0 100644
--- a/go.mod
+++ b/go.mod
@@ -44,7 +44,7 @@ require (
 	github.com/pingcap/fn v0.0.0-20200306044125-d5540d389059
 	github.com/pingcap/kvproto v0.0.0-20210611081648-a215b4e61d2f
 	github.com/pingcap/log v0.0.0-20210317133921-96f4fcab92a4
-	github.com/pingcap/parser v0.0.0-20210610080504-cb77169bfed9
+	github.com/pingcap/parser v0.0.0-20210618053735-57843e8185c4
 	github.com/pingcap/sysutil v0.0.0-20210315073920-cc0985d983a3
 	github.com/pingcap/tidb-tools v4.0.9-0.20201127090955-2707c97b3853+incompatible
 	github.com/pingcap/tipb v0.0.0-20210603161937-cfb5a9225f95
diff --git a/go.sum b/go.sum
index 0f275a1ae6b1b..1f35fe974ffa0 100644
--- a/go.sum
+++ b/go.sum
@@ -443,8 +443,8 @@ github.com/pingcap/log v0.0.0-20201112100606-8f1e84a3abc8/go.mod h1:4rbK1p9ILyIf
 github.com/pingcap/log v0.0.0-20210317133921-96f4fcab92a4 h1:ERrF0fTuIOnwfGbt71Ji3DKbOEaP189tjym50u8gpC8=
 github.com/pingcap/log v0.0.0-20210317133921-96f4fcab92a4/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8=
 github.com/pingcap/parser v0.0.0-20210525032559-c37778aff307/go.mod h1:xZC8I7bug4GJ5KtHhgAikjTfU4kBv1Sbo3Pf1MZ6lVw=
-github.com/pingcap/parser v0.0.0-20210610080504-cb77169bfed9 h1:Y6kdGSXaL2cW2qkRjOyCSSaijy1FNryMn50RX6I5R2o=
-github.com/pingcap/parser v0.0.0-20210610080504-cb77169bfed9/go.mod h1:xZC8I7bug4GJ5KtHhgAikjTfU4kBv1Sbo3Pf1MZ6lVw=
+github.com/pingcap/parser v0.0.0-20210618053735-57843e8185c4 h1:NASsbyMTNW8pbYfoO/YTykO6MQJiNRa094lwCPU6R2Q=
+github.com/pingcap/parser v0.0.0-20210618053735-57843e8185c4/go.mod h1:xZC8I7bug4GJ5KtHhgAikjTfU4kBv1Sbo3Pf1MZ6lVw=
 github.com/pingcap/sysutil v0.0.0-20200206130906-2bfa6dc40bcd/go.mod h1:EB/852NMQ+aRKioCpToQ94Wl7fktV+FNnxf3CX/TTXI=
 github.com/pingcap/sysutil v0.0.0-20210315073920-cc0985d983a3 h1:A9KL9R+lWSVPH8IqUuH1QSTRJ5FGoY1bT2IcfPKsWD8=
 github.com/pingcap/sysutil v0.0.0-20210315073920-cc0985d983a3/go.mod h1:tckvA041UWP+NqYzrJ3fMgC/Hw9wnmQ/tUkp/JaHly8=

From 18589bfb31250d1d7404f76f5793abdd2a3cb53c Mon Sep 17 00:00:00 2001
From: Qi Yu
Date: Mon, 21 Jun 2021 10:48:52 +0800
Subject: [PATCH 21/25] infoschema: version of tiflash is inconsistent with other components in table information_schema.cluster_info (#24694)

---
 infoschema/tables.go      | 17 +++++++++++++----
 infoschema/tables_test.go | 12 ++++++++++--
 2 files changed, 23 insertions(+), 6 deletions(-)

diff --git a/infoschema/tables.go b/infoschema/tables.go
index cc66f0afd6cd0..1b82cec973dda 100644
--- a/infoschema/tables.go
+++ b/infoschema/tables.go
@@ -1521,7 +1521,7 @@ func GetTiDBServerInfo(ctx sessionctx.Context) ([]ServerInfo, error) {
 			ServerType:     "tidb",
 			Address:        fmt.Sprintf("%s:%d", node.IP, node.Port),
 			StatusAddr:     fmt.Sprintf("%s:%d", node.IP, node.StatusPort),
-			Version:        FormatVersion(node.Version, isDefaultVersion),
+			Version:        FormatTiDBVersion(node.Version, isDefaultVersion),
 			GitHash:        node.GitHash,
 			StartTimestamp: node.StartTimestamp,
 			ServerID:       node.ServerIDGetter(),
@@ -1530,9 +1530,9 @@ func GetTiDBServerInfo(ctx sessionctx.Context) ([]ServerInfo, error) {
 	return servers, nil
 }
 
-// FormatVersion make TiDBVersion consistent to TiKV and PD.
+// FormatTiDBVersion makes the TiDB version consistent with TiKV and PD.
 // The default TiDBVersion is 5.7.25-TiDB-${TiDBReleaseVersion}.
-func FormatVersion(TiDBVersion string, isDefaultVersion bool) string {
+func FormatTiDBVersion(TiDBVersion string, isDefaultVersion bool) string {
 	var version, nodeVersion string
 
 	// The user hasn't set the config 'ServerVersion'.
@@ -1662,11 +1662,12 @@ func GetStoreServerInfo(ctx sessionctx.Context) ([]ServerInfo, error) {
 		} else {
 			tp = tikv.GetStoreTypeByMeta(store).Name()
 		}
+
 		servers = append(servers, ServerInfo{
 			ServerType:     tp,
 			Address:        store.Address,
 			StatusAddr:     store.StatusAddress,
-			Version:        store.Version,
+			Version:        FormatStoreServerVersion(store.Version),
 			GitHash:        store.GitHash,
 			StartTimestamp: store.StartTimestamp,
 		})
@@ -1674,6 +1675,14 @@ func GetStoreServerInfo(ctx sessionctx.Context) ([]ServerInfo, error) {
 	return servers, nil
 }
 
+// FormatStoreServerVersion formats the version of store servers (TiKV or TiFlash).
+func FormatStoreServerVersion(version string) string {
+	if len(version) >= 1 && version[0] == 'v' {
+		version = version[1:]
+	}
+	return version
+}
+
 // GetTiFlashStoreCount returns the count of tiflash server.
 func GetTiFlashStoreCount(ctx sessionctx.Context) (cnt uint64, err error) {
 	failpoint.Inject("mockTiFlashStoreCount", func(val failpoint.Value) {
diff --git a/infoschema/tables_test.go b/infoschema/tables_test.go
index 38857e40b9563..3a6c8962a37d4 100644
--- a/infoschema/tables_test.go
+++ b/infoschema/tables_test.go
@@ -945,7 +945,7 @@ func (s *testTableSuite) TestFormatVersion(c *C) {
 	defaultVersions := []string{"5.7.25-TiDB-None", "5.7.25-TiDB-8.0.18", "5.7.25-TiDB-8.0.18-beta.1", "5.7.25-TiDB-v4.0.0-beta-446-g5268094af"}
 	defaultRes := []string{"None", "8.0.18", "8.0.18-beta.1", "4.0.0-beta"}
 	for i, v := range defaultVersions {
-		version := infoschema.FormatVersion(v, true)
+		version := infoschema.FormatTiDBVersion(v, true)
 		c.Assert(version, Equals, defaultRes[i])
 	}
 
@@ -953,9 +953,17 @@ func (s *testTableSuite) TestFormatVersion(c *C) {
 	versions := []string{"8.0.18", "5.7.25-TiDB", "8.0.18-TiDB-4.0.0-beta.1"}
 	res := []string{"8.0.18", "5.7.25-TiDB", "8.0.18-TiDB-4.0.0-beta.1"}
 	for i, v := range versions {
-		version := infoschema.FormatVersion(v, false)
+		version := infoschema.FormatTiDBVersion(v, false)
 		c.Assert(version, Equals, res[i])
 	}
+
+	versions = []string{"v4.0.12", "4.0.12", "v5.0.1"}
+	resultVersion := []string{"4.0.12", "4.0.12", "5.0.1"}
+
+	for i, versionString := range versions {
+		c.Assert(resultVersion[i], Equals, infoschema.FormatStoreServerVersion(versionString))
+	}
+
 }
 
 // Test statements_summary.
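
FormatStoreServerVersion above normalizes the 'v'-prefixed versions reported by TiKV/TiFlash so CLUSTER_INFO matches TiDB's format. Below is a runnable copy of that helper driven by the test vectors from tables_test.go; only the main function around it is new.

package main

import "fmt"

// FormatStoreServerVersion trims a leading 'v' from a store server version,
// turning "v4.0.12" into "4.0.12" while leaving "4.0.12" untouched.
func FormatStoreServerVersion(version string) string {
	if len(version) >= 1 && version[0] == 'v' {
		version = version[1:]
	}
	return version
}

func main() {
	for _, v := range []string{"v4.0.12", "4.0.12", "v5.0.1"} {
		fmt.Println(FormatStoreServerVersion(v))
	}
	// Prints 4.0.12, 4.0.12, 5.0.1 — the expectations in tables_test.go above.
}
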
WARNINGS") - r.Check(testkit.Rows("Warning 1292 Truncated incorrect FLOAT value: '1a'")) + r.Check(testkit.Rows("Warning 1292 Truncated incorrect DOUBLE value: '1a'")) // for duplicates with warning testSQL = `drop table if exists t; @@ -1467,7 +1467,7 @@ func (s *testSuite8) TestUpdate(c *C) { _, err = tk.Exec("update ignore t set a = 1 where a = (select '2a')") c.Assert(err, IsNil) r = tk.MustQuery("SHOW WARNINGS;") - r.Check(testkit.Rows("Warning 1292 Truncated incorrect FLOAT value: '2a'", "Warning 1292 Truncated incorrect FLOAT value: '2a'", "Warning 1062 Duplicate entry '1' for key 'PRIMARY'")) + r.Check(testkit.Rows("Warning 1292 Truncated incorrect DOUBLE value: '2a'", "Warning 1292 Truncated incorrect DOUBLE value: '2a'", "Warning 1062 Duplicate entry '1' for key 'PRIMARY'")) tk.MustExec("update ignore t set a = 42 where a = 2;") tk.MustQuery("select * from t").Check(testkit.Rows("1", "42")) @@ -1736,7 +1736,7 @@ func (s *testSuite4) TestPartitionedTableUpdate(c *C) { _, err = tk.Exec("update ignore t set a = 1 where a = (select '2a')") c.Assert(err, IsNil) r = tk.MustQuery("SHOW WARNINGS;") - r.Check(testkit.Rows("Warning 1292 Truncated incorrect FLOAT value: '2a'", "Warning 1292 Truncated incorrect FLOAT value: '2a'")) + r.Check(testkit.Rows("Warning 1292 Truncated incorrect DOUBLE value: '2a'", "Warning 1292 Truncated incorrect DOUBLE value: '2a'")) // test update ignore for unique key tk.MustExec("drop table if exists t;") @@ -1898,7 +1898,7 @@ func (s *testSuite) TestDelete(c *C) { c.Assert(err, IsNil) tk.CheckExecResult(1, 0) r := tk.MustQuery("SHOW WARNINGS;") - r.Check(testkit.Rows("Warning 1292 Truncated incorrect FLOAT value: '2a'", "Warning 1292 Truncated incorrect FLOAT value: '2a'")) + r.Check(testkit.Rows("Warning 1292 Truncated incorrect DOUBLE value: '2a'", "Warning 1292 Truncated incorrect DOUBLE value: '2a'")) tk.MustExec(`delete from delete_test ;`) tk.CheckExecResult(1, 0) @@ -1949,7 +1949,7 @@ func (s *testSuite4) TestPartitionedTableDelete(c *C) { c.Assert(err, IsNil) tk.CheckExecResult(1, 0) r := tk.MustQuery("SHOW WARNINGS;") - r.Check(testkit.Rows("Warning 1292 Truncated incorrect FLOAT value: '2a'", "Warning 1292 Truncated incorrect FLOAT value: '2a'")) + r.Check(testkit.Rows("Warning 1292 Truncated incorrect DOUBLE value: '2a'", "Warning 1292 Truncated incorrect DOUBLE value: '2a'")) // Test delete without using index, involve multiple partitions. 
tk.MustExec("delete from t ignore index(id) where id >= 13 and id <= 17") diff --git a/expression/integration_test.go b/expression/integration_test.go index 41e04af3b0c9e..c1c5020e03c67 100644 --- a/expression/integration_test.go +++ b/expression/integration_test.go @@ -306,7 +306,7 @@ func (s *testIntegrationSuite) TestMiscellaneousBuiltin(c *C) { tk.MustQuery("select sleep(1);").Check(testkit.Rows("0")) tk.MustQuery("select sleep(0);").Check(testkit.Rows("0")) tk.MustQuery("select sleep('a');").Check(testkit.Rows("0")) - tk.MustQuery("show warnings;").Check(testkit.Rows("Warning 1292 Truncated incorrect FLOAT value: 'a'")) + tk.MustQuery("show warnings;").Check(testkit.Rows("Warning 1292 Truncated incorrect DOUBLE value: 'a'")) rs, err := tk.Exec("select sleep(-1);") c.Assert(err, IsNil) c.Assert(rs, NotNil) @@ -8037,9 +8037,9 @@ func (s *testIntegrationSerialSuite) TestIssue11177(c *C) { tk := testkit.NewTestKit(c, s.store) tk.MustQuery("SELECT 'lvuleck' BETWEEN '2008-09-16 22:23:50' AND 0;").Check(testkit.Rows("0")) - tk.MustQuery("show warnings;").Check(testkit.Rows("Warning 1292 Truncated incorrect FLOAT value: 'lvuleck'", "Warning 1292 Truncated incorrect FLOAT value: '2008-09-16 22:23:50'")) + tk.MustQuery("show warnings;").Check(testkit.Rows("Warning 1292 Truncated incorrect DOUBLE value: 'lvuleck'", "Warning 1292 Truncated incorrect DOUBLE value: '2008-09-16 22:23:50'")) tk.MustQuery("SELECT 'aa' BETWEEN 'bb' AND 0;").Check(testkit.Rows("1")) - tk.MustQuery("show warnings;").Check(testkit.Rows("Warning 1292 Truncated incorrect FLOAT value: 'aa'", "Warning 1292 Truncated incorrect FLOAT value: 'bb'")) + tk.MustQuery("show warnings;").Check(testkit.Rows("Warning 1292 Truncated incorrect DOUBLE value: 'aa'", "Warning 1292 Truncated incorrect DOUBLE value: 'bb'")) tk.MustQuery("select 1 between 0 and b'110';").Check(testkit.Rows("1")) tk.MustQuery("show warnings;").Check(testkit.Rows()) tk.MustQuery("select 'b' between 'a' and b'110';").Check(testkit.Rows("0")) diff --git a/types/convert.go b/types/convert.go index ee32a73a30f1d..f3ab3c69ad807 100644 --- a/types/convert.go +++ b/types/convert.go @@ -710,7 +710,7 @@ func getValidFloatPrefix(sc *stmtctx.StatementContext, s string, isFuncCast bool valid = "0" } if validLen == 0 || validLen != len(s) { - err = errors.Trace(sc.HandleTruncate(ErrTruncatedWrongVal.GenWithStackByArgs("FLOAT", s))) + err = errors.Trace(sc.HandleTruncate(ErrTruncatedWrongVal.GenWithStackByArgs("DOUBLE", s))) } return valid, err } From 58d8b960fa69fe829cf328fe486f8f14d0d9a2b3 Mon Sep 17 00:00:00 2001 From: baishen Date: Sun, 20 Jun 2021 22:42:53 -0500 Subject: [PATCH 23/25] execution: fix dayofweek('0000-00-00') behavior (#21508) --- executor/insert_common.go | 2 +- expression/integration_test.go | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/executor/insert_common.go b/executor/insert_common.go index 9970c2409c433..70345de2058d3 100644 --- a/executor/insert_common.go +++ b/executor/insert_common.go @@ -343,7 +343,7 @@ func (e *InsertValues) evalRow(ctx context.Context, list []expression.Expression e.evalBuffer.SetDatums(row...) 
From 58d8b960fa69fe829cf328fe486f8f14d0d9a2b3 Mon Sep 17 00:00:00 2001
From: baishen
Date: Sun, 20 Jun 2021 22:42:53 -0500
Subject: [PATCH 23/25] execution: fix dayofweek('0000-00-00') behavior (#21508)

---
 executor/insert_common.go      | 2 +-
 expression/integration_test.go | 8 ++++----
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/executor/insert_common.go b/executor/insert_common.go
index 9970c2409c433..70345de2058d3 100644
--- a/executor/insert_common.go
+++ b/executor/insert_common.go
@@ -343,7 +343,7 @@ func (e *InsertValues) evalRow(ctx context.Context, list []expression.Expression
 	e.evalBuffer.SetDatums(row...)
 	for i, expr := range list {
 		val, err := expr.Eval(e.evalBuffer.ToRow())
-		if err = e.handleErr(e.insertColumns[i], &val, rowIdx, err); err != nil {
+		if err != nil {
 			return nil, err
 		}
 		val1, err := table.CastValue(e.ctx, val, e.insertColumns[i].ToInfo(), false, false)
diff --git a/expression/integration_test.go b/expression/integration_test.go
index c1c5020e03c67..1e0e8e5c7f0d4 100644
--- a/expression/integration_test.go
+++ b/expression/integration_test.go
@@ -1918,7 +1918,7 @@ func (s *testIntegrationSuite2) TestTimeBuiltin(c *C) {
 	tk.MustExec("set sql_mode = 'STRICT_TRANS_TABLES'")
 	_, err = tk.Exec("insert into t value(dayOfWeek('0000-00-00'))")
-	c.Assert(table.ErrTruncatedWrongValueForField.Equal(err), IsTrue, Commentf("%v", err))
+	c.Assert(types.ErrWrongValue.Equal(err), IsTrue, Commentf("%v", err))
 	_, err = tk.Exec(`update t set a = dayOfWeek("0000-00-00")`)
 	c.Assert(types.ErrWrongValue.Equal(err), IsTrue)
 	_, err = tk.Exec(`delete from t where a = dayOfWeek(123)`)
@@ -1933,7 +1933,7 @@ func (s *testIntegrationSuite2) TestTimeBuiltin(c *C) {
 	tk.MustExec(`update t set a = dayOfMonth("0000-00-00")`)
 	tk.MustExec("set sql_mode = 'NO_ZERO_DATE,STRICT_TRANS_TABLES';")
 	_, err = tk.Exec("insert into t value(dayOfMonth('0000-00-00'))")
-	c.Assert(table.ErrTruncatedWrongValueForField.Equal(err), IsTrue)
+	c.Assert(types.ErrWrongValue.Equal(err), IsTrue)
 	tk.MustExec("insert into t value(0)")
 	_, err = tk.Exec(`update t set a = dayOfMonth("0000-00-00")`)
 	c.Assert(types.ErrWrongValue.Equal(err), IsTrue)
@@ -1941,7 +1941,7 @@ func (s *testIntegrationSuite2) TestTimeBuiltin(c *C) {
 	c.Assert(err, IsNil)
 
 	_, err = tk.Exec("insert into t value(dayOfYear('0000-00-00'))")
-	c.Assert(table.ErrTruncatedWrongValueForField.Equal(err), IsTrue)
+	c.Assert(types.ErrWrongValue.Equal(err), IsTrue)
 	_, err = tk.Exec(`update t set a = dayOfYear("0000-00-00")`)
 	c.Assert(types.ErrWrongValue.Equal(err), IsTrue)
 	_, err = tk.Exec(`delete from t where a = dayOfYear(123)`)
@@ -2038,7 +2038,7 @@ func (s *testIntegrationSuite2) TestTimeBuiltin(c *C) {
 	tk.MustExec("set sql_mode = 'STRICT_TRANS_TABLES'")
 	_, err = tk.Exec("insert into t value(dayname('0000-00-00'))")
-	c.Assert(table.ErrTruncatedWrongValueForField.Equal(err), IsTrue)
+	c.Assert(types.ErrWrongValue.Equal(err), IsTrue)
 	_, err = tk.Exec(`update t set a = dayname("0000-00-00")`)
 	c.Assert(types.ErrWrongValue.Equal(err), IsTrue)
 	_, err = tk.Exec(`delete from t where a = dayname(123)`)

From 449c0c6b42f2cc7255a670b7c1859615a91af493 Mon Sep 17 00:00:00 2001
From: O2
Date: Mon, 21 Jun 2021 11:52:52 +0800
Subject: [PATCH 24/25] executor: checking chunk is full precedes filtering (#23963)

---
 executor/executor.go | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/executor/executor.go b/executor/executor.go
index b74520f556b37..a3497b1b15331 100644
--- a/executor/executor.go
+++ b/executor/executor.go
@@ -1290,12 +1290,14 @@ func (e *SelectionExec) Next(ctx context.Context, req *chunk.Chunk) error {
 
 	for {
 		for ; e.inputRow != e.inputIter.End(); e.inputRow = e.inputIter.Next() {
-			if !e.selected[e.inputRow.Idx()] {
-				continue
-			}
 			if req.IsFull() {
 				return nil
 			}
+
+			if !e.selected[e.inputRow.Idx()] {
+				continue
+			}
+
 			req.AppendRow(e.inputRow)
 		}
 		mSize := e.childResult.MemoryUsage()
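
Patch 24 moves the chunk-full check ahead of the filter, so the check runs for every input row and the loop can stop and later resume exactly at the capacity boundary. A compact, runnable model of that loop shape follows; the slices here are stand-ins for the real chunk and selection vector.

package main

import "fmt"

// selectInto appends selected rows from input into out, never exceeding
// capacity, and returns the index at which the next call should resume.
func selectInto(out *[]int, capacity int, input []int, selected []bool, start int) int {
	for i := start; i < len(input); i++ {
		// Check fullness first (as the patch does), before the filter, so we
		// stop precisely at the capacity boundary and keep the current row.
		if len(*out) >= capacity {
			return i
		}
		if !selected[i] {
			continue
		}
		*out = append(*out, input[i])
	}
	return len(input)
}

func main() {
	input := []int{1, 2, 3, 4, 5, 6}
	selected := []bool{true, true, true, true, true, true}
	var out []int
	next := selectInto(&out, 3, input, selected, 0)
	fmt.Println(out, "resume at", next) // [1 2 3] resume at 3
	out = out[:0]
	next = selectInto(&out, 3, input, selected, next)
	fmt.Println(out, "resume at", next) // [4 5 6] resume at 6
}
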
From bd8d3b1a8cfb8888a58d997bda0e3a5debbbea35 Mon Sep 17 00:00:00 2001
From: bb7133
Date: Mon, 21 Jun 2021 13:26:52 +0800
Subject: [PATCH 25/25] expression: uncomment pushdown for JSONUnquote expression (#24504)

---
 expression/expression.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/expression/expression.go b/expression/expression.go
index daa3be56a9959..6a456329bd0ce 100644
--- a/expression/expression.go
+++ b/expression/expression.go
@@ -961,7 +961,7 @@ func scalarExprSupportedByTiKV(sf *ScalarFunction) bool {
 		ast.JSONType, ast.JSONExtract, ast.JSONObject, ast.JSONArray, ast.JSONMerge, ast.JSONSet,
 		ast.JSONInsert /*ast.JSONReplace,*/, ast.JSONRemove, ast.JSONLength,
 		// FIXME: JSONUnquote is incompatible with Coprocessor
-		// ast.JSONUnquote,
+		ast.JSONUnquote,
 
 		// date functions.
 		ast.DateFormat, ast.FromDays /*ast.ToDays,*/, ast.DayOfYear, ast.DayOfMonth, ast.Year, ast.Month,