diff --git a/.bazelrc b/.bazelrc index e1e11044ae533..855bc5770e169 100644 --- a/.bazelrc +++ b/.bazelrc @@ -5,9 +5,9 @@ run --color=yes build:release --workspace_status_command=./build/print-workspace-status.sh --stamp build:release --config=ci build --incompatible_strict_action_env --incompatible_enable_cc_toolchain_resolution -build:ci --remote_cache=http://172.16.4.3:8084/tidb +build:ci --remote_cache=http://172.16.4.21:8080/tidb --remote_timeout="10s" test:ci --verbose_failures test:ci --test_timeout=180 test:ci --test_env=GO_TEST_WRAP_TESTV=1 --test_verbose_timeout_warnings -test:ci --remote_cache=http://172.16.4.3:8084/tidb +test:ci --remote_cache=http://172.16.4.21:8080/tidb --remote_timeout="10s" test:ci --test_env=TZ=Asia/Shanghai --test_output=errors --experimental_ui_max_stdouterr_bytes=104857600 diff --git a/DEPS.bzl b/DEPS.bzl index 4b36dbcba743c..732ba50c819e8 100644 --- a/DEPS.bzl +++ b/DEPS.bzl @@ -1,6 +1,35 @@ load("@bazel_gazelle//:deps.bzl", "go_repository") def go_deps(): + go_repository( + name = "cc_mvdan_gofumpt", + build_file_proto_mode = "disable", + importpath = "mvdan.cc/gofumpt", + sum = "h1:avhhrOmv0IuvQVK7fvwV91oFSGAk5/6Po8GXTzICeu8=", + version = "v0.3.1", + ) + go_repository( + name = "cc_mvdan_interfacer", + build_file_proto_mode = "disable", + importpath = "mvdan.cc/interfacer", + sum = "h1:WX1yoOaKQfddO/mLzdV4wptyWgoH/6hwLs7QHTixo0I=", + version = "v0.0.0-20180901003855-c20040233aed", + ) + go_repository( + name = "cc_mvdan_lint", + build_file_proto_mode = "disable", + importpath = "mvdan.cc/lint", + sum = "h1:DxJ5nJdkhDlLok9K6qO+5290kphDJbHOQO1DFFFTeBo=", + version = "v0.0.0-20170908181259-adc824a0674b", + ) + go_repository( + name = "cc_mvdan_unparam", + build_file_proto_mode = "disable", + importpath = "mvdan.cc/unparam", + sum = "h1:Jh3LAeMt1eGpxomyu3jVkmVZWW2MxZ1qIIV2TZ/nRio=", + version = "v0.0.0-20211214103731-d0ef000c54e5", + ) + go_repository( name = "co_honnef_go_tools", build_file_proto_mode = "disable_global", @@ -9,6 +38,14 @@ def go_deps(): sum = "h1:ytYb4rOqyp1TSa2EPvNVwtPQJctSELKaMyLfqNP4+34=", version = "v0.3.2", ) + go_repository( + name = "com_4d63_gochecknoglobals", + build_file_proto_mode = "disable", + importpath = "4d63.com/gochecknoglobals", + sum = "h1:zeZSRqj5yCg28tCkIV/z/lWbwvNm5qnKVS15PI8nhD0=", + version = "v0.1.0", + ) + go_repository( name = "com_github_ajg_form", build_file_proto_mode = "disable_global", @@ -37,6 +74,14 @@ def go_deps(): sum = "h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E=", version = "v0.0.0-20190924025748-f65c72e2690d", ) + go_repository( + name = "com_github_alexkohler_prealloc", + build_file_proto_mode = "disable", + importpath = "github.com/alexkohler/prealloc", + sum = "h1:Hbq0/3fJPQhNkN0dR95AVrr6R7tou91y0uHG5pOcUuw=", + version = "v1.0.0", + ) + go_repository( name = "com_github_aliyun_alibaba_cloud_sdk_go", build_file_proto_mode = "disable", @@ -59,6 +104,21 @@ def go_deps(): sum = "h1:xK2lYat7ZLaVVcIuj82J8kIro4V6kDe0AUDFboUCwcg=", version = "v1.0.0", ) + go_repository( + name = "com_github_antonboom_errname", + build_file_proto_mode = "disable", + importpath = "github.com/Antonboom/errname", + sum = "h1:LzIJZlyLOCSu51o3/t2n9Ck7PcoP9wdbrdaW6J8fX24=", + version = "v0.1.6", + ) + go_repository( + name = "com_github_antonboom_nilnil", + build_file_proto_mode = "disable", + importpath = "github.com/Antonboom/nilnil", + sum = "h1:PHhrh5ANKFWRBh7TdYmyyq2gyT2lotnvFvvFbylF81Q=", + version = "v0.1.1", + ) + go_repository( name = "com_github_apache_thrift", build_file_proto_mode = "disable_global", @@ 
-94,6 +154,21 @@ def go_deps(): sum = "h1:BUAU3CGlLvorLI26FmByPp2eC2qla6E1Tw+scpcg/to=", version = "v0.0.0-20180808171621-7fddfc383310", ) + go_repository( + name = "com_github_ashanbrown_forbidigo", + build_file_proto_mode = "disable", + importpath = "github.com/ashanbrown/forbidigo", + sum = "h1:VkYIwb/xxdireGAdJNZoo24O4lmnEWkactplBlWTShc=", + version = "v1.3.0", + ) + go_repository( + name = "com_github_ashanbrown_makezero", + build_file_proto_mode = "disable", + importpath = "github.com/ashanbrown/makezero", + sum = "h1:iCQ87C0V0vSyO+M9E/FZYbu65auqH0lnsOkf5FcB28s=", + version = "v1.1.1", + ) + go_repository( name = "com_github_aws_aws_sdk_go", build_file_proto_mode = "disable_global", @@ -164,6 +239,14 @@ def go_deps(): sum = "h1:+0HFd5KSZ/mm3JmhmrDukiId5iR6w4+BdFtfSy4yWIc=", version = "v0.0.3-0.20200106085610-5cbc8cc4026c", ) + go_repository( + name = "com_github_bkielbasa_cyclop", + build_file_proto_mode = "disable", + importpath = "github.com/bkielbasa/cyclop", + sum = "h1:7Jmnh0yL2DjKfw28p86YTd/B4lRGcNuu12sKE35sM7A=", + version = "v1.2.0", + ) + go_repository( name = "com_github_blacktear23_go_proxyprotocol", build_file_proto_mode = "disable_global", @@ -171,12 +254,41 @@ def go_deps(): sum = "h1:WmMmtZanGEfIHnJN9N3A4Pl6mM69D+GxEph2eOaCf7g=", version = "v1.0.0", ) + go_repository( + name = "com_github_blizzy78_varnamelen", + build_file_proto_mode = "disable", + importpath = "github.com/blizzy78/varnamelen", + sum = "h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ089M=", + version = "v0.8.0", + ) + go_repository( + name = "com_github_bombsimon_wsl_v3", + build_file_proto_mode = "disable", + importpath = "github.com/bombsimon/wsl/v3", + sum = "h1:Mka/+kRLoQJq7g2rggtgQsjuI/K5Efd87WX96EWFxjM=", + version = "v3.3.0", + ) + go_repository( + name = "com_github_breml_bidichk", + build_file_proto_mode = "disable", + importpath = "github.com/breml/bidichk", + sum = "h1:qe6ggxpTfA8E75hdjWPZ581sY3a2lnl0IRxLQFelECI=", + version = "v0.2.3", + ) + go_repository( + name = "com_github_breml_errchkjson", + build_file_proto_mode = "disable", + importpath = "github.com/breml/errchkjson", + sum = "h1:YdDqhfqMT+I1vIxPSas44P+9Z9HzJwCeAzjB8PxP1xw=", + version = "v0.3.0", + ) + go_repository( name = "com_github_burntsushi_toml", build_file_proto_mode = "disable_global", importpath = "github.com/BurntSushi/toml", - sum = "h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw=", - version = "v0.4.1", + sum = "h1:ksErzDEI1khOiGPgpwuI7x2ebx/uXQNw7xJpn9Eq1+I=", + version = "v1.1.0", ) go_repository( name = "com_github_burntsushi_xgb", @@ -185,6 +297,14 @@ def go_deps(): sum = "h1:1BDTz0u9nC3//pOCMdNH+CiXJVYJh5UQNCOBG7jbELc=", version = "v0.0.0-20160522181843-27f122750802", ) + go_repository( + name = "com_github_butuzov_ireturn", + build_file_proto_mode = "disable", + importpath = "github.com/butuzov/ireturn", + sum = "h1:QvrO2QF2+/Cx1WA/vETCIYBKtRjc30vesdoPUNo1EbY=", + version = "v0.1.1", + ) + go_repository( name = "com_github_carlmjohnson_flagext", build_file_proto_mode = "disable_global", @@ -227,6 +347,13 @@ def go_deps(): sum = "h1:mPP4ucLrf/rKZiIG/a9IPXHGlh8p4CzgpyTy6EEutYk=", version = "v0.0.9", ) + go_repository( + name = "com_github_chavacava_garif", + build_file_proto_mode = "disable", + importpath = "github.com/chavacava/garif", + sum = "h1:tFXjAxje9thrTF4h57Ckik+scJjTWdwAtZqZPtOT48M=", + version = "v0.0.0-20220316182200-5cad0b5181d4", + ) go_repository( name = "com_github_cheggaaa_pb_v3", @@ -509,6 +636,14 @@ def go_deps(): sum = "h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=", version = "v1.1.1", ) + 
go_repository( + name = "com_github_denis_tingaikin_go_header", + build_file_proto_mode = "disable", + importpath = "github.com/denis-tingaikin/go-header", + sum = "h1:tEaZKAlqql6SKCY++utLmkPLd6K8IBM20Ha7UVm+mtU=", + version = "v0.4.3", + ) + go_repository( name = "com_github_dgraph_io_badger", build_file_proto_mode = "disable_global", @@ -545,6 +680,14 @@ def go_deps(): sum = "h1:RMLoZVzv4GliuWafOuPuQDKSm1SJph7uCRnnS61JAn4=", version = "v0.0.0-20181026042036-e10d5fee7954", ) + go_repository( + name = "com_github_djarvur_go_err113", + build_file_proto_mode = "disable", + importpath = "github.com/Djarvur/go-err113", + sum = "h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM=", + version = "v0.0.0-20210108212216-aea10b59be24", + ) + go_repository( name = "com_github_dnaeon_go_vcr", build_file_proto_mode = "disable_global", @@ -608,6 +751,14 @@ def go_deps(): sum = "h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A=", version = "v0.1.0", ) + go_repository( + name = "com_github_esimonov_ifshort", + build_file_proto_mode = "disable", + importpath = "github.com/esimonov/ifshort", + sum = "h1:6SID4yGWfRae/M7hkVDVVyppy8q/v9OuxNdmjLQStBA=", + version = "v1.0.4", + ) + go_repository( name = "com_github_etcd_io_bbolt", build_file_proto_mode = "disable_global", @@ -622,6 +773,14 @@ def go_deps(): sum = "h1:Y2I0lxOttdUKz+hNaIdG3FtjuQrTmwXun1opRV65IZc=", version = "v0.0.0-20190801230047-ad7f989257ca", ) + go_repository( + name = "com_github_ettle_strcase", + build_file_proto_mode = "disable", + importpath = "github.com/ettle/strcase", + sum = "h1:htFueZyVeE1XNnMEfbqp5r67qAN/4r6ya1ysq8Q+Zcw=", + version = "v0.1.1", + ) + go_repository( name = "com_github_fasthttp_contrib_websocket", build_file_proto_mode = "disable_global", @@ -643,6 +802,14 @@ def go_deps(): sum = "h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=", version = "v1.1.0", ) + go_repository( + name = "com_github_fatih_structtag", + build_file_proto_mode = "disable", + importpath = "github.com/fatih/structtag", + sum = "h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4=", + version = "v1.2.0", + ) + go_repository( name = "com_github_felixge_httpsnoop", build_file_proto_mode = "disable_global", @@ -650,6 +817,14 @@ def go_deps(): sum = "h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ=", version = "v1.0.1", ) + go_repository( + name = "com_github_firefart_nonamedreturns", + build_file_proto_mode = "disable", + importpath = "github.com/firefart/nonamedreturns", + sum = "h1:fSvcq6ZpK/uBAgJEGMvzErlzyM4NELLqqdTofVjVNag=", + version = "v1.0.1", + ) + go_repository( name = "com_github_flosch_pongo2", build_file_proto_mode = "disable_global", @@ -689,8 +864,8 @@ def go_deps(): name = "com_github_fsnotify_fsnotify", build_file_proto_mode = "disable_global", importpath = "github.com/fsnotify/fsnotify", - sum = "h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI=", - version = "v1.5.1", + sum = "h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI=", + version = "v1.5.4", ) go_repository( name = "com_github_fsouza_fake_gcs_server", @@ -703,9 +878,17 @@ def go_deps(): name = "com_github_fzipp_gocyclo", build_file_proto_mode = "disable_global", importpath = "github.com/fzipp/gocyclo", - sum = "h1:A9UeX3HJSXTBzvHzhqoYVuE0eAhe+aM8XBCCwsPMZOc=", - version = "v0.3.1", + sum = "h1:L66amyuYogbxl0j2U+vGqJXusPF2IkduvXLnYD5TFgw=", + version = "v0.5.1", ) + go_repository( + name = "com_github_gaijinentertainment_go_exhaustruct_v2", + build_file_proto_mode = "disable", + importpath = "github.com/GaijinEntertainment/go-exhaustruct/v2", + sum = 
"h1:LAPPhJ4KR5Z8aKVZF5S48csJkxL5RMKmE/98fMs1u5M=", + version = "v2.1.0", + ) + go_repository( name = "com_github_gavv_httpexpect", build_file_proto_mode = "disable_global", @@ -755,6 +938,14 @@ def go_deps(): sum = "h1:0gkP6mzaMqkmpcJYCFOLkIBwI7xFExG03bbkOkCvUPI=", version = "v0.0.0-20180628173108-788fd7840127", ) + go_repository( + name = "com_github_go_critic_go_critic", + build_file_proto_mode = "disable", + importpath = "github.com/go-critic/go-critic", + sum = "h1:abibh5XYBTASawfTQ0rA7dVtQT+6KzpGqb/J+DxRDaw=", + version = "v0.6.3", + ) + go_repository( name = "com_github_go_errors_errors", build_file_proto_mode = "disable_global", @@ -832,6 +1023,70 @@ def go_deps(): sum = "h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=", version = "v0.0.0-20210107165309-348f09dbbbc0", ) + go_repository( + name = "com_github_go_toolsmith_astcast", + build_file_proto_mode = "disable", + importpath = "github.com/go-toolsmith/astcast", + sum = "h1:JojxlmI6STnFVG9yOImLeGREv8W2ocNUM+iOhR6jE7g=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_go_toolsmith_astcopy", + build_file_proto_mode = "disable", + importpath = "github.com/go-toolsmith/astcopy", + sum = "h1:OMgl1b1MEpjFQ1m5ztEO06rz5CUd3oBv9RF7+DyvdG8=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_go_toolsmith_astequal", + build_file_proto_mode = "disable", + importpath = "github.com/go-toolsmith/astequal", + sum = "h1:JbSszi42Jiqu36Gnf363HWS9MTEAz67vTQLponh3Moc=", + version = "v1.0.1", + ) + go_repository( + name = "com_github_go_toolsmith_astfmt", + build_file_proto_mode = "disable", + importpath = "github.com/go-toolsmith/astfmt", + sum = "h1:A0vDDXt+vsvLEdbMFJAUBI/uTbRw1ffOPnxsILnFL6k=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_go_toolsmith_astp", + build_file_proto_mode = "disable", + importpath = "github.com/go-toolsmith/astp", + sum = "h1:alXE75TXgcmupDsMK1fRAy0YUzLzqPVvBKoyWV+KPXg=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_go_toolsmith_strparse", + build_file_proto_mode = "disable", + importpath = "github.com/go-toolsmith/strparse", + sum = "h1:Vcw78DnpCAKlM20kSbAyO4mPfJn/lyYA4BJUDxe2Jb4=", + version = "v1.0.0", + ) + go_repository( + name = "com_github_go_toolsmith_typep", + build_file_proto_mode = "disable", + importpath = "github.com/go-toolsmith/typep", + sum = "h1:8xdsa1+FSIH/RhEkgnD1j2CJOy5mNllW1Q9tRiYwvlk=", + version = "v1.0.2", + ) + go_repository( + name = "com_github_go_xmlfmt_xmlfmt", + build_file_proto_mode = "disable", + importpath = "github.com/go-xmlfmt/xmlfmt", + sum = "h1:khEcpUM4yFcxg4/FHQWkvVRmgijNXRfzkIDHh23ggEo=", + version = "v0.0.0-20191208150333-d5b6f63a941b", + ) + go_repository( + name = "com_github_gobwas_glob", + build_file_proto_mode = "disable", + importpath = "github.com/gobwas/glob", + sum = "h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=", + version = "v0.2.3", + ) + go_repository( name = "com_github_gobwas_httphead", build_file_proto_mode = "disable_global", @@ -860,6 +1115,14 @@ def go_deps(): sum = "h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA=", version = "v5.0.4", ) + go_repository( + name = "com_github_gofrs_flock", + build_file_proto_mode = "disable", + importpath = "github.com/gofrs/flock", + sum = "h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=", + version = "v0.8.1", + ) + go_repository( name = "com_github_gogo_googleapis", build_file_proto_mode = "disable_global", @@ -935,6 +1198,28 @@ def go_deps(): sum = "h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=", version = "v0.0.4", ) + go_repository( + name = 
"com_github_golangci_check", + build_file_proto_mode = "disable", + importpath = "github.com/golangci/check", + sum = "h1:23T5iq8rbUYlhpt5DB4XJkc6BU31uODLD1o1gKvZmD0=", + version = "v0.0.0-20180506172741-cfe4005ccda2", + ) + go_repository( + name = "com_github_golangci_dupl", + build_file_proto_mode = "disable", + importpath = "github.com/golangci/dupl", + sum = "h1:w8hkcTqaFpzKqonE9uMCefW1WDie15eSP/4MssdenaM=", + version = "v0.0.0-20180902072040-3e9179ac440a", + ) + go_repository( + name = "com_github_golangci_go_misc", + build_file_proto_mode = "disable", + importpath = "github.com/golangci/go-misc", + sum = "h1:6RGUuS7EGotKx6J5HIP8ZtyMdiDscjMLfRBSPuzVVeo=", + version = "v0.0.0-20220329215616-d24fe342adfe", + ) + go_repository( name = "com_github_golangci_gofmt", build_file_proto_mode = "disable", @@ -942,6 +1227,35 @@ def go_deps(): sum = "h1:iR3fYXUjHCR97qWS8ch1y9zPNsgXThGwjKPrYfqMPks=", version = "v0.0.0-20190930125516-244bba706f1a", ) + go_repository( + name = "com_github_golangci_golangci_lint", + build_file_proto_mode = "disable", + importpath = "github.com/golangci/golangci-lint", + sum = "h1:o90t/Xa6dhJbvy8Bz2RpzUXqrkigp19DLStMolTZbyo=", + version = "v1.46.2", + ) + go_repository( + name = "com_github_golangci_gosec", + build_file_proto_mode = "disable", + importpath = "github.com/golangci/gosec", + sum = "h1:Bi7BYmZVg4C+mKGi8LeohcP2GGUl2XJD4xCkJoZSaYc=", + version = "v0.0.0-20180901114220-8afd9cbb6cfb", + ) + go_repository( + name = "com_github_golangci_lint_1", + build_file_proto_mode = "disable", + importpath = "github.com/golangci/lint-1", + sum = "h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA=", + version = "v0.0.0-20191013205115-297bf364a8e0", + ) + go_repository( + name = "com_github_golangci_maligned", + build_file_proto_mode = "disable", + importpath = "github.com/golangci/maligned", + sum = "h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA=", + version = "v0.0.0-20180506175553-b1d89398deca", + ) + go_repository( name = "com_github_golangci_misspell", build_file_proto_mode = "disable", @@ -957,6 +1271,14 @@ def go_deps(): sum = "h1:leSNB7iYzLYSSx3J/s5sVf4Drkc68W2wm4Ixh/mr0us=", version = "v0.0.0-20180630174525-215b22d4de21", ) + go_repository( + name = "com_github_golangci_revgrep", + build_file_proto_mode = "disable", + importpath = "github.com/golangci/revgrep", + sum = "h1:SgM7GDZTxtTTQPU84heOxy34iG5Du7F2jcoZnvp+fXI=", + version = "v0.0.0-20210930125155-c22e5001d4f2", + ) + go_repository( name = "com_github_golangci_unconvert", build_file_proto_mode = "disable", @@ -1092,6 +1414,35 @@ def go_deps(): sum = "h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=", version = "v1.4.2", ) + go_repository( + name = "com_github_gostaticanalysis_analysisutil", + build_file_proto_mode = "disable", + importpath = "github.com/gostaticanalysis/analysisutil", + sum = "h1:ZMCjoue3DtDWQ5WyU16YbjbQEQ3VuzwxALrpYd+HeKk=", + version = "v0.7.1", + ) + go_repository( + name = "com_github_gostaticanalysis_comment", + build_file_proto_mode = "disable", + importpath = "github.com/gostaticanalysis/comment", + sum = "h1:hlnx5+S2fY9Zo9ePo4AhgYsYHbM2+eAv8m/s1JiCd6Q=", + version = "v1.4.2", + ) + go_repository( + name = "com_github_gostaticanalysis_forcetypeassert", + build_file_proto_mode = "disable", + importpath = "github.com/gostaticanalysis/forcetypeassert", + sum = "h1:6eUflI3DiGusXGK6X7cCcIgVCpZ2CiZ1Q7jl6ZxNV70=", + version = "v0.1.0", + ) + go_repository( + name = "com_github_gostaticanalysis_nilerr", + build_file_proto_mode = "disable", + importpath = "github.com/gostaticanalysis/nilerr", + sum 
= "h1:ThE+hJP0fEp4zWLkWHWcRyI2Od0p7DlgYG3Uqrmrcpk=", + version = "v0.1.1", + ) + go_repository( name = "com_github_grpc_ecosystem_go_grpc_middleware", build_file_proto_mode = "disable_global", @@ -1164,8 +1515,8 @@ def go_deps(): name = "com_github_hashicorp_go_multierror", build_file_proto_mode = "disable_global", importpath = "github.com/hashicorp/go-multierror", - sum = "h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=", - version = "v1.0.0", + sum = "h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=", + version = "v1.1.1", ) go_repository( name = "com_github_hashicorp_go_net", @@ -1206,8 +1557,8 @@ def go_deps(): name = "com_github_hashicorp_go_version", build_file_proto_mode = "disable_global", importpath = "github.com/hashicorp/go-version", - sum = "h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E=", - version = "v1.2.0", + sum = "h1:aAQzgqIrRKRa7w75CKpbBxYsmUoPjzVm1W59ca1L0J4=", + version = "v1.4.0", ) go_repository( name = "com_github_hashicorp_golang_lru", @@ -1392,6 +1743,28 @@ def go_deps(): sum = "h1:ANfZYjpMlfTTKebycu4X1AgkVWumFVDYQl7JwOr4mDk=", version = "v2.5.1", ) + go_repository( + name = "com_github_jgautheron_goconst", + build_file_proto_mode = "disable", + importpath = "github.com/jgautheron/goconst", + sum = "h1:HxVbL1MhydKs8R8n/HE5NPvzfaYmQJA3o879lE4+WcM=", + version = "v1.5.1", + ) + go_repository( + name = "com_github_jingyugao_rowserrcheck", + build_file_proto_mode = "disable", + importpath = "github.com/jingyugao/rowserrcheck", + sum = "h1:zibz55j/MJtLsjP1OF4bSdgXxwL1b+Vn7Tjzq7gFzUs=", + version = "v1.1.1", + ) + go_repository( + name = "com_github_jirfag_go_printf_func_name", + build_file_proto_mode = "disable", + importpath = "github.com/jirfag/go-printf-func-name", + sum = "h1:KA9BjwUk7KlCh6S9EAGWBt1oExIUv9WyNCiRz5amv48=", + version = "v0.0.0-20200119135958-7558a9eaa5af", + ) + go_repository( name = "com_github_jmespath_go_jmespath", build_file_proto_mode = "disable_global", @@ -1490,6 +1863,14 @@ def go_deps(): sum = "h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=", version = "v1.3.0", ) + go_repository( + name = "com_github_julz_importas", + build_file_proto_mode = "disable", + importpath = "github.com/julz/importas", + sum = "h1:F78HnrsjY3cR7j0etXy5+TU1Zuy7Xt08X/1aJnH5xXY=", + version = "v0.1.0", + ) + go_repository( name = "com_github_jung_kurt_gofpdf", build_file_proto_mode = "disable_global", @@ -1599,6 +1980,21 @@ def go_deps(): sum = "h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=", version = "v0.2.0", ) + go_repository( + name = "com_github_kulti_thelper", + build_file_proto_mode = "disable", + importpath = "github.com/kulti/thelper", + sum = "h1:K4xulKkwOCnT1CDms6Ex3uG1dvSMUUQe9zxgYQgbRXs=", + version = "v0.6.2", + ) + go_repository( + name = "com_github_kunwardeep_paralleltest", + build_file_proto_mode = "disable", + importpath = "github.com/kunwardeep/paralleltest", + sum = "h1:UdKIkImEAXjR1chUWLn+PNXqWUGs//7tzMeWuP7NhmI=", + version = "v1.0.3", + ) + go_repository( name = "com_github_kyoh86_exportloopref", build_file_proto_mode = "disable", @@ -1621,6 +2017,35 @@ def go_deps(): sum = "h1:JEeO0bvc78PKdyHxloTKiF8BD5iGrH8T6MSeGvSgob0=", version = "v0.3.0", ) + go_repository( + name = "com_github_ldez_gomoddirectives", + build_file_proto_mode = "disable", + importpath = "github.com/ldez/gomoddirectives", + sum = "h1:y7MBaisZVDYmKvt9/l1mjNCiSA1BVn34U0ObUcJwlhA=", + version = "v0.2.3", + ) + go_repository( + name = "com_github_ldez_tagliatelle", + build_file_proto_mode = "disable", + importpath = "github.com/ldez/tagliatelle", + sum = 
"h1:3BqVVlReVUZwafJUwQ+oxbx2BEX2vUG4Yu/NOfMiKiM=", + version = "v0.3.1", + ) + go_repository( + name = "com_github_leonklingele_grouper", + build_file_proto_mode = "disable", + importpath = "github.com/leonklingele/grouper", + sum = "h1:tC2y/ygPbMFSBOs3DcyaEMKnnwH7eYKzohOtRrf0SAg=", + version = "v1.1.0", + ) + go_repository( + name = "com_github_lufeee_execinquery", + build_file_proto_mode = "disable", + importpath = "github.com/lufeee/execinquery", + sum = "h1:hf0Ems4SHcUGBxpGN7Jz78z1ppVkP/837ZlETPCEtOM=", + version = "v1.2.1", + ) + go_repository( name = "com_github_lufia_plan9stats", build_file_proto_mode = "disable_global", @@ -1632,9 +2057,31 @@ def go_deps(): name = "com_github_magiconair_properties", build_file_proto_mode = "disable_global", importpath = "github.com/magiconair/properties", - sum = "h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=", - version = "v1.8.1", + sum = "h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo=", + version = "v1.8.6", ) + go_repository( + name = "com_github_maratori_testpackage", + build_file_proto_mode = "disable", + importpath = "github.com/maratori/testpackage", + sum = "h1:QtJ5ZjqapShm0w5DosRjg0PRlSdAdlx+W6cCKoALdbQ=", + version = "v1.0.1", + ) + go_repository( + name = "com_github_masterminds_semver", + build_file_proto_mode = "disable", + importpath = "github.com/Masterminds/semver", + sum = "h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=", + version = "v1.5.0", + ) + go_repository( + name = "com_github_matoous_godox", + build_file_proto_mode = "disable", + importpath = "github.com/matoous/godox", + sum = "h1:pWxk9e//NbPwfxat7RXkts09K+dEBJWakUWwICVqYbA=", + version = "v0.0.0-20210227103229-6504466cf951", + ) + go_repository( name = "com_github_mattn_go_colorable", build_file_proto_mode = "disable_global", @@ -1670,6 +2117,14 @@ def go_deps(): sum = "h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=", version = "v1.0.1", ) + go_repository( + name = "com_github_mbilski_exhaustivestruct", + build_file_proto_mode = "disable", + importpath = "github.com/mbilski/exhaustivestruct", + sum = "h1:wCBmUnSYufAHO6J4AVWY6ff+oxWxsVFrwgOdMUQePUo=", + version = "v1.2.0", + ) + go_repository( name = "com_github_mediocregopher_mediocre_go_lib", build_file_proto_mode = "disable_global", @@ -1684,6 +2139,14 @@ def go_deps(): sum = "h1:oacPXPKHJg0hcngVVrdtTnfGJiS+PtwoQwTBZGFlV4k=", version = "v3.3.0", ) + go_repository( + name = "com_github_mgechev_revive", + build_file_proto_mode = "disable", + importpath = "github.com/mgechev/revive", + sum = "h1:GjFml7ZsoR0IrQ2E2YIvWFNS5GPDV7xNwvA5GM1HZC4=", + version = "v1.2.1", + ) + go_repository( name = "com_github_microcosm_cc_bluemonday", build_file_proto_mode = "disable_global", @@ -1712,6 +2175,14 @@ def go_deps(): sum = "h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=", version = "v1.1.0", ) + go_repository( + name = "com_github_mitchellh_go_ps", + build_file_proto_mode = "disable", + importpath = "github.com/mitchellh/go-ps", + sum = "h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc=", + version = "v1.0.0", + ) + go_repository( name = "com_github_mitchellh_go_testing_interface", build_file_proto_mode = "disable_global", @@ -1737,8 +2208,8 @@ def go_deps(): name = "com_github_mitchellh_mapstructure", build_file_proto_mode = "disable_global", importpath = "github.com/mitchellh/mapstructure", - sum = "h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=", - version = "v1.1.2", + sum = "h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=", + version = "v1.5.0", ) go_repository( name = "com_github_modern_go_concurrent", @@ -1761,6 
+2232,14 @@ def go_deps(): sum = "h1:8Q0qkMVC/MmWkpIdlvZgcv2o2jrlF6zqVOh7W5YHdMA=", version = "v0.0.0-20171022184752-b58185e213c5", ) + go_repository( + name = "com_github_moricho_tparallel", + build_file_proto_mode = "disable", + importpath = "github.com/moricho/tparallel", + sum = "h1:95FytivzT6rYzdJLdtfn6m1bfFJylOJK41+lgv/EHf4=", + version = "v0.2.1", + ) + go_repository( name = "com_github_moul_http2curl", build_file_proto_mode = "disable_global", @@ -1775,6 +2254,14 @@ def go_deps(): sum = "h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=", version = "v0.0.0-20190716064945-2f068394615f", ) + go_repository( + name = "com_github_nakabonne_nestif", + build_file_proto_mode = "disable", + importpath = "github.com/nakabonne/nestif", + sum = "h1:wm28nZjhQY5HyYPx+weN3Q65k6ilSBxDb8v5S81B81U=", + version = "v0.3.1", + ) + go_repository( name = "com_github_nats_io_nats_go", build_file_proto_mode = "disable_global", @@ -1796,6 +2283,14 @@ def go_deps(): sum = "h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=", version = "v1.0.1", ) + go_repository( + name = "com_github_nbutton23_zxcvbn_go", + build_file_proto_mode = "disable", + importpath = "github.com/nbutton23/zxcvbn-go", + sum = "h1:4kuARK6Y6FxaNu/BnU2OAaLF86eTVhP2hjTB6iMvItA=", + version = "v0.0.0-20210217022336-fa2cb2858354", + ) + go_repository( name = "com_github_ncw_directio", build_file_proto_mode = "disable_global", @@ -1824,6 +2319,14 @@ def go_deps(): sum = "h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=", version = "v0.0.0-20200227124842-a10e7caefd8e", ) + go_repository( + name = "com_github_nishanths_exhaustive", + build_file_proto_mode = "disable", + importpath = "github.com/nishanths/exhaustive", + sum = "h1:xV/WU3Vdwh5BUH4N06JNUznb6d5zhRPOnlgCrpNYNKA=", + version = "v0.7.11", + ) + go_repository( name = "com_github_nishanths_predeclared", build_file_proto_mode = "disable", @@ -1881,6 +2384,14 @@ def go_deps(): sum = "h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE=", version = "v1.18.1", ) + go_repository( + name = "com_github_openpeedeep_depguard", + build_file_proto_mode = "disable", + importpath = "github.com/OpenPeeDeeP/depguard", + sum = "h1:pjK9nLPS1FwQYGGpPxoMYpe7qACHOhAWQMQzV71i49o=", + version = "v1.1.0", + ) + go_repository( name = "com_github_opentracing_basictracer_go", build_file_proto_mode = "disable_global", @@ -1913,9 +2424,24 @@ def go_deps(): name = "com_github_pelletier_go_toml", build_file_proto_mode = "disable_global", importpath = "github.com/pelletier/go-toml", - sum = "h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=", - version = "v1.2.0", + sum = "h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=", + version = "v1.9.5", ) + go_repository( + name = "com_github_pelletier_go_toml_v2", + build_file_proto_mode = "disable", + importpath = "github.com/pelletier/go-toml/v2", + sum = "h1:P7Bq0SaI8nsexyay5UAyDo+ICWy5MQPgEZ5+l8JQTKo=", + version = "v2.0.0", + ) + go_repository( + name = "com_github_phayes_checkstyle", + build_file_proto_mode = "disable", + importpath = "github.com/phayes/checkstyle", + sum = "h1:CdDQnGF8Nq9ocOS/xlSptM1N3BbrA6/kmaep5ggwaIA=", + version = "v0.0.0-20170904204023-bfd46e6a821d", + ) + go_repository( name = "com_github_phayes_freeport", build_file_proto_mode = "disable_global", @@ -1976,8 +2502,8 @@ def go_deps(): name = "com_github_pingcap_kvproto", build_file_proto_mode = "disable_global", importpath = "github.com/pingcap/kvproto", - sum = "h1:TZ0teMZoKHnZDlJxNkWrp5Sgv3w+ruNbrqtBYKsfaNw=", - version = "v0.0.0-20220525022339-6aaebf466305", + sum = 
"h1:nP2wmyw9JTRsk5rm+tZtfAso6c/1FvuaFNbXTaYz3FE=", + version = "v0.0.0-20220705053936-aa9c2d20cd2a", ) go_repository( name = "com_github_pingcap_log", @@ -1997,8 +2523,8 @@ def go_deps(): name = "com_github_pingcap_tipb", build_file_proto_mode = "disable_global", importpath = "github.com/pingcap/tipb", - sum = "h1:L4nZwfYSrIsWPAZR8zMwHaNQJy0Rjy3Od6Smj5mlOms=", - version = "v0.0.0-20220602075447-4847c5d68e73", + sum = "h1:oYn6UiUSnVlMBr4rLOweNWtdAon5wCLnLGDSFf/8kMA=", + version = "v0.0.0-20220704030114-0f4f873beca8", ) go_repository( name = "com_github_pkg_browser", @@ -2028,6 +2554,14 @@ def go_deps(): sum = "h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=", version = "v1.0.0", ) + go_repository( + name = "com_github_polyfloyd_go_errorlint", + build_file_proto_mode = "disable", + importpath = "github.com/polyfloyd/go-errorlint", + sum = "h1:pDrQG0lrh68e602Wfp68BlUTRFoHn8PZYAjLgt2LFsM=", + version = "v1.0.0", + ) + go_repository( name = "com_github_posener_complete", build_file_proto_mode = "disable_global", @@ -2077,6 +2611,42 @@ def go_deps(): sum = "h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=", version = "v0.7.1", ) + go_repository( + name = "com_github_quasilyte_go_ruleguard", + build_file_proto_mode = "disable", + importpath = "github.com/quasilyte/go-ruleguard", + sum = "h1:sWFavxtIctGrVs5SYZ5Ml1CvrDAs8Kf5kx2PI3C41dA=", + version = "v0.3.16-0.20220213074421-6aa060fab41a", + ) + go_repository( + name = "com_github_quasilyte_go_ruleguard_dsl", + build_file_proto_mode = "disable", + importpath = "github.com/quasilyte/go-ruleguard/dsl", + sum = "h1:5+KTKb2YREUYiqZFEIuifFyBxlcCUPWgNZkWy71XS0Q=", + version = "v0.3.19", + ) + go_repository( + name = "com_github_quasilyte_gogrep", + build_file_proto_mode = "disable", + importpath = "github.com/quasilyte/gogrep", + sum = "h1:PDWGei+Rf2bBiuZIbZmM20J2ftEy9IeUCHA8HbQqed8=", + version = "v0.0.0-20220120141003-628d8b3623b5", + ) + go_repository( + name = "com_github_quasilyte_regex_syntax", + build_file_proto_mode = "disable", + importpath = "github.com/quasilyte/regex/syntax", + sum = "h1:L8QM9bvf68pVdQ3bCFZMDmnt9yqcMBro1pC7F+IPYMY=", + version = "v0.0.0-20200407221936-30656e2c4a95", + ) + go_repository( + name = "com_github_quasilyte_stdinfo", + build_file_proto_mode = "disable", + importpath = "github.com/quasilyte/stdinfo", + sum = "h1:M8mH9eK4OUR4lu7Gd+PU1fV2/qnDNfzT635KRSObncs=", + version = "v0.0.0-20220114132959-f7386bf02567", + ) + go_repository( name = "com_github_rcrowley_go_metrics", build_file_proto_mode = "disable_global", @@ -2126,6 +2696,21 @@ def go_deps(): sum = "h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=", version = "v2.1.0", ) + go_repository( + name = "com_github_ryancurrah_gomodguard", + build_file_proto_mode = "disable", + importpath = "github.com/ryancurrah/gomodguard", + sum = "h1:ww2fsjqocGCAFamzvv/b8IsRduuHHeK2MHTcTxZTQX8=", + version = "v1.2.3", + ) + go_repository( + name = "com_github_ryanrolds_sqlclosecheck", + build_file_proto_mode = "disable", + importpath = "github.com/ryanrolds/sqlclosecheck", + sum = "h1:AZx+Bixh8zdUBxUA1NxbxVAS78vTPq4rCb8OUZI9xFw=", + version = "v0.3.0", + ) + go_repository( name = "com_github_ryanuber_columnize", build_file_proto_mode = "disable_global", @@ -2133,6 +2718,14 @@ def go_deps(): sum = "h1:j1Wcmh8OrK4Q7GXY+V7SVSY8nUWQxHW5TkBe7YUl+2s=", version = "v2.1.0+incompatible", ) + go_repository( + name = "com_github_sanposhiho_wastedassign_v2", + build_file_proto_mode = "disable", + importpath = "github.com/sanposhiho/wastedassign/v2", + sum = 
"h1:+6/hQIHKNJAUixEj6EmOngGIisyeI+T3335lYTyxRoA=", + version = "v2.0.6", + ) + go_repository( name = "com_github_sclevine_agouti", build_file_proto_mode = "disable_global", @@ -2147,6 +2740,14 @@ def go_deps(): sum = "h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=", version = "v0.0.0-20170313163322-e2103e2c3529", ) + go_repository( + name = "com_github_securego_gosec_v2", + build_file_proto_mode = "disable", + importpath = "github.com/securego/gosec/v2", + sum = "h1:+PDkpzR41OI2jrw1q6AdXZCbsNGNGT7pQjal0H0cArI=", + version = "v2.11.0", + ) + go_repository( name = "com_github_sergi_go_diff", build_file_proto_mode = "disable_global", @@ -2154,12 +2755,20 @@ def go_deps(): sum = "h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=", version = "v1.1.0", ) + go_repository( + name = "com_github_shazow_go_diff", + build_file_proto_mode = "disable", + importpath = "github.com/shazow/go-diff", + sum = "h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU=", + version = "v0.0.0-20160112020656-b6b7b6733b8c", + ) + go_repository( name = "com_github_shirou_gopsutil_v3", build_file_proto_mode = "disable_global", importpath = "github.com/shirou/gopsutil/v3", - sum = "h1:VoGxEW2hpmz0Vt3wUvHIl9fquzYLNpVpgNNB7pGJimA=", - version = "v3.21.12", + sum = "h1:srAQaiX6jX/cYL6q29aE0m8lOskT9CurZ9N61YR3yoI=", + version = "v3.22.4", ) go_repository( name = "com_github_shopify_goreferrer", @@ -2217,6 +2826,21 @@ def go_deps(): sum = "h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=", version = "v1.8.1", ) + go_repository( + name = "com_github_sivchari_containedctx", + build_file_proto_mode = "disable", + importpath = "github.com/sivchari/containedctx", + sum = "h1:0hLQKpgC53OVF1VT7CeoFHk9YKstur1XOgfYIc1yrHI=", + version = "v1.0.2", + ) + go_repository( + name = "com_github_sivchari_tenv", + build_file_proto_mode = "disable", + importpath = "github.com/sivchari/tenv", + sum = "h1:wxW0mFpKI6DIb3s6m1jCDYvkWXCskrimXMuGd0K/kSQ=", + version = "v1.5.0", + ) + go_repository( name = "com_github_smartystreets_assertions", build_file_proto_mode = "disable_global", @@ -2238,6 +2862,21 @@ def go_deps(): sum = "h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=", version = "v0.1.5", ) + go_repository( + name = "com_github_sonatard_noctx", + build_file_proto_mode = "disable", + importpath = "github.com/sonatard/noctx", + sum = "h1:VC1Qhl6Oxx9vvWo3UDgrGXYCeKCe3Wbw7qAWL6FrmTY=", + version = "v0.0.1", + ) + go_repository( + name = "com_github_sourcegraph_go_diff", + build_file_proto_mode = "disable", + importpath = "github.com/sourcegraph/go-diff", + sum = "h1:hmA1LzxW0n1c3Q4YbrFgg4P99GSnebYa3x8gr0HZqLQ=", + version = "v0.6.1", + ) + go_repository( name = "com_github_spaolacci_murmur3", build_file_proto_mode = "disable_global", @@ -2249,15 +2888,15 @@ def go_deps(): name = "com_github_spf13_afero", build_file_proto_mode = "disable_global", importpath = "github.com/spf13/afero", - sum = "h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc=", - version = "v1.2.2", + sum = "h1:xehSyVa0YnHWsJ49JFljMpg1HX19V6NDZ1fkm1Xznbo=", + version = "v1.8.2", ) go_repository( name = "com_github_spf13_cast", build_file_proto_mode = "disable_global", importpath = "github.com/spf13/cast", - sum = "h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=", - version = "v1.3.0", + sum = "h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA=", + version = "v1.4.1", ) go_repository( name = "com_github_spf13_cobra", @@ -2270,8 +2909,8 @@ def go_deps(): name = "com_github_spf13_jwalterweatherman", build_file_proto_mode = "disable_global", importpath = "github.com/spf13/jwalterweatherman", - sum 
= "h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=", - version = "v1.0.0", + sum = "h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=", + version = "v1.1.0", ) go_repository( name = "com_github_spf13_pflag", @@ -2284,9 +2923,17 @@ def go_deps(): name = "com_github_spf13_viper", build_file_proto_mode = "disable_global", importpath = "github.com/spf13/viper", - sum = "h1:xVKxvI7ouOI5I+U9s2eeiUfMaWBVoXA3AWskkrqK0VM=", - version = "v1.7.0", + sum = "h1:7OX/1FS6n7jHD1zGrZTM7WtY13ZELRyosK4k93oPr44=", + version = "v1.11.0", ) + go_repository( + name = "com_github_ssgreg_nlreturn_v2", + build_file_proto_mode = "disable", + importpath = "github.com/ssgreg/nlreturn/v2", + sum = "h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0=", + version = "v2.2.1", + ) + go_repository( name = "com_github_stathat_consistent", build_file_proto_mode = "disable", @@ -2294,6 +2941,13 @@ def go_deps(): sum = "h1:ZFJ1QTRn8npNBKW065raSZ8xfOqhpb8vLOkfp4CcL/U=", version = "v1.0.0", ) + go_repository( + name = "com_github_stbenjam_no_sprintf_host_port", + build_file_proto_mode = "disable", + importpath = "github.com/stbenjam/no-sprintf-host-port", + sum = "h1:tYugd/yrm1O0dV+ThCbaKZh195Dfm07ysF0U6JQXczc=", + version = "v0.1.1", + ) go_repository( name = "com_github_stretchr_objx", @@ -2316,6 +2970,13 @@ def go_deps(): sum = "h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=", version = "v1.2.0", ) + go_repository( + name = "com_github_sylvia7788_contextcheck", + build_file_proto_mode = "disable", + importpath = "github.com/sylvia7788/contextcheck", + sum = "h1:MsiVqROAdr0efZc/fOCt0c235qm9XJqHtWwM+2h2B04=", + version = "v1.0.4", + ) go_repository( name = "com_github_tdakkota_asciicheck", build_file_proto_mode = "disable", @@ -2324,6 +2985,14 @@ def go_deps(): version = "v0.1.1", ) + go_repository( + name = "com_github_tetafro_godot", + build_file_proto_mode = "disable", + importpath = "github.com/tetafro/godot", + sum = "h1:BVoBIqAf/2QdbFmSwAWnaIqDivZdOV0ZRwEm6jivLKw=", + version = "v1.4.11", + ) + go_repository( name = "com_github_tiancaiamao_appdash", build_file_proto_mode = "disable_global", @@ -2345,19 +3014,27 @@ def go_deps(): sum = "h1:jxgmKOscXSjaFEKQGRyY5qOpK8hLqxs2irb/uDJMtwk=", version = "v0.0.0-20220307081149-841fa61e9710", ) + go_repository( + name = "com_github_timakin_bodyclose", + build_file_proto_mode = "disable", + importpath = "github.com/timakin/bodyclose", + sum = "h1:kl4KhGNsJIbDHS9/4U9yQo1UcPQM0kOMJHn29EoH/Ro=", + version = "v0.0.0-20210704033933-f49887972144", + ) + go_repository( name = "com_github_tklauser_go_sysconf", build_file_proto_mode = "disable_global", importpath = "github.com/tklauser/go-sysconf", - sum = "h1:JeUVdAOWhhxVcU6Eqr/ATFHgXk/mmiItdKeJPev3vTo=", - version = "v0.3.9", + sum = "h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw=", + version = "v0.3.10", ) go_repository( name = "com_github_tklauser_numcpus", build_file_proto_mode = "disable_global", importpath = "github.com/tklauser/numcpus", - sum = "h1:ILuRUQBtssgnxw0XXIjKUC56fgnOrFoQQ/4+DeU2biQ=", - version = "v0.3.0", + sum = "h1:E53Dm1HjH1/R2/aoCtXtPgzmElmn51aOkhCFSuZq//o=", + version = "v0.4.0", ) go_repository( name = "com_github_tmc_grpc_websocket_proxy", @@ -2366,6 +3043,21 @@ def go_deps(): sum = "h1:uruHq4dN7GR16kFc5fp3d1RIYzJW5onx8Ybykw2YQFA=", version = "v0.0.0-20201229170055-e5319fda7802", ) + go_repository( + name = "com_github_tomarrell_wrapcheck_v2", + build_file_proto_mode = "disable", + importpath = "github.com/tomarrell/wrapcheck/v2", + sum = "h1:Cf4a/iwuMp9s7kKrh74GTgijRVim0wEpKjgAsT7Wctw=", + version = "v2.6.1", + ) + 
go_repository( + name = "com_github_tommy_muehle_go_mnd_v2", + build_file_proto_mode = "disable", + importpath = "github.com/tommy-muehle/go-mnd/v2", + sum = "h1:iAj0a8e6+dXSL7Liq0aXPox36FiN1dBbjA6lt9fl65s=", + version = "v2.5.0", + ) + go_repository( name = "com_github_twmb_murmur3", build_file_proto_mode = "disable_global", @@ -2401,6 +3093,21 @@ def go_deps(): sum = "h1:3SVOIvH7Ae1KRYyQWRjXWJEA9sS/c/pjvH++55Gr648=", version = "v0.0.0-20181204163529-d75b2dcb6bc8", ) + go_repository( + name = "com_github_ultraware_funlen", + build_file_proto_mode = "disable", + importpath = "github.com/ultraware/funlen", + sum = "h1:5ylVWm8wsNwH5aWo9438pwvsK0QiqVuUrt9bn7S/iLA=", + version = "v0.0.3", + ) + go_repository( + name = "com_github_ultraware_whitespace", + build_file_proto_mode = "disable", + importpath = "github.com/ultraware/whitespace", + sum = "h1:hh+/cpIcopyMYbZNVov9iSxvJU3OYQg78Sfaqzi/CzI=", + version = "v0.0.5", + ) + go_repository( name = "com_github_urfave_negroni", build_file_proto_mode = "disable_global", @@ -2408,6 +3115,14 @@ def go_deps(): sum = "h1:kIimOitoypq34K7TG7DUaJ9kq/N4Ofuwi1sjz0KipXc=", version = "v1.0.0", ) + go_repository( + name = "com_github_uudashr_gocognit", + build_file_proto_mode = "disable", + importpath = "github.com/uudashr/gocognit", + sum = "h1:rrSex7oHr3/pPLQ0xoWq108XMU8s678FJcQ+aSfOHa4=", + version = "v1.0.5", + ) + go_repository( name = "com_github_valyala_bytebufferpool", build_file_proto_mode = "disable_global", @@ -2429,6 +3144,14 @@ def go_deps(): sum = "h1:tY9CJiPnMXf1ERmG2EyK7gNUd+c6RKGD0IfU8WdUSz8=", version = "v1.0.1", ) + go_repository( + name = "com_github_valyala_quicktemplate", + build_file_proto_mode = "disable", + importpath = "github.com/valyala/quicktemplate", + sum = "h1:LUPTJmlVcb46OOUY3IeD9DojFpAVbsG+5WFTcjMJzCM=", + version = "v1.7.0", + ) + go_repository( name = "com_github_valyala_tcplisten", build_file_proto_mode = "disable_global", @@ -2513,6 +3236,14 @@ def go_deps(): sum = "h1:ESFSdwYZvkeru3RtdrYueztKhOBCSAAzS4Gf+k0tEow=", version = "v0.0.3-0.20170626215501-b2862e3d0a77", ) + go_repository( + name = "com_github_yagipy_maintidx", + build_file_proto_mode = "disable", + importpath = "github.com/yagipy/maintidx", + sum = "h1:h5NvIsCz+nRDapQ0exNv4aJ0yXSI0420omVANTv3GJM=", + version = "v1.0.0", + ) + go_repository( name = "com_github_yalp_jsonpath", build_file_proto_mode = "disable_global", @@ -2520,6 +3251,14 @@ def go_deps(): sum = "h1:6fRhSjgLCkTD3JnJxvaJ4Sj+TYblw757bqYgZaOq5ZY=", version = "v0.0.0-20180802001716-5cc68e5049a0", ) + go_repository( + name = "com_github_yeya24_promlinter", + build_file_proto_mode = "disable", + importpath = "github.com/yeya24/promlinter", + sum = "h1:xFKDQ82orCU5jQujdaD8stOHiv8UN68BSdn2a8u8Y3o=", + version = "v0.2.0", + ) + go_repository( name = "com_github_yudai_gojsondiff", build_file_proto_mode = "disable_global", @@ -2555,6 +3294,14 @@ def go_deps(): sum = "h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=", version = "v1.2.2", ) + go_repository( + name = "com_gitlab_bosi_decorder", + build_file_proto_mode = "disable", + importpath = "gitlab.com/bosi/decorder", + sum = "h1:ehqZe8hI4w7O4b1vgsDZw1YU1PE7iJXrQWFMsocbQ1w=", + version = "v0.2.1", + ) + go_repository( name = "com_google_cloud_go", build_file_proto_mode = "disable_global", @@ -2686,8 +3433,8 @@ def go_deps(): name = "in_gopkg_ini_v1", build_file_proto_mode = "disable_global", importpath = "gopkg.in/ini.v1", - sum = "h1:XfR1dOYubytKy4Shzc2LHrrGhU0lDCfDGG1yLPmpgsI=", - version = "v1.66.2", + sum = 
"h1:SsAcf+mM7mRZo2nJNGt8mZCjG8ZRaNGMURJw7BsIST4=", + version = "v1.66.4", ) go_repository( name = "in_gopkg_jcmturner_aescts_v1", @@ -3002,8 +3749,8 @@ def go_deps(): name = "org_golang_google_protobuf", build_file_proto_mode = "disable_global", importpath = "google.golang.org/protobuf", - sum = "h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=", - version = "v1.27.1", + sum = "h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=", + version = "v1.28.0", ) go_repository( name = "org_golang_x_crypto", @@ -3115,8 +3862,8 @@ def go_deps(): name = "org_golang_x_xerrors", build_file_proto_mode = "disable_global", importpath = "golang.org/x/xerrors", - sum = "h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=", - version = "v0.0.0-20200804184101-5ec99f83aff1", + sum = "h1:GGU+dLjvlC3qDwqYgL6UgRmHXhOOgns0bZu2Ty5mm6U=", + version = "v0.0.0-20220411194840-2f41105eb62f", ) go_repository( name = "org_gonum_v1_gonum", diff --git a/br/pkg/lightning/common/util.go b/br/pkg/lightning/common/util.go index 40b7160ed5e30..67a26fb3ab411 100644 --- a/br/pkg/lightning/common/util.go +++ b/br/pkg/lightning/common/util.go @@ -21,9 +21,11 @@ import ( "encoding/json" "fmt" "io" + "net" "net/http" "net/url" "os" + "strconv" "strings" "syscall" "time" @@ -57,8 +59,9 @@ type MySQLConnectParam struct { } func (param *MySQLConnectParam) ToDSN() string { - dsn := fmt.Sprintf("%s:%s@tcp(%s:%d)/?charset=utf8mb4&sql_mode='%s'&maxAllowedPacket=%d&tls=%s", - param.User, param.Password, param.Host, param.Port, + hostPort := net.JoinHostPort(param.Host, strconv.Itoa(param.Port)) + dsn := fmt.Sprintf("%s:%s@tcp(%s)/?charset=utf8mb4&sql_mode='%s'&maxAllowedPacket=%d&tls=%s", + param.User, param.Password, hostPort, param.SQLMode, param.MaxAllowedPacket, param.TLS) for k, v := range param.Vars { diff --git a/br/pkg/lightning/common/util_test.go b/br/pkg/lightning/common/util_test.go index cb13a10db9d5d..c7c95b44f69bf 100644 --- a/br/pkg/lightning/common/util_test.go +++ b/br/pkg/lightning/common/util_test.go @@ -99,6 +99,9 @@ func TestToDSN(t *testing.T) { }, } require.Equal(t, "root:123456@tcp(127.0.0.1:4000)/?charset=utf8mb4&sql_mode='strict'&maxAllowedPacket=1234&tls=cluster&tidb_distsql_scan_concurrency='1'", param.ToDSN()) + + param.Host = "::1" + require.Equal(t, "root:123456@tcp([::1]:4000)/?charset=utf8mb4&sql_mode='strict'&maxAllowedPacket=1234&tls=cluster&tidb_distsql_scan_concurrency='1'", param.ToDSN()) } type mockDriver struct { diff --git a/br/pkg/lightning/config/bytesize_test.go b/br/pkg/lightning/config/bytesize_test.go index 56637486275a4..0c05d9aa7a9a7 100644 --- a/br/pkg/lightning/config/bytesize_test.go +++ b/br/pkg/lightning/config/bytesize_test.go @@ -89,11 +89,11 @@ func TestByteSizeTOMLDecode(t *testing.T) { }, { input: "x = ['100000']", - err: "toml: cannot load TOML value.*", + err: "toml: incompatible types:.*", }, { input: "x = { size = '100000' }", - err: "toml: cannot load TOML value.*", + err: "toml: incompatible types:.*", }, } diff --git a/br/pkg/lightning/config/config_test.go b/br/pkg/lightning/config/config_test.go index 555cbed109f1d..845b325e7fcfd 100644 --- a/br/pkg/lightning/config/config_test.go +++ b/br/pkg/lightning/config/config_test.go @@ -517,7 +517,7 @@ func TestInvalidTOML(t *testing.T) { delimiter = '\' backslash-escape = true `)) - require.EqualError(t, err, "Near line 2 (last key parsed ''): expected '.' or '=', but got '[' instead") + require.EqualError(t, err, "toml: line 2: expected '.' 
or '=', but got '[' instead") } func TestTOMLUnusedKeys(t *testing.T) { @@ -674,7 +674,7 @@ func TestLoadFromInvalidConfig(t *testing.T) { ConfigFileContent: []byte("invalid toml"), }) require.Error(t, err) - require.Regexp(t, "Near line 1.*", err.Error()) + require.Regexp(t, "line 1.*", err.Error()) } func TestTomlPostRestore(t *testing.T) { diff --git a/br/pkg/pdutil/pd_serial_test.go b/br/pkg/pdutil/pd_serial_test.go index 608830fe190fe..b3cd714bb53b9 100644 --- a/br/pkg/pdutil/pd_serial_test.go +++ b/br/pkg/pdutil/pd_serial_test.go @@ -13,6 +13,7 @@ import ( "net/http/httptest" "net/url" "strings" + "sync" "testing" "time" @@ -239,11 +240,21 @@ func TestPauseSchedulersByKeyRange(t *testing.T) { labelExpires := make(map[string]time.Time) + var ( + mu sync.Mutex + deleted bool + ) + httpSrv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + mu.Lock() + defer mu.Unlock() + if deleted { + return + } if r.Method == http.MethodDelete { ruleID := strings.TrimPrefix(r.URL.Path, "/"+regionLabelPrefix+"/") - print(ruleID) delete(labelExpires, ruleID) + deleted = true return } var labelRule LabelRule diff --git a/br/pkg/restore/client.go b/br/pkg/restore/client.go index 77a1e0510b66b..6d2e5ec386e4c 100644 --- a/br/pkg/restore/client.go +++ b/br/pkg/restore/client.go @@ -315,7 +315,7 @@ func (rc *Client) SetStorage(ctx context.Context, backend *backuppb.StorageBacke func (rc *Client) InitClients(backend *backuppb.StorageBackend, isRawKvMode bool) { metaClient := NewSplitClient(rc.pdClient, rc.tlsConf, isRawKvMode) importCli := NewImportClient(metaClient, rc.tlsConf, rc.keepaliveConf) - rc.fileImporter = NewFileImporter(metaClient, importCli, backend, isRawKvMode, rc.rateLimit) + rc.fileImporter = NewFileImporter(metaClient, importCli, backend, isRawKvMode) } func (rc *Client) SetRawKVClient(c *RawKVBatchClient) { @@ -843,17 +843,50 @@ func (rc *Client) ExecDDLs(ctx context.Context, ddlJobs []*model.Job) error { return nil } -func (rc *Client) setSpeedLimit(ctx context.Context) error { - if !rc.hasSpeedLimited && rc.rateLimit != 0 { +// Mock the call of setSpeedLimit function +func MockCallSetSpeedLimit(ctx context.Context, fakeImportClient ImporterClient, rc *Client, concurrency uint) error { + rc.SetRateLimit(42) + rc.SetConcurrency(concurrency) + rc.hasSpeedLimited = false + rc.fileImporter = NewFileImporter(nil, fakeImportClient, nil, false) + return rc.setSpeedLimit(ctx, rc.rateLimit) +} + +func (rc *Client) ResetSpeedLimit(ctx context.Context) error { + rc.hasSpeedLimited = false + err := rc.setSpeedLimit(ctx, 0) + if err != nil { + return errors.Trace(err) + } + return nil +} + +func (rc *Client) setSpeedLimit(ctx context.Context, rateLimit uint64) error { + if !rc.hasSpeedLimited { stores, err := conn.GetAllTiKVStores(ctx, rc.pdClient, conn.SkipTiFlash) if err != nil { return errors.Trace(err) } + + eg, ectx := errgroup.WithContext(ctx) for _, store := range stores { - err = rc.fileImporter.setDownloadSpeedLimit(ctx, store.GetId()) - if err != nil { + if err := ectx.Err(); err != nil { return errors.Trace(err) } + + finalStore := store + rc.workerPool.ApplyOnErrorGroup(eg, + func() error { + err = rc.fileImporter.setDownloadSpeedLimit(ectx, finalStore.GetId(), rateLimit) + if err != nil { + return errors.Trace(err) + } + return nil + }) + } + + if err := eg.Wait(); err != nil { + return errors.Trace(err) } rc.hasSpeedLimited = true } @@ -926,7 +959,7 @@ func (rc *Client) RestoreSSTFiles( } eg, ectx := errgroup.WithContext(ctx) - err = 
rc.setSpeedLimit(ctx) + err = rc.setSpeedLimit(ctx, rc.rateLimit) if err != nil { return errors.Trace(err) } @@ -1050,38 +1083,52 @@ func (rc *Client) switchTiKVMode(ctx context.Context, mode import_sstpb.SwitchMo } bfConf := backoff.DefaultConfig bfConf.MaxDelay = time.Second * 3 + + eg, ectx := errgroup.WithContext(ctx) for _, store := range stores { - opt := grpc.WithInsecure() - if rc.tlsConf != nil { - opt = grpc.WithTransportCredentials(credentials.NewTLS(rc.tlsConf)) - } - gctx, cancel := context.WithTimeout(ctx, time.Second*5) - connection, err := grpc.DialContext( - gctx, - store.GetAddress(), - opt, - grpc.WithBlock(), - grpc.FailOnNonTempDialError(true), - grpc.WithConnectParams(grpc.ConnectParams{Backoff: bfConf}), - // we don't need to set keepalive timeout here, because the connection lives - // at most 5s. (shorter than minimal value for keepalive time!) - ) - cancel() - if err != nil { + if err := ectx.Err(); err != nil { return errors.Trace(err) } - client := import_sstpb.NewImportSSTClient(connection) - _, err = client.SwitchMode(ctx, &import_sstpb.SwitchModeRequest{ - Mode: mode, - }) - if err != nil { - return errors.Trace(err) - } - err = connection.Close() - if err != nil { - log.Error("close grpc connection failed in switch mode", zap.Error(err)) - continue - } + + finalStore := store + rc.workerPool.ApplyOnErrorGroup(eg, + func() error { + opt := grpc.WithInsecure() + if rc.tlsConf != nil { + opt = grpc.WithTransportCredentials(credentials.NewTLS(rc.tlsConf)) + } + gctx, cancel := context.WithTimeout(ectx, time.Second*5) + connection, err := grpc.DialContext( + gctx, + finalStore.GetAddress(), + opt, + grpc.WithBlock(), + grpc.FailOnNonTempDialError(true), + grpc.WithConnectParams(grpc.ConnectParams{Backoff: bfConf}), + // we don't need to set keepalive timeout here, because the connection lives + // at most 5s. (shorter than minimal value for keepalive time!) 
+ ) + cancel() + if err != nil { + return errors.Trace(err) + } + client := import_sstpb.NewImportSSTClient(connection) + _, err = client.SwitchMode(ctx, &import_sstpb.SwitchModeRequest{ + Mode: mode, + }) + if err != nil { + return errors.Trace(err) + } + err = connection.Close() + if err != nil { + log.Error("close grpc connection failed in switch mode", zap.Error(err)) + } + return nil + }) + } + + if err = eg.Wait(); err != nil { + return errors.Trace(err) } return nil } diff --git a/br/pkg/restore/client_test.go b/br/pkg/restore/client_test.go index 334ac67387f5d..08e57e83a7095 100644 --- a/br/pkg/restore/client_test.go +++ b/br/pkg/restore/client_test.go @@ -4,11 +4,15 @@ package restore_test import ( "context" + "fmt" "math" + "sort" "strconv" + "sync" "testing" "time" + "github.com/pingcap/kvproto/pkg/import_sstpb" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/tidb/br/pkg/gluetidb" "github.com/pingcap/tidb/br/pkg/metautil" @@ -240,3 +244,110 @@ func TestPreCheckTableTiFlashReplicas(t *testing.T) { require.Nil(t, tables[i].Info.TiFlashReplica) } } + +// Mock ImporterClient interface +type FakeImporterClient struct { + restore.ImporterClient +} + +// Record the stores that have communicated +type RecordStores struct { + mu sync.Mutex + stores []uint64 +} + +func NewRecordStores() RecordStores { + return RecordStores{stores: make([]uint64, 0)} +} + +func (r *RecordStores) put(id uint64) { + r.mu.Lock() + defer r.mu.Unlock() + r.stores = append(r.stores, id) +} + +func (r *RecordStores) sort() { + sort.Slice(r.stores, func(i, j int) bool { + return r.stores[i] < r.stores[j] + }) +} + +var recordStores RecordStores + +const ( + SET_SPEED_LIMIT_ERROR = 999999 + WORKING_TIME = 100 +) + +func (fakeImportCli FakeImporterClient) SetDownloadSpeedLimit( + ctx context.Context, + storeID uint64, + req *import_sstpb.SetDownloadSpeedLimitRequest, +) (*import_sstpb.SetDownloadSpeedLimitResponse, error) { + if storeID == SET_SPEED_LIMIT_ERROR { + return nil, fmt.Errorf("storeID:%v ERROR.", storeID) + } + + time.Sleep(WORKING_TIME * time.Millisecond) // simulate doing 100 ms work + recordStores.put(storeID) + return nil, nil +} + +func TestSetSpeedLimit(t *testing.T) { + mockStores := []*metapb.Store{ + {Id: 1}, + {Id: 2}, + {Id: 3}, + {Id: 4}, + {Id: 5}, + {Id: 6}, + {Id: 7}, + {Id: 8}, + {Id: 9}, + {Id: 10}, + } + + // 1. The cost of concurrent communication is expected to be less than the cost of serial communication. + client := restore.NewRestoreClient(fakePDClient{ + stores: mockStores, + }, nil, defaultKeepaliveCfg, false) + ctx := context.Background() + + recordStores = NewRecordStores() + start := time.Now() + err := restore.MockCallSetSpeedLimit(ctx, FakeImporterClient{}, client, 10) + cost := time.Since(start) + require.NoError(t, err) + + recordStores.sort() + t.Logf("Total Cost: %v\n", cost) + t.Logf("Has Communicated: %v\n", recordStores.stores) + + serialCost := len(mockStores) * WORKING_TIME + require.Less(t, cost, time.Duration(serialCost)*time.Millisecond) + require.Equal(t, len(mockStores), len(recordStores.stores)) + for i := 0; i < len(recordStores.stores); i++ { + require.Equal(t, mockStores[i].Id, recordStores.stores[i]) + } + + // 2. Expect the number of communicated stores to be less than the length of the mockStore + // Because subsequent unstarted communications are aborted when an error is encountered. 
+ recordStores = NewRecordStores() + mockStores[5].Id = SET_SPEED_LIMIT_ERROR // setting a fault store + client = restore.NewRestoreClient(fakePDClient{ + stores: mockStores, + }, nil, defaultKeepaliveCfg, false) + + // Concurrency needs to be less than the number of stores + err = restore.MockCallSetSpeedLimit(ctx, FakeImporterClient{}, client, 2) + require.Error(t, err) + t.Log(err) + + recordStores.sort() + sort.Slice(mockStores, func(i, j int) bool { return mockStores[i].Id < mockStores[j].Id }) + t.Logf("Has Communicated: %v\n", recordStores.stores) + require.Less(t, len(recordStores.stores), len(mockStores)) + for i := 0; i < len(recordStores.stores); i++ { + require.Equal(t, mockStores[i].Id, recordStores.stores[i]) + } +} diff --git a/br/pkg/restore/import.go b/br/pkg/restore/import.go index eb29b63b47f2f..63ac21f4b2e1c 100644 --- a/br/pkg/restore/import.go +++ b/br/pkg/restore/import.go @@ -238,7 +238,6 @@ type FileImporter struct { metaClient SplitClient importClient ImporterClient backend *backuppb.StorageBackend - rateLimit uint64 isRawKvMode bool rawStartKey []byte @@ -252,14 +251,12 @@ func NewFileImporter( importClient ImporterClient, backend *backuppb.StorageBackend, isRawKvMode bool, - rateLimit uint64, ) FileImporter { return FileImporter{ metaClient: metaClient, backend: backend, importClient: importClient, isRawKvMode: isRawKvMode, - rateLimit: rateLimit, } } @@ -499,9 +496,9 @@ func (importer *FileImporter) ImportSSTFiles( return errors.Trace(err) } -func (importer *FileImporter) setDownloadSpeedLimit(ctx context.Context, storeID uint64) error { +func (importer *FileImporter) setDownloadSpeedLimit(ctx context.Context, storeID, rateLimit uint64) error { req := &import_sstpb.SetDownloadSpeedLimitRequest{ - SpeedLimit: importer.rateLimit, + SpeedLimit: rateLimit, } _, err := importer.importClient.SetDownloadSpeedLimit(ctx, storeID, req) return errors.Trace(err) diff --git a/br/pkg/storage/local.go b/br/pkg/storage/local.go index c7c16bbc7259e..0004648899aa8 100644 --- a/br/pkg/storage/local.go +++ b/br/pkg/storage/local.go @@ -108,6 +108,7 @@ func (l *LocalStorage) URI() string { // Open a Reader by file path, path is a relative path to base path. func (l *LocalStorage) Open(ctx context.Context, path string) (ExternalFileReader, error) { + //nolint: gosec return os.Open(filepath.Join(l.base, path)) } diff --git a/br/pkg/task/restore.go b/br/pkg/task/restore.go index d139d58a12186..3c7095bc1df4e 100644 --- a/br/pkg/task/restore.go +++ b/br/pkg/task/restore.go @@ -16,6 +16,7 @@ import ( berrors "github.com/pingcap/tidb/br/pkg/errors" "github.com/pingcap/tidb/br/pkg/glue" "github.com/pingcap/tidb/br/pkg/httputil" + "github.com/pingcap/tidb/br/pkg/logutil" "github.com/pingcap/tidb/br/pkg/metautil" "github.com/pingcap/tidb/br/pkg/pdutil" "github.com/pingcap/tidb/br/pkg/restore" @@ -61,6 +62,7 @@ const ( defaultPDConcurrency = 1 defaultBatchFlushInterval = 16 * time.Second defaultFlagDdlBatchSize = 128 + resetSpeedLimitRetryTimes = 3 ) const ( @@ -614,6 +616,25 @@ func RunRestore(c context.Context, g glue.Glue, cmdName string, cfg *RestoreConf finish = dropToBlackhole(ctx, afterRestoreStream, errCh, updateCh) } + // Reset speed limit. ResetSpeedLimit must be called after client.InitBackupMeta has been called. + defer func() { + var resetErr error + // In future we may need a mechanism to set speed limit in ttl. like what we do in switchmode. 
TODO + for retry := 0; retry < resetSpeedLimitRetryTimes; retry++ { + resetErr = client.ResetSpeedLimit(ctx) + if resetErr != nil { + log.Warn("failed to reset speed limit, retry it", + zap.Int("retry time", retry), logutil.ShortError(resetErr)) + time.Sleep(time.Duration(retry+3) * time.Second) + continue + } + break + } + if resetErr != nil { + log.Error("failed to reset speed limit", zap.Error(resetErr)) + } + }() + select { case err = <-errCh: err = multierr.Append(err, multierr.Combine(restore.Exhaust(errCh)...)) diff --git a/build/BUILD.bazel b/build/BUILD.bazel index 215244513658f..396f4e943ed06 100644 --- a/build/BUILD.bazel +++ b/build/BUILD.bazel @@ -125,6 +125,7 @@ nogo( "//build/linter/exportloopref:exportloopref", "//build/linter/gofmt:gofmt", "//build/linter/gci:gci", + "//build/linter/gosec:gosec", "//build/linter/ineffassign:ineffassign", "//build/linter/misspell:misspell", "//build/linter/prealloc:prealloc", diff --git a/build/linter/gosec/BUILD.bazel b/build/linter/gosec/BUILD.bazel new file mode 100644 index 0000000000000..e0a64e71a0af6 --- /dev/null +++ b/build/linter/gosec/BUILD.bazel @@ -0,0 +1,16 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "gosec", + srcs = ["analysis.go"], + importpath = "github.com/pingcap/tidb/build/linter/gosec", + visibility = ["//visibility:public"], + deps = [ + "//build/linter/util", + "@com_github_golangci_golangci_lint//pkg/result", + "@com_github_golangci_gosec//:gosec", + "@com_github_golangci_gosec//rules", + "@org_golang_x_tools//go/analysis", + "@org_golang_x_tools//go/loader", + ], +) diff --git a/build/linter/gosec/analysis.go b/build/linter/gosec/analysis.go new file mode 100644 index 0000000000000..2e93dbbdd3d59 --- /dev/null +++ b/build/linter/gosec/analysis.go @@ -0,0 +1,110 @@ +// Copyright 2022 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gosec + +import ( + "fmt" + "go/token" + "go/types" + "io/ioutil" + "log" + "strconv" + + "github.com/golangci/golangci-lint/pkg/result" + "github.com/golangci/gosec" + "github.com/golangci/gosec/rules" + "github.com/pingcap/tidb/build/linter/util" + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/loader" +) + +// Name is the name of the analyzer. +const Name = "gosec" + +// Analyzer is the analyzer struct of gosec. 
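The deferred ResetSpeedLimit call above retries a fixed number of times with a growing sleep between attempts. A generic sketch of that retry shape, with the attempt count and callback as placeholders rather than the BR types:

package main

import (
	"fmt"
	"time"
)

// retry runs fn up to attempts times, sleeping a little longer after each
// failure, and returns the last error if every attempt fails.
func retry(attempts int, fn func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = fn(); err == nil {
			return nil
		}
		fmt.Printf("attempt %d failed: %v\n", i, err)
		time.Sleep(time.Duration(i+3) * time.Second)
	}
	return err
}

func main() {
	calls := 0
	err := retry(3, func() error {
		calls++
		if calls < 2 {
			return fmt.Errorf("transient failure")
		}
		return nil
	})
	fmt.Println("result:", err)
}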
+var Analyzer = &analysis.Analyzer{ + Name: Name, + Doc: "Inspects source code for security problems", + Run: run, +} + +func init() { + util.SkipAnalyzer(Analyzer) +} + +func run(pass *analysis.Pass) (interface{}, error) { + gasConfig := gosec.NewConfig() + enabledRules := rules.Generate(func(id string) bool { + if id == "G104" || id == "G103" || id == "G101" || id == "G201" { + return true + } + return false + }) + logger := log.New(ioutil.Discard, "", 0) + analyzer := gosec.NewAnalyzer(gasConfig, logger) + analyzer.LoadRules(enabledRules.Builders()) + + var createdPkgs []*loader.PackageInfo + createdPkgs = append(createdPkgs, util.MakeFakeLoaderPackageInfo(pass)) + allPkgs := make(map[*types.Package]*loader.PackageInfo) + for _, pkg := range createdPkgs { + pkg := pkg + allPkgs[pkg.Pkg] = pkg + } + prog := &loader.Program{ + Fset: pass.Fset, + Imported: nil, // not used without .Created in any linter + Created: createdPkgs, // all initial packages + AllPackages: allPkgs, // all initial packages and their depndencies + } + + analyzer.ProcessProgram(prog) + issues, _ := analyzer.Report() + if len(issues) == 0 { + return nil, nil + } + severity, confidence := gosec.Low, gosec.Low + issues = filterIssues(issues, severity, confidence) + for _, i := range issues { + fileContent, tf, err := util.ReadFile(pass.Fset, i.File) + if err != nil { + panic(err) + } + text := fmt.Sprintf("[%s] %s: %s", Name, i.RuleID, i.What) // TODO: use severity and confidence + var r *result.Range + line, err := strconv.Atoi(i.Line) + if err != nil { + r = &result.Range{} + if n, rerr := fmt.Sscanf(i.Line, "%d-%d", &r.From, &r.To); rerr != nil || n != 2 { + continue + } + line = r.From + } + + pass.Reportf(token.Pos(tf.Base()+util.FindOffset(string(fileContent), line, 1)), text) + } + + return nil, nil +} + +func filterIssues(issues []*gosec.Issue, severity, confidence gosec.Score) []*gosec.Issue { + res := make([]*gosec.Issue, 0) + for _, issue := range issues { + if issue.Severity >= severity && issue.Confidence >= confidence { + res = append(res, issue) + } + } + return res +} diff --git a/build/linter/misspell/BUILD.bazel b/build/linter/misspell/BUILD.bazel index 8cdc74330a98a..6dfdff3fc3c78 100644 --- a/build/linter/misspell/BUILD.bazel +++ b/build/linter/misspell/BUILD.bazel @@ -7,7 +7,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//build/linter/util", - "@com_github_golangci_misspell//:go_default_library", + "@com_github_golangci_misspell//:misspell", "@org_golang_x_tools//go/analysis", ], ) diff --git a/build/linter/util/util.go b/build/linter/util/util.go index 0aaeb2bbb7419..afa56c515613f 100644 --- a/build/linter/util/util.go +++ b/build/linter/util/util.go @@ -172,6 +172,7 @@ func MakeFakeLoaderPackageInfo(pass *analysis.Pass) *loader.PackageInfo { // ReadFile reads a file and adds it to the FileSet // so that we can report errors against it using lineStart. 
func ReadFile(fset *token.FileSet, filename string) ([]byte, *token.File, error) { + //nolint: gosec content, err := ioutil.ReadFile(filename) if err != nil { return nil, nil, err diff --git a/build/nogo_config.json b/build/nogo_config.json index bd890a8f55832..b766d3ec45a14 100644 --- a/build/nogo_config.json +++ b/build/nogo_config.json @@ -154,6 +154,22 @@ "util/printer/printer.go": "ignore util/printer code" } }, + "gosec": { + "exclude_files": { + "external/": "no need to vet third party code", + "parser/goyacc/": "ignore goyacc code", + ".*_test\\.go$": "ignore generated code", + "/cgo/": "ignore cgo code", + "/rules_go_work-*": "ignore generated code", + "tools/check/ut.go": "ignore tools/check code", + "tools/check/xprog.go": "ignore tools/check code", + "cmd/pluginpkg/pluginpkg.go": "ignore cmd/pluginpkg code", + "tools/check/xprog.go:": "ignore tools/check code", + "cmd/explaintest/main.go": "ignore cmd/explaintest code", + "GOROOT/": "ignore code", + ".*_generated\\.go$": "ignore generated code" + } + }, "httpresponse": { "exclude_files": { "/external/": "no need to vet third party code", diff --git a/ddl/cancel_test.go b/ddl/cancel_test.go index 9f367eb40ca55..64179c19a8eca 100644 --- a/ddl/cancel_test.go +++ b/ddl/cancel_test.go @@ -206,14 +206,13 @@ var allTestCase = []testCancelJob{ {"alter table t_partition drop partition p6", false, model.StateDeleteReorganization, true, true, []string{"alter table t_partition add partition (partition p6 values less than (8192))"}}, {"alter table t_partition drop partition p6", false, model.StateNone, true, true, []string{"alter table t_partition add partition (partition p6 values less than (8192))"}}, // Drop indexes. - // TODO: fix schema state. - {"alter table t drop index mul_idx1, drop index mul_idx2", true, model.StateNone, true, false, []string{"alter table t add index mul_idx1(c1)", "alter table t add index mul_idx2(c1)"}}, - {"alter table t drop index mul_idx1, drop index mul_idx2", false, model.StateWriteOnly, true, false, nil}, - {"alter table t drop index mul_idx1, drop index mul_idx2", false, model.StateWriteOnly, true, false, []string{"alter table t add index mul_idx1(c1)", "alter table t add index mul_idx2(c1)"}}, - {"alter table t drop index mul_idx1, drop index mul_idx2", false, model.StateDeleteOnly, true, false, []string{"alter table t add index mul_idx1(c1)", "alter table t add index mul_idx2(c1)"}}, - {"alter table t drop index mul_idx1, drop index mul_idx2", false, model.StateDeleteOnly, false, true, []string{"alter table t add index mul_idx1(c1)", "alter table t add index mul_idx2(c1)"}}, - {"alter table t drop index mul_idx1, drop index mul_idx2", false, model.StateDeleteReorganization, true, false, []string{"alter table t add index mul_idx1(c1)", "alter table t add index mul_idx2(c1)"}}, - {"alter table t drop index mul_idx1, drop index mul_idx2", false, model.StateDeleteReorganization, false, true, []string{"alter table t add index mul_idx1(c1)", "alter table t add index mul_idx2(c1)"}}, + {"alter table t drop index mul_idx1, drop index mul_idx2", true, subStates{model.StatePublic, model.StatePublic}, true, false, []string{"alter table t add index mul_idx1(c1)", "alter table t add index mul_idx2(c1)"}}, + {"alter table t drop index mul_idx1, drop index mul_idx2", false, subStates{model.StateWriteOnly, model.StateWriteOnly}, true, false, nil}, + {"alter table t drop index mul_idx1, drop index mul_idx2", false, subStates{model.StateWriteOnly, model.StateWriteOnly}, true, false, []string{"alter table t add index 
mul_idx1(c1)", "alter table t add index mul_idx2(c1)"}}, + {"alter table t drop index mul_idx1, drop index mul_idx2", false, subStates{model.StateDeleteOnly, model.StateWriteOnly}, true, false, []string{"alter table t add index mul_idx1(c1)", "alter table t add index mul_idx2(c1)"}}, + {"alter table t drop index mul_idx1, drop index mul_idx2", false, subStates{model.StateDeleteOnly, model.StateWriteOnly}, false, true, []string{"alter table t add index mul_idx1(c1)", "alter table t add index mul_idx2(c1)"}}, + {"alter table t drop index mul_idx1, drop index mul_idx2", false, subStates{model.StateDeleteReorganization, model.StateWriteOnly}, true, false, []string{"alter table t add index mul_idx1(c1)", "alter table t add index mul_idx2(c1)"}}, + {"alter table t drop index mul_idx1, drop index mul_idx2", false, subStates{model.StateDeleteReorganization, model.StateWriteOnly}, false, true, []string{"alter table t add index mul_idx1(c1)", "alter table t add index mul_idx2(c1)"}}, // Alter db placement. {"alter database db_placement placement policy = 'alter_x'", true, model.StateNone, true, false, []string{"create placement policy alter_x PRIMARY_REGION=\"cn-east-1\", REGIONS=\"cn-east-1\";", "create database db_placement"}}, {"alter database db_placement placement policy = 'alter_x'", false, model.StatePublic, false, true, nil}, @@ -304,7 +303,6 @@ func TestCancel(t *testing.T) { for _, prepareSQL := range tc.prepareSQL { tk.MustExec(prepareSQL) } - cancel = false cancelWhenReorgNotStart = true registHook(hook, true) diff --git a/ddl/column.go b/ddl/column.go index 295c64f98862e..29b16c17140fd 100644 --- a/ddl/column.go +++ b/ddl/column.go @@ -51,38 +51,6 @@ import ( "go.uber.org/zap" ) -// adjustColumnInfoInDropColumn is used to set the correct position of column info when dropping column. -// 1. The offset of column should to be set to the last of the columns. -// 2. The dropped column is moved to the end of tblInfo.Columns, due to it was not public any more. -func adjustColumnInfoInDropColumn(tblInfo *model.TableInfo, offset int) { - oldCols := tblInfo.Columns - // Adjust column offset. - offsetChanged := make(map[int]int, len(oldCols)-offset-1) - for i := offset + 1; i < len(oldCols); i++ { - offsetChanged[oldCols[i].Offset] = i - 1 - oldCols[i].Offset = i - 1 - } - oldCols[offset].Offset = len(oldCols) - 1 - // For expression index, we drop hidden columns and index simultaneously. - // So we need to change the offset of expression index. - offsetChanged[offset] = len(oldCols) - 1 - // Update index column offset info. - // TODO: There may be some corner cases for index column offsets, we may check this later. - for _, idx := range tblInfo.Indices { - for _, col := range idx.Columns { - newOffset, ok := offsetChanged[col.Offset] - if ok { - col.Offset = newOffset - } - } - } - newCols := make([]*model.ColumnInfo, 0, len(oldCols)) - newCols = append(newCols, oldCols[:offset]...) - newCols = append(newCols, oldCols[offset+1:]...) - newCols = append(newCols, oldCols[offset]) - tblInfo.Columns = newCols -} - func createColumnInfoWithPosCheck(tblInfo *model.TableInfo, colInfo *model.ColumnInfo, pos *ast.ColumnPosition) (*model.ColumnInfo, *ast.ColumnPosition, int, error) { // Check column name duplicate. 
cols := tblInfo.Columns @@ -250,12 +218,6 @@ func onAddColumn(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, err error) return ver, errors.Trace(err) } -func setColumnsState(columnInfos []*model.ColumnInfo, state model.SchemaState) { - for i := range columnInfos { - columnInfos[i].State = state - } -} - // checkAfterPositionExists makes sure the column specified in AFTER clause is exists. // For example, ALTER TABLE t ADD COLUMN c3 INT AFTER c1. func checkAfterPositionExists(tblInfo *model.TableInfo, pos *ast.ColumnPosition) error { @@ -275,8 +237,6 @@ func setIndicesState(indexInfos []*model.IndexInfo, state model.SchemaState) { } func checkDropColumnForStatePublic(tblInfo *model.TableInfo, colInfo *model.ColumnInfo) (err error) { - // Set this column's offset to the last and reset all following columns' offsets. - adjustColumnInfoInDropColumn(tblInfo, colInfo.Offset) // When the dropping column has not-null flag and it hasn't the default value, we can backfill the column value like "add column". // NOTE: If the state of StateWriteOnly can be rollbacked, we'd better reconsider the original default value. // And we need consider the column without not-null flag. @@ -320,6 +280,7 @@ func onDropColumn(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error) // public -> write only colInfo.State = model.StateWriteOnly setIndicesState(idxInfos, model.StateWriteOnly) + tblInfo.MoveColumnInfo(colInfo.Offset, len(tblInfo.Columns)-1) err = checkDropColumnForStatePublic(tblInfo, colInfo) if err != nil { return ver, errors.Trace(err) diff --git a/ddl/ddl.go b/ddl/ddl.go index 47456e5f81d17..038cd40c2feee 100644 --- a/ddl/ddl.go +++ b/ddl/ddl.go @@ -833,16 +833,6 @@ func (d *ddl) DoDDLJob(ctx sessionctx.Context, job *model.Job) error { logutil.BgLogger().Info("[ddl] DDL job is failed", zap.Int64("jobID", jobID)) return errors.Trace(historyJob.Error) } - // Only for JobStateCancelled job which is adding columns or drop columns or drop indexes. - if historyJob.IsCancelled() && (historyJob.Type == model.ActionDropIndexes) { - if historyJob.MultiSchemaInfo != nil && len(historyJob.MultiSchemaInfo.Warnings) != 0 { - for _, warning := range historyJob.MultiSchemaInfo.Warnings { - ctx.GetSessionVars().StmtCtx.AppendWarning(warning) - } - } - logutil.BgLogger().Info("[ddl] DDL job is cancelled", zap.Int64("jobID", jobID)) - return nil - } panic("When the state is JobStateRollbackDone or JobStateCancelled, historyJob.Error should never be nil") } } @@ -987,10 +977,25 @@ type Info struct { Jobs []*model.Job // It's the currently running jobs. } +// GetDDLInfoWithNewTxn returns DDL information using a new txn. +func GetDDLInfoWithNewTxn(s sessionctx.Context) (*Info, error) { + err := sessiontxn.NewTxn(context.Background(), s) + if err != nil { + return nil, err + } + info, err := GetDDLInfo(s) + s.RollbackTxn(context.Background()) + return info, err +} + // GetDDLInfo returns DDL information. -func GetDDLInfo(txn kv.Transaction) (*Info, error) { +func GetDDLInfo(s sessionctx.Context) (*Info, error) { var err error info := &Info{} + txn, err := s.Txn(true) + if err != nil { + return nil, err + } t := meta.NewMeta(txn) info.Jobs = make([]*model.Job, 0, 2) @@ -1145,15 +1150,16 @@ const MaxHistoryJobs = 10 // DefNumHistoryJobs is default value of the default number of history job const DefNumHistoryJobs = 10 -// GetHistoryDDLJobs returns the DDL history jobs and an error. +const batchNumHistoryJobs = 128 + +// GetLastNHistoryDDLJobs returns the DDL history jobs and an error. 
// The maximum count of history jobs is num. -func GetHistoryDDLJobs(txn kv.Transaction, maxNumJobs int) ([]*model.Job, error) { - t := meta.NewMeta(txn) - jobs, err := t.GetLastNHistoryDDLJobs(maxNumJobs) +func GetLastNHistoryDDLJobs(t *meta.Meta, maxNumJobs int) ([]*model.Job, error) { + iterator, err := t.GetLastHistoryDDLJobsIterator() if err != nil { return nil, errors.Trace(err) } - return jobs, nil + return iterator.GetLastJobs(maxNumJobs, nil) } // IterHistoryDDLJobs iterates history DDL jobs until the `finishFn` return true or error. @@ -1193,7 +1199,42 @@ func IterAllDDLJobs(txn kv.Transaction, finishFn func([]*model.Job) (bool, error // GetAllHistoryDDLJobs get all the done DDL jobs. func GetAllHistoryDDLJobs(m *meta.Meta) ([]*model.Job, error) { - return m.GetAllHistoryDDLJobs() + iterator, err := m.GetLastHistoryDDLJobsIterator() + if err != nil { + return nil, errors.Trace(err) + } + allJobs := make([]*model.Job, 0, batchNumHistoryJobs) + for { + jobs, err := iterator.GetLastJobs(batchNumHistoryJobs, nil) + if err != nil { + return nil, errors.Trace(err) + } + allJobs = append(allJobs, jobs...) + if len(jobs) < batchNumHistoryJobs { + break + } + } + // sort job. + sorter := &jobsSorter{jobs: allJobs} + sort.Sort(sorter) + return allJobs, nil +} + +// jobsSorter implements the sort.Interface interface. +type jobsSorter struct { + jobs []*model.Job +} + +func (s *jobsSorter) Swap(i, j int) { + s.jobs[i], s.jobs[j] = s.jobs[j], s.jobs[i] +} + +func (s *jobsSorter) Len() int { + return len(s.jobs) +} + +func (s *jobsSorter) Less(i, j int) bool { + return s.jobs[i].ID < s.jobs[j].ID } // GetHistoryJobByID return history DDL job by ID. diff --git a/ddl/ddl_api.go b/ddl/ddl_api.go index d1c92e48eedb1..9082944257c04 100644 --- a/ddl/ddl_api.go +++ b/ddl/ddl_api.go @@ -3078,7 +3078,7 @@ func checkMultiSpecs(sctx sessionctx.Context, specs []*ast.AlterTableSpec) error func allSupported(specs []*ast.AlterTableSpec) bool { for _, s := range specs { switch s.Tp { - case ast.AlterTableAddColumns, ast.AlterTableDropColumn: + case ast.AlterTableAddColumns, ast.AlterTableDropColumn, ast.AlterTableDropIndex, ast.AlterTableDropPrimaryKey: default: return false } @@ -3118,10 +3118,9 @@ func (d *ddl) AlterTable(ctx context.Context, sctx sessionctx.Context, stmt *ast if len(validSpecs) > 1 { useMultiSchemaChange := false switch validSpecs[0].Tp { - case ast.AlterTableAddColumns, ast.AlterTableDropColumn: + case ast.AlterTableAddColumns, ast.AlterTableDropColumn, + ast.AlterTableDropPrimaryKey, ast.AlterTableDropIndex: useMultiSchemaChange = true - case ast.AlterTableDropPrimaryKey, ast.AlterTableDropIndex: - err = d.DropIndexes(sctx, ident, validSpecs) default: return dbterror.ErrRunMultiSchemaChanges } @@ -4032,6 +4031,7 @@ func (d *ddl) DropColumn(ctx sessionctx.Context, ti ast.Ident, spec *ast.AlterTa if err != nil { return err } + var multiSchemaInfo *model.MultiSchemaInfo if variable.EnableChangeMultiSchema.Load() { multiSchemaInfo = &model.MultiSchemaInfo{} @@ -5963,10 +5963,12 @@ func buildFKInfo(fkName model.CIStr, keys []*ast.IndexPartSpecification, refer * // Check wrong reference options of foreign key on stored generated columns switch refer.OnUpdate.ReferOpt { case ast.ReferOptionCascade, ast.ReferOptionSetNull, ast.ReferOptionSetDefault: + //nolint: gosec return nil, dbterror.ErrWrongFKOptionForGeneratedColumn.GenWithStackByArgs("ON UPDATE " + refer.OnUpdate.ReferOpt.String()) } switch refer.OnDelete.ReferOpt { case ast.ReferOptionSetNull, ast.ReferOptionSetDefault: + //nolint: gosec 
return nil, dbterror.ErrWrongFKOptionForGeneratedColumn.GenWithStackByArgs("ON DELETE " + refer.OnDelete.ReferOpt.String()) } continue @@ -6124,65 +6126,11 @@ func (d *ddl) dropIndex(ctx sessionctx.Context, ti ast.Ident, indexName model.CI SchemaID: schema.ID, TableID: t.Meta().ID, SchemaName: schema.Name.L, + SchemaState: indexInfo.State, TableName: t.Meta().Name.L, Type: jobTp, BinlogInfo: &model.HistoryInfo{}, - SchemaState: indexInfo.State, - Args: []interface{}{indexName}, - } - - err = d.DoDDLJob(ctx, job) - // index not exists, but if_exists flags is true, so we ignore this error. - if dbterror.ErrCantDropFieldOrKey.Equal(err) && ifExists { - ctx.GetSessionVars().StmtCtx.AppendNote(err) - return nil - } - err = d.callHookOnChanged(job, err) - return errors.Trace(err) -} - -func (d *ddl) DropIndexes(ctx sessionctx.Context, ti ast.Ident, specs []*ast.AlterTableSpec) error { - schema, t, err := d.getSchemaAndTableByIdent(ctx, ti) - if err != nil { - return err - } - - if t.Meta().TableCacheStatusType != model.TableCacheStatusDisable { - return errors.Trace(dbterror.ErrOptOnCacheTable.GenWithStackByArgs("Drop Indexes")) - } - indexNames := make([]model.CIStr, 0, len(specs)) - ifExists := make([]bool, 0, len(specs)) - for _, spec := range specs { - var indexName model.CIStr - if spec.Tp == ast.AlterTableDropPrimaryKey { - indexName = model.NewCIStr(mysql.PrimaryKeyName) - } else { - indexName = model.NewCIStr(spec.Name) - } - - indexInfo := t.Meta().FindIndexByName(indexName.L) - if indexInfo != nil { - _, err := checkIsDropPrimaryKey(indexName, indexInfo, t) - if err != nil { - return err - } - if err := checkDropIndexOnAutoIncrementColumn(t.Meta(), indexInfo); err != nil { - return errors.Trace(err) - } - } - - indexNames = append(indexNames, indexName) - ifExists = append(ifExists, spec.IfExists) - } - - job := &model.Job{ - SchemaID: schema.ID, - TableID: t.Meta().ID, - SchemaName: schema.Name.L, - TableName: t.Meta().Name.L, - Type: model.ActionDropIndexes, - BinlogInfo: &model.HistoryInfo{}, - Args: []interface{}{indexNames, ifExists}, + Args: []interface{}{indexName, ifExists}, } err = d.DoDDLJob(ctx, job) diff --git a/ddl/ddl_api_test.go b/ddl/ddl_api_test.go index 2ad25d70f522d..18db2dfa62170 100644 --- a/ddl/ddl_api_test.go +++ b/ddl/ddl_api_test.go @@ -31,7 +31,6 @@ func TestIsJobRollbackable(t *testing.T) { {model.ActionDropIndex, model.StateDeleteOnly, false}, {model.ActionDropSchema, model.StateDeleteOnly, false}, {model.ActionDropColumn, model.StateDeleteOnly, false}, - {model.ActionDropIndexes, model.StateDeleteOnly, false}, } job := &model.Job{} for _, ca := range cases { diff --git a/ddl/ddl_test.go b/ddl/ddl_test.go index 5ebbb3a4c574a..83f83c6dabae6 100644 --- a/ddl/ddl_test.go +++ b/ddl/ddl_test.go @@ -689,55 +689,6 @@ func TestReorg(t *testing.T) { } } -func TestGetDDLInfo(t *testing.T) { - store, clean := newMockStore(t) - defer clean() - - txn, err := store.Begin() - require.NoError(t, err) - m := meta.NewMeta(txn) - - dbInfo2 := &model.DBInfo{ - ID: 2, - Name: model.NewCIStr("b"), - State: model.StateNone, - } - job := &model.Job{ - SchemaID: dbInfo2.ID, - Type: model.ActionCreateSchema, - RowCount: 0, - } - job1 := &model.Job{ - SchemaID: dbInfo2.ID, - Type: model.ActionAddIndex, - RowCount: 0, - } - - err = m.EnQueueDDLJob(job) - require.NoError(t, err) - - info, err := GetDDLInfo(txn) - require.NoError(t, err) - require.Len(t, info.Jobs, 1) - require.Equal(t, job, info.Jobs[0]) - require.Nil(t, info.ReorgHandle) - - // two jobs - m = meta.NewMeta(txn, 
meta.AddIndexJobListKey) - err = m.EnQueueDDLJob(job1) - require.NoError(t, err) - - info, err = GetDDLInfo(txn) - require.NoError(t, err) - require.Len(t, info.Jobs, 2) - require.Equal(t, job, info.Jobs[0]) - require.Equal(t, job1, info.Jobs[1]) - require.Nil(t, info.ReorgHandle) - - err = txn.Rollback() - require.NoError(t, err) -} - func TestGetDDLJobs(t *testing.T) { store, clean := newMockStore(t) defer clean() @@ -952,7 +903,7 @@ func TestGetHistoryDDLJobs(t *testing.T) { err = AddHistoryDDLJob(m, jobs[i], true) require.NoError(t, err) - historyJobs, err := GetHistoryDDLJobs(txn, DefNumHistoryJobs) + historyJobs, err := GetLastNHistoryDDLJobs(m, DefNumHistoryJobs) require.NoError(t, err) if i+1 > MaxHistoryJobs { @@ -963,7 +914,7 @@ func TestGetHistoryDDLJobs(t *testing.T) { } delta := cnt - MaxHistoryJobs - historyJobs, err := GetHistoryDDLJobs(txn, DefNumHistoryJobs) + historyJobs, err := GetLastNHistoryDDLJobs(m, DefNumHistoryJobs) require.NoError(t, err) require.Len(t, historyJobs, MaxHistoryJobs) diff --git a/ddl/ddl_worker.go b/ddl/ddl_worker.go index b22d11e754609..2e1d50435189f 100644 --- a/ddl/ddl_worker.go +++ b/ddl/ddl_worker.go @@ -450,7 +450,7 @@ func jobNeedGC(job *model.Job) bool { // After rolling back an AddIndex operation, we need to use delete-range to delete the half-done index data. return true case model.ActionDropSchema, model.ActionDropTable, model.ActionTruncateTable, model.ActionDropIndex, model.ActionDropPrimaryKey, - model.ActionDropTablePartition, model.ActionTruncateTablePartition, model.ActionDropColumn, model.ActionModifyColumn, model.ActionDropIndexes: + model.ActionDropTablePartition, model.ActionTruncateTablePartition, model.ActionDropColumn, model.ActionModifyColumn: return true case model.ActionMultiSchemaChange: for _, sub := range job.MultiSchemaInfo.SubJobs { @@ -933,8 +933,6 @@ func (w *worker) runDDLJob(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, ver, err = w.onCreateIndex(d, t, job, true) case model.ActionDropIndex, model.ActionDropPrimaryKey: ver, err = onDropIndex(d, t, job) - case model.ActionDropIndexes: - ver, err = onDropIndexes(d, t, job) case model.ActionRenameIndex: ver, err = onRenameIndex(d, t, job) case model.ActionAddForeignKey: diff --git a/ddl/ddl_worker_test.go b/ddl/ddl_worker_test.go index 379c2b34df24a..6c21be950aead 100644 --- a/ddl/ddl_worker_test.go +++ b/ddl/ddl_worker_test.go @@ -11,7 +11,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
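GetAllHistoryDDLJobs above drains an iterator in fixed-size batches, stops when a short batch comes back, and then sorts the accumulated jobs by ID. The same drain-until-short-batch shape, shown on a toy iterator — the intIterator type and batch size here are illustrative, not the meta package's API:

package main

import (
	"fmt"
	"sort"
)

const batchSize = 4

// intIterator is a toy stand-in for an iterator that yields at most n items per call.
type intIterator struct {
	data []int
	pos  int
}

func (it *intIterator) next(n int) []int {
	if it.pos >= len(it.data) {
		return nil
	}
	end := it.pos + n
	if end > len(it.data) {
		end = len(it.data)
	}
	out := it.data[it.pos:end]
	it.pos = end
	return out
}

func main() {
	it := &intIterator{data: []int{9, 3, 7, 1, 8, 2, 6}}
	var all []int
	for {
		batch := it.next(batchSize)
		all = append(all, batch...)
		// A short batch means the iterator is exhausted.
		if len(batch) < batchSize {
			break
		}
	}
	sort.Ints(all) // the patch sorts jobs by ID via a sort.Interface implementation
	fmt.Println(all)
}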
-// + package ddl_test import ( diff --git a/ddl/delete_range.go b/ddl/delete_range.go index 636c5d54edb2e..644ef71eaf874 100644 --- a/ddl/delete_range.go +++ b/ddl/delete_range.go @@ -324,8 +324,9 @@ func insertJobIntoDeleteRangeTable(ctx context.Context, sctx sessionctx.Context, case model.ActionAddIndex, model.ActionAddPrimaryKey: tableID := job.TableID var indexID int64 + var ifExists bool var partitionIDs []int64 - if err := job.DecodeArgs(&indexID, &partitionIDs); err != nil { + if err := job.DecodeArgs(&indexID, &ifExists, &partitionIDs); err != nil { return errors.Trace(err) } if len(partitionIDs) > 0 { @@ -346,9 +347,10 @@ func insertJobIntoDeleteRangeTable(ctx context.Context, sctx sessionctx.Context, case model.ActionDropIndex, model.ActionDropPrimaryKey: tableID := job.TableID var indexName interface{} + var ifExists bool var indexID int64 var partitionIDs []int64 - if err := job.DecodeArgs(&indexName, &indexID, &partitionIDs); err != nil { + if err := job.DecodeArgs(&indexName, &ifExists, &indexID, &partitionIDs); err != nil { return errors.Trace(err) } if len(partitionIDs) > 0 { @@ -366,24 +368,6 @@ func insertJobIntoDeleteRangeTable(ctx context.Context, sctx sessionctx.Context, elemID := ea.allocForIndexID(tableID, indexID) return doInsert(ctx, s, job.ID, elemID, startKey, endKey, now, fmt.Sprintf("index ID is %d", indexID)) } - case model.ActionDropIndexes: - var indexIDs []int64 - var partitionIDs []int64 - if err := job.DecodeArgs(&[]model.CIStr{}, &[]bool{}, &indexIDs, &partitionIDs); err != nil { - return errors.Trace(err) - } - // Remove data in TiKV. - if len(indexIDs) == 0 { - return nil - } - if len(partitionIDs) == 0 { - return doBatchDeleteIndiceRange(ctx, s, job.ID, job.TableID, indexIDs, now, ea) - } - for _, pID := range partitionIDs { - if err := doBatchDeleteIndiceRange(ctx, s, job.ID, pID, indexIDs, now, ea); err != nil { - return errors.Trace(err) - } - } case model.ActionDropColumn: var colName model.CIStr var ifExists bool diff --git a/ddl/index.go b/ddl/index.go index 6701a1e905d32..4016cee19f59a 100644 --- a/ddl/index.go +++ b/ddl/index.go @@ -46,6 +46,7 @@ import ( "github.com/tikv/client-go/v2/oracle" "github.com/tikv/client-go/v2/tikv" "go.uber.org/zap" + "golang.org/x/exp/slices" ) const ( @@ -634,19 +635,23 @@ func doReorgWorkForCreateIndex(w *worker, d *ddlCtx, t *meta.Meta, job *model.Jo } func onDropIndex(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error) { - tblInfo, indexInfo, err := checkDropIndex(t, job) + tblInfo, indexInfo, ifExists, err := checkDropIndex(t, job) if err != nil { + if ifExists && dbterror.ErrCantDropFieldOrKey.Equal(err) { + job.Warning = toTError(err) + job.FinishTableJob(model.JobStateDone, model.StateNone, ver, tblInfo) + return ver, nil + } return ver, errors.Trace(err) } if tblInfo.TableCacheStatusType != model.TableCacheStatusDisable { return ver, errors.Trace(dbterror.ErrOptOnCacheTable.GenWithStackByArgs("Drop Index")) } - dependentHiddenCols := make([]*model.ColumnInfo, 0) - for _, indexColumn := range indexInfo.Columns { - if tblInfo.Columns[indexColumn.Offset].Hidden { - dependentHiddenCols = append(dependentHiddenCols, tblInfo.Columns[indexColumn.Offset]) - } + if job.MultiSchemaInfo != nil && !job.IsRollingback() && job.MultiSchemaInfo.Revertible { + job.MarkNonRevertible() + job.SchemaState = indexInfo.State + return updateVersionAndTableInfo(d, t, job, tblInfo, false) } originalState := indexInfo.State @@ -675,24 +680,11 @@ func onDropIndex(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ 
error) { case model.StateDeleteReorganization: // reorganization -> absent indexInfo.State = model.StateNone - if len(dependentHiddenCols) > 0 { - firstHiddenOffset := dependentHiddenCols[0].Offset - for i := 0; i < len(dependentHiddenCols); i++ { - // Set this column's offset to the last and reset all following columns' offsets. - adjustColumnInfoInDropColumn(tblInfo, firstHiddenOffset) - } - } - newIndices := make([]*model.IndexInfo, 0, len(tblInfo.Indices)) - for _, idx := range tblInfo.Indices { - if idx.Name.L != indexInfo.Name.L { - newIndices = append(newIndices, idx) - } - } - tblInfo.Indices = newIndices // Set column index flag. dropIndexColumnFlag(tblInfo, indexInfo) + removeDependentHiddenColumns(tblInfo, indexInfo) + removeIndexInfo(tblInfo, indexInfo) - tblInfo.Columns = tblInfo.Columns[:len(tblInfo.Columns)-len(dependentHiddenCols)] failpoint.Inject("mockExceedErrorLimit", func(val failpoint.Value) { if val.(bool) { panic("panic test in cancelling add index") @@ -721,197 +713,75 @@ func onDropIndex(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error) { return ver, errors.Trace(err) } -func checkDropIndex(t *meta.Meta, job *model.Job) (*model.TableInfo, *model.IndexInfo, error) { +func removeDependentHiddenColumns(tblInfo *model.TableInfo, idxInfo *model.IndexInfo) { + hiddenColOffs := make([]int, 0) + for _, indexColumn := range idxInfo.Columns { + col := tblInfo.Columns[indexColumn.Offset] + if col.Hidden { + hiddenColOffs = append(hiddenColOffs, col.Offset) + } + } + // Sort the offset in descending order. + slices.SortFunc(hiddenColOffs, func(a, b int) bool { return a > b }) + // Move all the dependent hidden columns to the end. + endOffset := len(tblInfo.Columns) - 1 + for _, offset := range hiddenColOffs { + tblInfo.MoveColumnInfo(offset, endOffset) + } + tblInfo.Columns = tblInfo.Columns[:len(tblInfo.Columns)-len(hiddenColOffs)] +} + +func removeIndexInfo(tblInfo *model.TableInfo, idxInfo *model.IndexInfo) { + indices := tblInfo.Indices + offset := -1 + for i, idx := range indices { + if idxInfo.ID == idx.ID { + offset = i + break + } + } + if offset == -1 { + // The target index has been removed. + return + } + // Remove the target index. + tblInfo.Indices = append(tblInfo.Indices[:offset], tblInfo.Indices[offset+1:]...) +} + +func checkDropIndex(t *meta.Meta, job *model.Job) (*model.TableInfo, *model.IndexInfo, bool /* ifExists */, error) { schemaID := job.SchemaID tblInfo, err := GetTableInfoAndCancelFaultJob(t, job, schemaID) if err != nil { - return nil, nil, errors.Trace(err) + return nil, nil, false, errors.Trace(err) } var indexName model.CIStr - if err = job.DecodeArgs(&indexName); err != nil { + var ifExists bool + if err = job.DecodeArgs(&indexName, &ifExists); err != nil { job.State = model.JobStateCancelled - return nil, nil, errors.Trace(err) + return nil, nil, false, errors.Trace(err) } indexInfo := tblInfo.FindIndexByName(indexName.L) if indexInfo == nil { job.State = model.JobStateCancelled - return nil, nil, dbterror.ErrCantDropFieldOrKey.GenWithStack("index %s doesn't exist", indexName) + return nil, nil, ifExists, dbterror.ErrCantDropFieldOrKey.GenWithStack("index %s doesn't exist", indexName) } // Double check for drop index on auto_increment column. err = checkDropIndexOnAutoIncrementColumn(tblInfo, indexInfo) if err != nil { job.State = model.JobStateCancelled - return nil, nil, autoid.ErrWrongAutoKey + return nil, nil, false, autoid.ErrWrongAutoKey } // Check that drop primary index will not cause invisible implicit primary index. 
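removeDependentHiddenColumns above deletes hidden columns by moving each one to the tail of the column slice and then truncating, processing offsets in descending order so that earlier moves do not invalidate the offsets still to be processed. A stripped-down version of that idea on a plain string slice; moveToEnd is a stand-in for TableInfo.MoveColumnInfo, not its real signature:

package main

import (
	"fmt"
	"sort"
)

// moveToEnd shifts the element at offset to the end of s, preserving the
// relative order of everything after it (a stand-in for MoveColumnInfo).
func moveToEnd(s []string, offset int) {
	v := s[offset]
	copy(s[offset:], s[offset+1:])
	s[len(s)-1] = v
}

func main() {
	cols := []string{"a", "b", "_hidden1", "c", "_hidden2"}
	toDrop := []int{2, 4} // offsets of the hidden columns

	// Descending order: moving a later element first keeps the earlier
	// offsets valid while we process them.
	sort.Sort(sort.Reverse(sort.IntSlice(toDrop)))
	for _, off := range toDrop {
		moveToEnd(cols, off)
	}
	cols = cols[:len(cols)-len(toDrop)]
	fmt.Println(cols) // [a b c]
}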
if err := checkInvisibleIndexesOnPK(tblInfo, []*model.IndexInfo{indexInfo}, job); err != nil { - return nil, nil, errors.Trace(err) - } - - return tblInfo, indexInfo, nil -} - -func onDropIndexes(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error) { - tblInfo, indexNames, ifExists, err := getSchemaInfos(t, job) - if err != nil { - return ver, errors.Trace(err) - } - if tblInfo.TableCacheStatusType != model.TableCacheStatusDisable { - return ver, errors.Trace(dbterror.ErrOptOnCacheTable.GenWithStackByArgs("Drop Indexes")) - } - - indexInfos, err := checkDropIndexes(tblInfo, job, indexNames, ifExists) - if err != nil { - return ver, errors.Trace(err) - } - - if len(indexInfos) == 0 { job.State = model.JobStateCancelled - return ver, nil + return nil, nil, false, errors.Trace(err) } - dependentHiddenCols := make([]*model.ColumnInfo, 0) - for _, indexInfo := range indexInfos { - for _, indexColumn := range indexInfo.Columns { - if tblInfo.Columns[indexColumn.Offset].Hidden { - dependentHiddenCols = append(dependentHiddenCols, tblInfo.Columns[indexColumn.Offset]) - } - } - } - - originalState := indexInfos[0].State - switch indexInfos[0].State { - case model.StatePublic: - // public -> write only - setIndicesState(indexInfos, model.StateWriteOnly) - setColumnsState(dependentHiddenCols, model.StateWriteOnly) - for _, colInfo := range dependentHiddenCols { - adjustColumnInfoInDropColumn(tblInfo, colInfo.Offset) - } - ver, err = updateVersionAndTableInfo(d, t, job, tblInfo, originalState != indexInfos[0].State) - if err != nil { - return ver, errors.Trace(err) - } - job.SchemaState = model.StateWriteOnly - case model.StateWriteOnly: - // write only -> delete only - setIndicesState(indexInfos, model.StateDeleteOnly) - setColumnsState(dependentHiddenCols, model.StateDeleteOnly) - ver, err = updateVersionAndTableInfo(d, t, job, tblInfo, originalState != indexInfos[0].State) - if err != nil { - return ver, errors.Trace(err) - } - job.SchemaState = model.StateDeleteOnly - case model.StateDeleteOnly: - // delete only -> reorganization - setIndicesState(indexInfos, model.StateDeleteReorganization) - setColumnsState(dependentHiddenCols, model.StateDeleteReorganization) - ver, err = updateVersionAndTableInfo(d, t, job, tblInfo, originalState != indexInfos[0].State) - if err != nil { - return ver, errors.Trace(err) - } - job.SchemaState = model.StateDeleteReorganization - case model.StateDeleteReorganization: - // reorganization -> absent - indexIDs := make([]int64, 0, len(indexInfos)) - indexNames := make(map[string]bool, len(indexInfos)) - for _, indexInfo := range indexInfos { - indexNames[indexInfo.Name.L] = true - indexIDs = append(indexIDs, indexInfo.ID) - } - - newIndices := make([]*model.IndexInfo, 0, len(tblInfo.Indices)) - for _, idx := range tblInfo.Indices { - if _, ok := indexNames[idx.Name.L]; !ok { - newIndices = append(newIndices, idx) - } - } - tblInfo.Indices = newIndices - - // Set column index flag. 
- for _, indexInfo := range indexInfos { - dropIndexColumnFlag(tblInfo, indexInfo) - } - - tblInfo.Columns = tblInfo.Columns[:len(tblInfo.Columns)-len(dependentHiddenCols)] - - ver, err = updateVersionAndTableInfoWithCheck(d, t, job, tblInfo, originalState != model.StateNone) - if err != nil { - return ver, errors.Trace(err) - } - - job.FinishTableJob(model.JobStateDone, model.StateNone, ver, tblInfo) - job.Args = append(job.Args, indexIDs, getPartitionIDs(tblInfo)) - default: - err = dbterror.ErrInvalidDDLState.GenWithStackByArgs("index", indexInfos[0].State) - } - - return ver, errors.Trace(err) -} - -func getSchemaInfos(t *meta.Meta, job *model.Job) (*model.TableInfo, []model.CIStr, []bool, error) { - schemaID := job.SchemaID - tblInfo, err := GetTableInfoAndCancelFaultJob(t, job, schemaID) - if err != nil { - return nil, nil, nil, errors.Trace(err) - } - - var indexNames []model.CIStr - var ifExists []bool - if err = job.DecodeArgs(&indexNames, &ifExists); err != nil { - return nil, nil, nil, errors.Trace(err) - } - - return tblInfo, indexNames, ifExists, nil -} - -func checkDropIndexes(tblInfo *model.TableInfo, job *model.Job, indexNames []model.CIStr, ifExists []bool) ([]*model.IndexInfo, error) { - var warnings []*errors.Error - indexInfos := make([]*model.IndexInfo, 0, len(indexNames)) - UniqueIndexNames := make(map[model.CIStr]bool, len(indexNames)) - for i, indexName := range indexNames { - // Double check the index is exists. - indexInfo := tblInfo.FindIndexByName(indexName.L) - if indexInfo == nil { - if ifExists[i] { - warnings = append(warnings, toTError(dbterror.ErrCantDropFieldOrKey.GenWithStack("index %s doesn't exist", indexName))) - continue - } - job.State = model.JobStateCancelled - return nil, dbterror.ErrCantDropFieldOrKey.GenWithStack("index %s doesn't exist", indexName) - } - - // Double check for drop index on auto_increment column. - if err := checkDropIndexOnAutoIncrementColumn(tblInfo, indexInfo); err != nil { - job.State = model.JobStateCancelled - return nil, autoid.ErrWrongAutoKey - } - - // Check for dropping duplicate indexes. - if UniqueIndexNames[indexName] { - if !ifExists[i] { - job.State = model.JobStateCancelled - return nil, dbterror.ErrCantDropFieldOrKey.GenWithStack("index %s doesn't exist", indexName) - } - warnings = append(warnings, toTError(dbterror.ErrCantDropFieldOrKey.GenWithStack("index %s doesn't exist", indexName))) - } - UniqueIndexNames[indexName] = true - - indexInfos = append(indexInfos, indexInfo) - } - - // Check that drop primary index will not cause invisible implicit primary index. - if err := checkInvisibleIndexesOnPK(tblInfo, indexInfos, job); err != nil { - return nil, errors.Trace(err) - } - - job.MultiSchemaInfo = &model.MultiSchemaInfo{Warnings: warnings} - - return indexInfos, nil + return tblInfo, indexInfo, false, nil } func checkInvisibleIndexesOnPK(tblInfo *model.TableInfo, indexInfos []*model.IndexInfo, job *model.Job) error { diff --git a/ddl/index_modify_test.go b/ddl/index_modify_test.go index 13e423f5b9b66..bec851c44d450 100644 --- a/ddl/index_modify_test.go +++ b/ddl/index_modify_test.go @@ -886,16 +886,18 @@ func testDropIndexesIfExists(t *testing.T, store kv.Storage) { tk.MustQuery("show warnings;").Check(testkit.RowsWithSep("|", "Note|1091|index i3 doesn't exist")) // Verify the impact of deletion order when dropping duplicate indexes. 
- tk.MustGetErrMsg( + tk.MustGetErrCode( "alter table test_drop_indexes_if_exists drop index i2, drop index i2;", - "[ddl:1091]index i2 doesn't exist", + errno.ErrUnsupportedDDLOperation, ) - tk.MustGetErrMsg( + tk.MustGetErrCode( "alter table test_drop_indexes_if_exists drop index if exists i2, drop index i2;", - "[ddl:1091]index i2 doesn't exist", + errno.ErrUnsupportedDDLOperation, + ) + tk.MustGetErrCode( + "alter table test_drop_indexes_if_exists drop index i2, drop index if exists i2;", + errno.ErrUnsupportedDDLOperation, ) - tk.MustExec("alter table test_drop_indexes_if_exists drop index i2, drop index if exists i2;") - tk.MustQuery("show warnings;").Check(testkit.RowsWithSep("|", "Note|1091|index i2 doesn't exist")) } func testDropIndexesFromPartitionedTable(t *testing.T, store kv.Storage) { @@ -911,7 +913,8 @@ func testDropIndexesFromPartitionedTable(t *testing.T, store kv.Storage) { } tk.MustExec("alter table test_drop_indexes_from_partitioned_table drop index i1, drop index if exists i2;") tk.MustExec("alter table test_drop_indexes_from_partitioned_table add index i1(c1)") - tk.MustExec("alter table test_drop_indexes_from_partitioned_table drop index i1, drop index if exists i1;") + tk.MustGetErrCode("alter table test_drop_indexes_from_partitioned_table drop index i1, drop index if exists i1;", + errno.ErrUnsupportedDDLOperation) tk.MustExec("alter table test_drop_indexes_from_partitioned_table drop column c1, drop column c2;") tk.MustExec("alter table test_drop_indexes_from_partitioned_table add column c1 int") tk.MustGetErrCode("alter table test_drop_indexes_from_partitioned_table drop column c1, drop column if exists c1;", diff --git a/ddl/multi_schema_change.go b/ddl/multi_schema_change.go index ca1e60056fae8..20e62d89ed22b 100644 --- a/ddl/multi_schema_change.go +++ b/ddl/multi_schema_change.go @@ -186,6 +186,9 @@ func fillMultiSchemaInfo(info *model.MultiSchemaInfo, job *model.Job) (err error case model.ActionDropColumn: colName := job.Args[0].(model.CIStr) info.DropColumns = append(info.DropColumns, colName) + case model.ActionDropIndex, model.ActionDropPrimaryKey: + indexName := job.Args[0].(model.CIStr) + info.DropIndexes = append(info.DropIndexes, indexName) default: return dbterror.ErrRunMultiSchemaChanges } diff --git a/ddl/multi_schema_change_test.go b/ddl/multi_schema_change_test.go index acc778258834a..73f286a67efd1 100644 --- a/ddl/multi_schema_change_test.go +++ b/ddl/multi_schema_change_test.go @@ -222,6 +222,28 @@ func TestMultiSchemaChangeDropColumnsCancelled(t *testing.T) { tk.MustQuery("select * from t;").Check(testkit.Rows("1 2 3 4")) } +func TestMultiSchemaChangeDropIndexedColumnsCancelled(t *testing.T) { + store, dom, clean := testkit.CreateMockStoreAndDomain(t) + defer clean() + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + originHook := dom.DDL().GetHook() + + // Test for cancelling the job in a middle state. + tk.MustExec("create table t (a int default 1, b int default 2, c int default 3, d int default 4, " + + "index(a), index(b), index(c), index(d));") + tk.MustExec("insert into t values ();") + hook := newCancelJobHook(store, dom, func(job *model.Job) bool { + // Cancel job when the column 'a' is in delete-reorg. 
+ return job.MultiSchemaInfo.SubJobs[1].SchemaState == model.StateDeleteReorganization + }) + dom.DDL().SetHook(hook) + tk.MustExec("alter table t drop column b, drop column a, drop column d;") + dom.DDL().SetHook(originHook) + hook.MustCancelFailed(t) + tk.MustQuery("select * from t;").Check(testkit.Rows("3")) +} + func TestMultiSchemaChangeDropColumnsParallel(t *testing.T) { store, clean := testkit.CreateMockStore(t) defer clean() @@ -241,6 +263,115 @@ func TestMultiSchemaChangeDropColumnsParallel(t *testing.T) { }) } +func TestMultiSchemaChangeAddDropColumns(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test;") + + // [a, b] -> [+c, -a, +d, -b] -> [c, d] + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t (a int default 1, b int default 2);") + tk.MustExec("insert into t values ();") + tk.MustExec("alter table t add column c int default 3, drop column a, add column d int default 4, drop column b;") + tk.MustQuery("select * from t;").Check(testkit.Rows("3 4")) + + // [a, b] -> [-a, -b, +c, +d] -> [c, d] + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t (a int default 1, b int default 2);") + tk.MustExec("insert into t values ();") + tk.MustExec("alter table t drop column a, drop column b, add column c int default 3, add column d int default 4;") + tk.MustQuery("select * from t;").Check(testkit.Rows("3 4")) + + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t (a int default 1, b int default 2);") + tk.MustExec("insert into t values ();") + tk.MustGetErrCode("alter table t add column c int default 3 after a, add column d int default 4 first, drop column a, drop column b;", errno.ErrUnsupportedDDLOperation) +} + +func TestMultiSchemaChangeDropIndexes(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test;") + + // Test drop same index. + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t (a int, b int, c int, index t(a));") + tk.MustGetErrCode("alter table t drop index t, drop index t", errno.ErrUnsupportedDDLOperation) + + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t (id int, c1 int, c2 int, primary key(id) nonclustered, key i1(c1), key i2(c2), key i3(c1, c2));") + tk.MustExec("insert into t values (1, 2, 3);") + tk.MustExec("alter table t drop index i1, drop index i2;") + tk.MustGetErrCode("select * from t use index(i1);", errno.ErrKeyDoesNotExist) + tk.MustGetErrCode("select * from t use index(i2);", errno.ErrKeyDoesNotExist) + tk.MustExec("alter table t drop index i3, drop primary key;") + tk.MustGetErrCode("select * from t use index(primary);", errno.ErrKeyDoesNotExist) + tk.MustGetErrCode("select * from t use index(i3);", errno.ErrKeyDoesNotExist) + + // Test drop index with drop column. + tk.MustExec("drop table if exists t") + tk.MustExec("create table t (a int default 1, b int default 2, c int default 3, index t(a))") + tk.MustExec("insert into t values ();") + tk.MustExec("alter table t drop index t, drop column a") + tk.MustGetErrCode("select * from t force index(t)", errno.ErrKeyDoesNotExist) +} + +func TestMultiSchemaChangeDropIndexesCancelled(t *testing.T) { + store, dom, clean := testkit.CreateMockStoreAndDomain(t) + defer clean() + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test;") + originHook := dom.DDL().GetHook() + + // Test for cancelling the job in a middle state. 
+ tk.MustExec("create table t (a int, b int, index(a), unique index(b), index idx(a, b));") + hook := newCancelJobHook(store, dom, func(job *model.Job) bool { + return job.MultiSchemaInfo.SubJobs[1].SchemaState == model.StateDeleteOnly + }) + dom.DDL().SetHook(hook) + tk.MustExec("alter table t drop index a, drop index b, drop index idx;") + dom.DDL().SetHook(originHook) + hook.MustCancelFailed(t) + tk.MustGetErrCode("select * from t use index (a);", errno.ErrKeyDoesNotExist) + tk.MustGetErrCode("select * from t use index (b);", errno.ErrKeyDoesNotExist) + tk.MustGetErrCode("select * from t use index (idx);", errno.ErrKeyDoesNotExist) + + // Test for cancelling the job in none state. + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t (a int, b int, index(a), unique index(b), index idx(a, b));") + hook = newCancelJobHook(store, dom, func(job *model.Job) bool { + return job.MultiSchemaInfo.SubJobs[1].SchemaState == model.StatePublic + }) + dom.DDL().SetHook(hook) + tk.MustGetErrCode("alter table t drop index a, drop index b, drop index idx;", errno.ErrCancelledDDLJob) + dom.DDL().SetHook(originHook) + hook.MustCancelDone(t) + tk.MustQuery("select * from t use index (a);").Check(testkit.Rows()) + tk.MustQuery("select * from t use index (b);").Check(testkit.Rows()) + tk.MustQuery("select * from t use index (idx);").Check(testkit.Rows()) +} + +func TestMultiSchemaChangeDropIndexesParallel(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("create table t (a int, b int, c int, index(a), index(b), index(c));") + putTheSameDDLJobTwice(t, func() { + tk.MustExec("alter table t drop index if exists b, drop index if exists c;") + tk.MustQuery("show warnings").Check(testkit.Rows( + "Note 1091 index b doesn't exist", + "Note 1091 index c doesn't exist")) + }) + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t (a int, b int, c int, index (a), index(b), index(c));") + putTheSameDDLJobTwice(t, func() { + tk.MustGetErrCode("alter table t drop index b, drop index a;", errno.ErrCantDropFieldOrKey) + }) +} + type cancelOnceHook struct { store kv.Storage triggered bool diff --git a/ddl/rollingback.go b/ddl/rollingback.go index bcb89b591c796..e13a73742bc89 100644 --- a/ddl/rollingback.go +++ b/ddl/rollingback.go @@ -60,8 +60,8 @@ func convertAddIdxJob2RollbackJob(d *ddlCtx, t *meta.Meta, job *model.Job, tblIn } } - // the second args will be used in onDropIndex. - job.Args = []interface{}{indexInfo.Name, getPartitionIDs(tblInfo)} + // the second and the third args will be used in onDropIndex. + job.Args = []interface{}{indexInfo.Name, false /* ifExists */, getPartitionIDs(tblInfo)} // If add index job rollbacks in write reorganization state, its need to delete all keys which has been added. // Its work is the same as drop index job do. // The write reorganization state in add index job that likes write only state in drop index job. 
@@ -208,7 +208,7 @@ func rollingbackDropColumn(t *meta.Meta, job *model.Job) (ver int64, err error) } func rollingbackDropIndex(t *meta.Meta, job *model.Job) (ver int64, err error) { - _, indexInfo, err := checkDropIndex(t, job) + _, indexInfo, _, err := checkDropIndex(t, job) if err != nil { return ver, errors.Trace(err) } @@ -227,43 +227,6 @@ func rollingbackDropIndex(t *meta.Meta, job *model.Job) (ver int64, err error) { } } -func rollingbackDropIndexes(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, err error) { - tblInfo, indexNames, ifExists, err := getSchemaInfos(t, job) - if err != nil { - return ver, errors.Trace(err) - } - - indexInfos, err := checkDropIndexes(tblInfo, job, indexNames, ifExists) - if err != nil { - return ver, errors.Trace(err) - } - - indexInfo := indexInfos[0] - originalState := indexInfo.State - switch indexInfo.State { - case model.StateWriteOnly, model.StateDeleteOnly, model.StateDeleteReorganization, model.StateNone: - // We can not rollback now, so just continue to drop index. - // Normally won't fetch here, because there is a check when canceling DDL jobs. See function: IsRollbackable. - job.State = model.JobStateRunning - return ver, nil - case model.StatePublic: - job.State = model.JobStateRollbackDone - for _, indexInfo := range indexInfos { - indexInfo.State = model.StatePublic - } - default: - return ver, dbterror.ErrInvalidDDLState.GenWithStackByArgs("index", indexInfo.State) - } - - job.SchemaState = indexInfo.State - ver, err = updateVersionAndTableInfo(d, t, job, tblInfo, originalState != indexInfo.State) - if err != nil { - return ver, errors.Trace(err) - } - job.FinishTableJob(model.JobStateRollbackDone, model.StatePublic, ver, tblInfo) - return ver, dbterror.ErrCancelledDDLJob -} - func rollingbackAddIndex(w *worker, d *ddlCtx, t *meta.Meta, job *model.Job, isPK bool) (ver int64, err error) { // If the value of SnapshotVer isn't zero, it means the work is backfilling the indexes. 
if job.SchemaState == model.StateWriteReorganization && job.SnapshotVer != 0 { @@ -394,8 +357,6 @@ func convertJob2RollbackJob(w *worker, d *ddlCtx, t *meta.Meta, job *model.Job) ver, err = rollingbackDropColumn(t, job) case model.ActionDropIndex, model.ActionDropPrimaryKey: ver, err = rollingbackDropIndex(t, job) - case model.ActionDropIndexes: - ver, err = rollingbackDropIndexes(d, t, job) case model.ActionDropTable, model.ActionDropView, model.ActionDropSequence: err = rollingbackDropTableOrView(t, job) case model.ActionDropTablePartition: diff --git a/ddl/sanity_check.go b/ddl/sanity_check.go index e81b9f31b5c5a..9f0f540b20793 100644 --- a/ddl/sanity_check.go +++ b/ddl/sanity_check.go @@ -102,27 +102,21 @@ func expectedDeleteRangeCnt(job *model.Job) (int, error) { return len(physicalTableIDs), nil case model.ActionAddIndex, model.ActionAddPrimaryKey: var indexID int64 + var ifExists bool var partitionIDs []int64 - if err := job.DecodeArgs(&indexID, &partitionIDs); err != nil { + if err := job.DecodeArgs(&indexID, &ifExists, &partitionIDs); err != nil { return 0, errors.Trace(err) } return mathutil.Max(len(partitionIDs), 1), nil case model.ActionDropIndex, model.ActionDropPrimaryKey: var indexName interface{} + var ifNotExists bool var indexID int64 var partitionIDs []int64 - if err := job.DecodeArgs(&indexName, &indexID, &partitionIDs); err != nil { + if err := job.DecodeArgs(&indexName, &ifNotExists, &indexID, &partitionIDs); err != nil { return 0, errors.Trace(err) } return mathutil.Max(len(partitionIDs), 1), nil - case model.ActionDropIndexes: - var indexIDs []int64 - var partitionIDs []int64 - if err := job.DecodeArgs(&[]model.CIStr{}, &[]bool{}, &indexIDs, &partitionIDs); err != nil { - return 0, errors.Trace(err) - } - physicalCnt := mathutil.Max(len(partitionIDs), 1) - return physicalCnt * len(indexIDs), nil case model.ActionDropColumn: var colName model.CIStr var ifExists bool diff --git a/ddl/stat.go b/ddl/stat.go index 561a9be90ebb3..24462f9bb141a 100644 --- a/ddl/stat.go +++ b/ddl/stat.go @@ -15,10 +15,7 @@ package ddl import ( - "context" - "github.com/pingcap/errors" - "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/sessionctx/variable" ) @@ -51,15 +48,12 @@ func (d *ddl) Stats(vars *variable.SessionVars) (map[string]interface{}, error) m[serverID] = d.uuid var ddlInfo *Info - ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL) - err := kv.RunInNewTxn(ctx, d.store, false, func(ctx context.Context, txn kv.Transaction) error { - var err1 error - ddlInfo, err1 = GetDDLInfo(txn) - if err1 != nil { - return errors.Trace(err1) - } - return errors.Trace(err1) - }) + s, err := d.sessPool.get() + if err != nil { + return nil, errors.Trace(err) + } + defer d.sessPool.put(s) + ddlInfo, err = GetDDLInfoWithNewTxn(s) if err != nil { return nil, errors.Trace(err) } diff --git a/ddl/stat_test.go b/ddl/stat_test.go index ca91366e4f0d6..6b40869a18ab9 100644 --- a/ddl/stat_test.go +++ b/ddl/stat_test.go @@ -20,6 +20,8 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/ddl" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/meta" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/sessionctx" @@ -86,6 +88,66 @@ func TestDDLStatsInfo(t *testing.T) { } } +func TestGetDDLInfo(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + + sess := testkit.NewTestKit(t, store).Session() + _, err := sess.Execute(context.Background(), "begin") + require.NoError(t, err) + txn, err := 
sess.Txn(true) + require.NoError(t, err) + + dbInfo2 := &model.DBInfo{ + ID: 2, + Name: model.NewCIStr("b"), + State: model.StateNone, + } + job := &model.Job{ + ID: 1, + SchemaID: dbInfo2.ID, + Type: model.ActionCreateSchema, + RowCount: 0, + } + job1 := &model.Job{ + ID: 2, + SchemaID: dbInfo2.ID, + Type: model.ActionAddIndex, + RowCount: 0, + } + + err = addDDLJobs(txn, job) + require.NoError(t, err) + + info, err := ddl.GetDDLInfo(sess) + require.NoError(t, err) + require.Len(t, info.Jobs, 1) + require.Equal(t, job, info.Jobs[0]) + require.Nil(t, info.ReorgHandle) + + // two jobs + err = addDDLJobs(txn, job1) + require.NoError(t, err) + + info, err = ddl.GetDDLInfo(sess) + require.NoError(t, err) + require.Len(t, info.Jobs, 2) + require.Equal(t, job, info.Jobs[0]) + require.Equal(t, job1, info.Jobs[1]) + require.Nil(t, info.ReorgHandle) + + _, err = sess.Execute(context.Background(), "rollback") + require.NoError(t, err) +} + +func addDDLJobs(txn kv.Transaction, job *model.Job) error { + m := meta.NewMeta(txn) + if job.MayNeedReorg() { + return m.EnQueueDDLJob(job, meta.AddIndexJobListKey) + } + return m.EnQueueDDLJob(job) +} + func buildCreateIdxJob(dbInfo *model.DBInfo, tblInfo *model.TableInfo, unique bool, indexName string, colName string) *model.Job { return &model.Job{ SchemaID: dbInfo.ID, diff --git a/distsql/request_builder.go b/distsql/request_builder.go index f1a0309fd1308..2bd7c5df65c04 100644 --- a/distsql/request_builder.go +++ b/distsql/request_builder.go @@ -311,15 +311,8 @@ func (builder *RequestBuilder) SetResourceGroupTagger(tagger tikvrpc.ResourceGro } func (builder *RequestBuilder) verifyTxnScope() error { - // Stale Read uses the calculated TSO for the read, - // so there is no need to check the TxnScope here. - if builder.IsStaleness { - return nil - } - if builder.ReadReplicaScope == "" { - builder.ReadReplicaScope = kv.GlobalReplicaScope - } - if builder.ReadReplicaScope == kv.GlobalReplicaScope || builder.is == nil { + txnScope := builder.TxnScope + if txnScope == "" || txnScope == kv.GlobalReplicaScope || builder.is == nil { return nil } visitPhysicalTableID := make(map[int64]struct{}) @@ -333,7 +326,7 @@ func (builder *RequestBuilder) verifyTxnScope() error { } for phyTableID := range visitPhysicalTableID { - valid := VerifyTxnScope(builder.ReadReplicaScope, phyTableID, builder.is) + valid := VerifyTxnScope(txnScope, phyTableID, builder.is) if !valid { var tblName string var partName string @@ -345,10 +338,10 @@ func (builder *RequestBuilder) verifyTxnScope() error { tblInfo, _ = builder.is.TableByID(phyTableID) tblName = tblInfo.Meta().Name.String() } - err := fmt.Errorf("table %v can not be read by %v txn_scope", tblName, builder.ReadReplicaScope) + err := fmt.Errorf("table %v can not be read by %v txn_scope", tblName, txnScope) if len(partName) > 0 { err = fmt.Errorf("table %v's partition %v can not be read by %v txn_scope", - tblName, partName, builder.ReadReplicaScope) + tblName, partName, txnScope) } return err } @@ -356,6 +349,12 @@ func (builder *RequestBuilder) verifyTxnScope() error { return nil } +// SetTxnScope sets request TxnScope +func (builder *RequestBuilder) SetTxnScope(scope string) *RequestBuilder { + builder.TxnScope = scope + return builder +} + // SetReadReplicaScope sets request readReplicaScope func (builder *RequestBuilder) SetReadReplicaScope(scope string) *RequestBuilder { builder.ReadReplicaScope = scope diff --git a/executor/adapter.go b/executor/adapter.go index 89980ab3779ae..d56b26e63b1c7 100644 --- a/executor/adapter.go +++ 
b/executor/adapter.go @@ -197,8 +197,6 @@ type TelemetryInfo struct { type ExecStmt struct { // GoCtx stores parent go context.Context for a stmt. GoCtx context.Context - // ReplicaReadScope indicates the scope the store selector scope the request visited - ReplicaReadScope string // InfoSchema stores a reference to the schema information. InfoSchema infoschema.InfoSchema // Plan stores a reference to the final physical plan. @@ -229,7 +227,7 @@ func (a ExecStmt) GetStmtNode() ast.StmtNode { } // PointGet short path for point exec directly from plan, keep only necessary steps -func (a *ExecStmt) PointGet(ctx context.Context, is infoschema.InfoSchema) (*recordSet, error) { +func (a *ExecStmt) PointGet(ctx context.Context) (*recordSet, error) { if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil { span1 := span.Tracer().StartSpan("ExecStmt.PointGet", opentracing.ChildOf(span.Context())) span1.LogKV("sql", a.OriginText()) @@ -240,7 +238,7 @@ func (a *ExecStmt) PointGet(ctx context.Context, is infoschema.InfoSchema) (*rec sessiontxn.RecordAssert(a.Ctx, "assertTxnManagerInShortPointGetPlan", true) // stale read should not reach here staleread.AssertStmtStaleness(a.Ctx, false) - sessiontxn.AssertTxnManagerInfoSchema(a.Ctx, is) + sessiontxn.AssertTxnManagerInfoSchema(a.Ctx, a.InfoSchema) }) ctx = a.observeStmtBeginForTopSQL(ctx) @@ -264,7 +262,7 @@ func (a *ExecStmt) PointGet(ctx context.Context, is infoschema.InfoSchema) (*rec } } if a.PsStmt.Executor == nil { - b := newExecutorBuilder(a.Ctx, is, a.Ti, a.ReplicaReadScope) + b := newExecutorBuilder(a.Ctx, a.InfoSchema, a.Ti) newExecutor := b.build(a.Plan) if b.err != nil { return nil, b.err @@ -317,11 +315,14 @@ func (a *ExecStmt) RebuildPlan(ctx context.Context) (int64, error) { sessiontxn.RecordAssert(a.Ctx, "assertTxnManagerInRebuildPlan", true) sessiontxn.AssertTxnManagerInfoSchema(a.Ctx, ret.InfoSchema) staleread.AssertStmtStaleness(a.Ctx, ret.IsStaleness) + if ret.IsStaleness { + sessiontxn.AssertTxnManagerReadTS(a.Ctx, ret.LastSnapshotTS) + } }) a.InfoSchema = sessiontxn.GetTxnManager(a.Ctx).GetTxnInfoSchema() - a.ReplicaReadScope = ret.ReadReplicaScope - if a.Ctx.GetSessionVars().GetReplicaRead().IsClosestRead() && a.ReplicaReadScope == kv.GlobalReplicaScope { + replicaReadScope := sessiontxn.GetTxnManager(a.Ctx).GetReadReplicaScope() + if a.Ctx.GetSessionVars().GetReplicaRead().IsClosestRead() && replicaReadScope == kv.GlobalReplicaScope { logutil.BgLogger().Warn(fmt.Sprintf("tidb can't read closest replicas due to it haven't %s label", placement.DCLabelKey)) } p, names, err := planner.Optimize(ctx, a.Ctx, a.StmtNode, a.InfoSchema) @@ -824,7 +825,7 @@ func (a *ExecStmt) buildExecutor() (Executor, error) { ctx.GetSessionVars().StmtCtx.Priority = kv.PriorityLow } - b := newExecutorBuilder(ctx, a.InfoSchema, a.Ti, a.ReplicaReadScope) + b := newExecutorBuilder(ctx, a.InfoSchema, a.Ti) e := b.build(a.Plan) if b.err != nil { return nil, errors.Trace(b.err) diff --git a/executor/aggfuncs/func_group_concat.go b/executor/aggfuncs/func_group_concat.go index e838c78f1ff62..7a178cc4c6426 100644 --- a/executor/aggfuncs/func_group_concat.go +++ b/executor/aggfuncs/func_group_concat.go @@ -353,16 +353,17 @@ func (h *topNRows) tryToAdd(row sortRow) (truncated bool, memDelta int64) { for h.currSize > h.limitSize { debt := h.currSize - h.limitSize - if uint64(h.rows[0].buffer.Len()) > debt { + heapPopRow := heap.Pop(h).(sortRow) + if uint64(heapPopRow.buffer.Len()) > debt { h.currSize -= debt - 
h.rows[0].buffer.Truncate(h.rows[0].buffer.Len() - int(debt)) + heapPopRow.buffer.Truncate(heapPopRow.buffer.Len() - int(debt)) + heap.Push(h, heapPopRow) } else { - h.currSize -= uint64(h.rows[0].buffer.Len()) + h.sepSize - memDelta -= int64(h.rows[0].buffer.Cap()) - for _, dt := range h.rows[0].byItems { + h.currSize -= uint64(heapPopRow.buffer.Len()) + h.sepSize + memDelta -= int64(heapPopRow.buffer.Cap()) + for _, dt := range heapPopRow.byItems { memDelta -= GetDatumMemSize(dt) } - heap.Pop(h) h.isSepTruncated = true } } diff --git a/executor/aggregate_test.go b/executor/aggregate_test.go index f36e066505ad1..e804f3069f445 100644 --- a/executor/aggregate_test.go +++ b/executor/aggregate_test.go @@ -1629,3 +1629,17 @@ PARTITION p20220624 VALUES LESS THAN ("20220625") tk.MustQuery("SELECT /*+STREAM_AGG()*/ col1,sum(money) FROM t100 WHERE logtime>='2022-06-09 00:00:00' AND col1=100 ;").Check(testkit.Rows("100 20")) tk.MustQuery("SELECT /*+HASH_AGG()*/ col1,sum(money) FROM t100 WHERE logtime>='2022-06-09 00:00:00' AND col1=100 ;").Check(testkit.Rows("100 20")) } + +// https://github.com/pingcap/tidb/issues/27751 +func TestIssue27751(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("create table test.t(nname char(20));") + tk.MustExec("insert into test.t values ('2'),(null),('11'),('2'),(null),('2'),(null),('11'),('33');") + tk.MustExec("set @@group_concat_max_len=0;") + tk.MustQuery("select group_concat(nname order by 1 separator '#' ) from t;").Check(testkit.Rows("11#1")) + tk.MustQuery("select group_concat(nname order by 1 desc separator '#' ) from t;").Check(testkit.Rows("33#2")) +} diff --git a/executor/benchmark_test.go b/executor/benchmark_test.go index bba2c176d2877..06282390677e4 100644 --- a/executor/benchmark_test.go +++ b/executor/benchmark_test.go @@ -44,7 +44,6 @@ import ( "github.com/pingcap/tidb/util/memory" "github.com/pingcap/tidb/util/mock" "github.com/pingcap/tidb/util/stringutil" - "github.com/tikv/client-go/v2/oracle" "go.uber.org/zap/zapcore" ) @@ -291,7 +290,7 @@ func buildHashAggExecutor(ctx sessionctx.Context, src Executor, schema *expressi plan.SetSchema(schema) plan.Init(ctx, nil, 0) plan.SetChildren(nil) - b := newExecutorBuilder(ctx, nil, nil, oracle.GlobalTxnScope) + b := newExecutorBuilder(ctx, nil, nil) exec := b.build(plan) hashAgg := exec.(*HashAggExec) hashAgg.children[0] = src @@ -343,7 +342,7 @@ func buildStreamAggExecutor(ctx sessionctx.Context, srcExec Executor, schema *ex plan = sg } - b := newExecutorBuilder(ctx, nil, nil, oracle.GlobalTxnScope) + b := newExecutorBuilder(ctx, nil, nil) return b.build(plan) } @@ -576,7 +575,7 @@ func buildWindowExecutor(ctx sessionctx.Context, windowFunc string, funcs int, f plan = win } - b := newExecutorBuilder(ctx, nil, nil, oracle.GlobalTxnScope) + b := newExecutorBuilder(ctx, nil, nil) exec := b.build(plan) return exec } @@ -1316,7 +1315,7 @@ func prepare4IndexInnerHashJoin(tc *indexJoinTestCase, outerDS *mockDataSource, keyOff2IdxOff[i] = i } - readerBuilder, err := newExecutorBuilder(tc.ctx, nil, nil, oracle.GlobalTxnScope). + readerBuilder, err := newExecutorBuilder(tc.ctx, nil, nil). 
newDataReaderBuilder(&mockPhysicalIndexReader{e: innerDS}) if err != nil { return nil, err @@ -1390,7 +1389,7 @@ func prepare4IndexMergeJoin(tc *indexJoinTestCase, outerDS *mockDataSource, inne outerCompareFuncs = append(outerCompareFuncs, expression.GetCmpFunction(nil, outerJoinKeys[i], outerJoinKeys[i])) } - readerBuilder, err := newExecutorBuilder(tc.ctx, nil, nil, oracle.GlobalTxnScope). + readerBuilder, err := newExecutorBuilder(tc.ctx, nil, nil). newDataReaderBuilder(&mockPhysicalIndexReader{e: innerDS}) if err != nil { return nil, err diff --git a/executor/builder.go b/executor/builder.go index 8f930f5d7a107..99c4d2b97d26a 100644 --- a/executor/builder.go +++ b/executor/builder.go @@ -96,6 +96,7 @@ type executorBuilder struct { Ti *TelemetryInfo // isStaleness means whether this statement use stale read. isStaleness bool + txnScope string readReplicaScope string inUpdateStmt bool inDeleteStmt bool @@ -118,13 +119,15 @@ type CTEStorages struct { IterInTbl cteutil.Storage } -func newExecutorBuilder(ctx sessionctx.Context, is infoschema.InfoSchema, ti *TelemetryInfo, replicaReadScope string) *executorBuilder { +func newExecutorBuilder(ctx sessionctx.Context, is infoschema.InfoSchema, ti *TelemetryInfo) *executorBuilder { + txnManager := sessiontxn.GetTxnManager(ctx) return &executorBuilder{ ctx: ctx, is: is, Ti: ti, isStaleness: staleread.IsStmtStaleness(ctx), - readReplicaScope: replicaReadScope, + txnScope: txnManager.GetTxnScope(), + readReplicaScope: txnManager.GetReadReplicaScope(), } } @@ -142,9 +145,9 @@ type MockExecutorBuilder struct { } // NewMockExecutorBuilderForTest is ONLY used in test. -func NewMockExecutorBuilderForTest(ctx sessionctx.Context, is infoschema.InfoSchema, ti *TelemetryInfo, replicaReadScope string) *MockExecutorBuilder { +func NewMockExecutorBuilderForTest(ctx sessionctx.Context, is infoschema.InfoSchema, ti *TelemetryInfo) *MockExecutorBuilder { return &MockExecutorBuilder{ - executorBuilder: newExecutorBuilder(ctx, is, ti, replicaReadScope)} + executorBuilder: newExecutorBuilder(ctx, is, ti)} } // Build builds an executor tree according to `p`. 
@@ -348,13 +351,14 @@ func (b *executorBuilder) buildShowDDL(v *plannercore.ShowDDL) Executor { b.err = err return nil } - txn, err := e.ctx.Txn(true) + + session, err := e.getSysSession() if err != nil { b.err = err return nil } - - ddlInfo, err := ddl.GetDDLInfo(txn) + ddlInfo, err := ddl.GetDDLInfoWithNewTxn(session) + e.releaseSysSession(kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL), session) if err != nil { b.err = err return nil @@ -731,7 +735,7 @@ func (b *executorBuilder) buildExecute(v *plannercore.Execute) Executor { failpoint.Inject("assertExecutePrepareStatementStalenessOption", func(val failpoint.Value) { vs := strings.Split(val.(string), "_") - assertTS, assertTxnScope := vs[0], vs[1] + assertTS, assertReadReplicaScope := vs[0], vs[1] staleread.AssertStmtStaleness(b.ctx, true) ts, err := sessiontxn.GetTxnManager(b.ctx).GetStmtReadTS() if err != nil { @@ -739,7 +743,7 @@ func (b *executorBuilder) buildExecute(v *plannercore.Execute) Executor { } if strconv.FormatUint(ts, 10) != assertTS || - assertTxnScope != b.readReplicaScope { + assertReadReplicaScope != b.readReplicaScope { panic("execute prepare statement have wrong staleness option") } }) @@ -1539,9 +1543,9 @@ func (b *executorBuilder) getSnapshot() (kv.Snapshot, error) { txnManager := sessiontxn.GetTxnManager(b.ctx) if b.inInsertStmt || b.inUpdateStmt || b.inDeleteStmt || b.inSelectLockStmt { - snapshot, err = txnManager.GetForUpdateSnapshot() + snapshot, err = txnManager.GetSnapshotWithStmtForUpdateTS() } else { - snapshot, err = txnManager.GetReadSnapshot() + snapshot, err = txnManager.GetSnapshotWithStmtReadTS() } if err != nil { return nil, err @@ -3110,6 +3114,7 @@ func buildNoRangeTableReader(b *executorBuilder, v *plannercore.PhysicalTableRea baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), dagPB: dagReq, startTS: startTS, + txnScope: b.txnScope, readReplicaScope: b.readReplicaScope, isStaleness: b.isStaleness, table: tbl, @@ -3390,6 +3395,7 @@ func buildNoRangeIndexReader(b *executorBuilder, v *plannercore.PhysicalIndexRea baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), dagPB: dagReq, startTS: startTS, + txnScope: b.txnScope, readReplicaScope: b.readReplicaScope, isStaleness: b.isStaleness, physicalTableID: physicalTableID, @@ -4062,6 +4068,7 @@ func (builder *dataReaderBuilder) buildTableReaderBase(ctx context.Context, e *T SetStartTS(startTS). SetDesc(e.desc). SetKeepOrder(e.keepOrder). + SetTxnScope(e.txnScope). SetReadReplicaScope(e.readReplicaScope). SetIsStaleness(e.isStaleness). SetFromSessionVars(e.ctx.GetSessionVars()). 
diff --git a/executor/compiler.go b/executor/compiler.go index eccc3b2418776..f4633c85f70c6 100644 --- a/executor/compiler.go +++ b/executor/compiler.go @@ -91,16 +91,15 @@ func (c *Compiler) Compile(ctx context.Context, stmtNode ast.StmtNode) (*ExecStm lowerPriority = needLowerPriority(finalPlan) } return &ExecStmt{ - GoCtx: ctx, - ReplicaReadScope: ret.ReadReplicaScope, - InfoSchema: is, - Plan: finalPlan, - LowerPriority: lowerPriority, - Text: stmtNode.Text(), - StmtNode: stmtNode, - Ctx: c.Ctx, - OutputNames: names, - Ti: &TelemetryInfo{}, + GoCtx: ctx, + InfoSchema: is, + Plan: finalPlan, + LowerPriority: lowerPriority, + Text: stmtNode.Text(), + StmtNode: stmtNode, + Ctx: c.Ctx, + OutputNames: names, + Ti: &TelemetryInfo{}, }, nil } diff --git a/executor/coprocessor.go b/executor/coprocessor.go index 5df6528d72301..93a23dd6829ca 100644 --- a/executor/coprocessor.go +++ b/executor/coprocessor.go @@ -32,7 +32,6 @@ import ( "github.com/pingcap/tidb/util/codec" "github.com/pingcap/tidb/util/timeutil" "github.com/pingcap/tipb/go-tipb" - "github.com/tikv/client-go/v2/oracle" ) // CoprocessorDAGHandler uses to handle cop dag request. @@ -170,7 +169,7 @@ func (h *CoprocessorDAGHandler) buildDAGExecutor(req *coprocessor.Request) (Exec } plan = core.InjectExtraProjection(plan) // Build executor. - b := newExecutorBuilder(h.sctx, is, nil, oracle.GlobalTxnScope) + b := newExecutorBuilder(h.sctx, is, nil) return b.build(plan), nil } diff --git a/executor/distsql.go b/executor/distsql.go index d20e11ae65df4..c843bfa9ca586 100644 --- a/executor/distsql.go +++ b/executor/distsql.go @@ -171,6 +171,7 @@ type IndexReaderExecutor struct { kvRanges []kv.KeyRange dagPB *tipb.DAGRequest startTS uint64 + txnScope string readReplicaScope string isStaleness bool // result returns one or more distsql.PartialResult and each PartialResult is returned by one region. @@ -308,6 +309,7 @@ func (e *IndexReaderExecutor) open(ctx context.Context, kvRanges []kv.KeyRange) SetStartTS(e.startTS). SetDesc(e.desc). SetKeepOrder(e.keepOrder). + SetTxnScope(e.txnScope). SetReadReplicaScope(e.readReplicaScope). SetIsStaleness(e.isStaleness). SetFromSessionVars(e.ctx.GetSessionVars()). @@ -582,6 +584,7 @@ func (e *IndexLookUpExecutor) startIndexWorker(ctx context.Context, workCh chan< SetDesc(e.desc). SetKeepOrder(e.keepOrder). SetPaging(e.indexPaging). + SetTxnScope(e.txnScope). SetReadReplicaScope(e.readReplicaScope). SetIsStaleness(e.isStaleness). SetFromSessionVars(e.ctx.GetSessionVars()). 
@@ -680,6 +683,7 @@ func (e *IndexLookUpExecutor) buildTableReader(ctx context.Context, task *lookup table: table, dagPB: e.tableRequest, startTS: e.startTS, + txnScope: e.txnScope, readReplicaScope: e.readReplicaScope, isStaleness: e.isStaleness, columns: e.columns, diff --git a/executor/executor.go b/executor/executor.go index 76ad77ac68b90..7d2839cb29e34 100644 --- a/executor/executor.go +++ b/executor/executor.go @@ -68,7 +68,6 @@ import ( topsqlstate "github.com/pingcap/tidb/util/topsql/state" tikverr "github.com/tikv/client-go/v2/error" tikvstore "github.com/tikv/client-go/v2/kv" - "github.com/tikv/client-go/v2/oracle" tikvutil "github.com/tikv/client-go/v2/util" atomicutil "go.uber.org/atomic" "go.uber.org/zap" @@ -598,11 +597,12 @@ func (e *ShowDDLJobQueriesExec) Open(ctx context.Context) error { if err != nil { return err } - jobs, err := ddl.GetAllDDLJobs(meta.NewMeta(txn)) + m := meta.NewMeta(txn) + jobs, err := ddl.GetAllDDLJobs(m) if err != nil { return err } - historyJobs, err := ddl.GetHistoryDDLJobs(txn, ddl.DefNumHistoryJobs) + historyJobs, err := ddl.GetLastNHistoryDDLJobs(m, ddl.DefNumHistoryJobs) if err != nil { return err } @@ -1299,7 +1299,7 @@ func init() { ctx = opentracing.ContextWithSpan(ctx, span1) } - e := newExecutorBuilder(sctx, is, nil, oracle.GlobalTxnScope) + e := newExecutorBuilder(sctx, is, nil) exec := e.build(p) if e.err != nil { return nil, e.err diff --git a/executor/executor_required_rows_test.go b/executor/executor_required_rows_test.go index 8ee1224a8fe6a..d378185361f00 100644 --- a/executor/executor_required_rows_test.go +++ b/executor/executor_required_rows_test.go @@ -38,7 +38,6 @@ import ( "github.com/pingcap/tidb/util/memory" "github.com/pingcap/tidb/util/mock" "github.com/stretchr/testify/require" - "github.com/tikv/client-go/v2/oracle" ) type requiredRowsDataSource struct { @@ -846,7 +845,7 @@ func buildMergeJoinExec(ctx sessionctx.Context, joinType plannercore.JoinType, i j.CompareFuncs = append(j.CompareFuncs, expression.GetCmpFunction(nil, j.LeftJoinKeys[i], j.RightJoinKeys[i])) } - b := newExecutorBuilder(ctx, nil, nil, oracle.GlobalTxnScope) + b := newExecutorBuilder(ctx, nil, nil) return b.build(j) } diff --git a/executor/executor_test.go b/executor/executor_test.go index e426d78227606..3d68a859c6dff 100644 --- a/executor/executor_test.go +++ b/executor/executor_test.go @@ -3473,7 +3473,7 @@ func TestUnreasonablyClose(t *testing.T) { err = sessiontxn.GetTxnManager(tk.Session()).OnStmtStart(context.TODO(), stmt) require.NoError(t, err, comment) - executorBuilder := executor.NewMockExecutorBuilderForTest(tk.Session(), is, nil, oracle.GlobalTxnScope) + executorBuilder := executor.NewMockExecutorBuilderForTest(tk.Session(), is, nil) p, _, _ := planner.Optimize(context.TODO(), tk.Session(), stmt, is) require.NotNil(t, p) @@ -5623,9 +5623,10 @@ func TestAdmin(t *testing.T) { require.NoError(t, err) row = req.GetRow(0) require.Equal(t, 6, row.Len()) - txn, err := store.Begin() - require.NoError(t, err) - ddlInfo, err := ddl.GetDDLInfo(txn) + tk = testkit.NewTestKit(t, store) + tk.MustExec("begin") + sess := tk.Session() + ddlInfo, err := ddl.GetDDLInfo(sess) require.NoError(t, err) require.Equal(t, ddlInfo.SchemaVer, row.GetInt64(0)) // TODO: Pass this test. 
@@ -5640,8 +5641,7 @@ func TestAdmin(t *testing.T) { err = r.Next(ctx, req) require.NoError(t, err) require.Zero(t, req.NumRows()) - err = txn.Rollback() - require.NoError(t, err) + tk.MustExec("rollback") // show DDL jobs test r, err = tk.Exec("admin show ddl jobs") @@ -5651,9 +5651,9 @@ func TestAdmin(t *testing.T) { require.NoError(t, err) row = req.GetRow(0) require.Equal(t, 12, row.Len()) - txn, err = store.Begin() + txn, err := store.Begin() require.NoError(t, err) - historyJobs, err := ddl.GetHistoryDDLJobs(txn, ddl.DefNumHistoryJobs) + historyJobs, err := ddl.GetLastNHistoryDDLJobs(meta.NewMeta(txn), ddl.DefNumHistoryJobs) require.Greater(t, len(historyJobs), 1) require.Greater(t, len(row.GetString(1)), 0) require.NoError(t, err) @@ -5678,7 +5678,7 @@ func TestAdmin(t *testing.T) { result.Check(testkit.Rows()) result = tk.MustQuery(`admin show ddl job queries 1, 2, 3, 4`) result.Check(testkit.Rows()) - historyJobs, err = ddl.GetHistoryDDLJobs(txn, ddl.DefNumHistoryJobs) + historyJobs, err = ddl.GetLastNHistoryDDLJobs(meta.NewMeta(txn), ddl.DefNumHistoryJobs) result = tk.MustQuery(fmt.Sprintf("admin show ddl job queries %d", historyJobs[0].ID)) result.Check(testkit.Rows(historyJobs[0].Query)) require.NoError(t, err) @@ -5742,7 +5742,7 @@ func TestAdmin(t *testing.T) { // Test for reverse scan get history ddl jobs when ddl history jobs queue has multiple regions. txn, err = store.Begin() require.NoError(t, err) - historyJobs, err = ddl.GetHistoryDDLJobs(txn, 20) + historyJobs, err = ddl.GetLastNHistoryDDLJobs(meta.NewMeta(txn), 20) require.NoError(t, err) // Split region for history ddl job queues. @@ -5751,7 +5751,7 @@ func TestAdmin(t *testing.T) { endKey := meta.DDLJobHistoryKey(m, historyJobs[0].ID) cluster.SplitKeys(startKey, endKey, int(historyJobs[0].ID/5)) - historyJobs2, err := ddl.GetHistoryDDLJobs(txn, 20) + historyJobs2, err := ddl.GetLastNHistoryDDLJobs(meta.NewMeta(txn), 20) require.NoError(t, err) require.Equal(t, historyJobs2, historyJobs) } diff --git a/executor/index_merge_reader.go b/executor/index_merge_reader.go index e3b8e6391f2f5..3ae11e92f484b 100644 --- a/executor/index_merge_reader.go +++ b/executor/index_merge_reader.go @@ -304,6 +304,7 @@ func (e *IndexMergeReaderExecutor) startPartialIndexWorker(ctx context.Context, SetStartTS(e.startTS). SetDesc(e.descs[workID]). SetKeepOrder(false). + SetTxnScope(e.txnScope). SetReadReplicaScope(e.readReplicaScope). SetIsStaleness(e.isStaleness). SetFromSessionVars(e.ctx.GetSessionVars()). 
@@ -383,6 +384,7 @@ func (e *IndexMergeReaderExecutor) startPartialTableWorker(ctx context.Context, baseExecutor: newBaseExecutor(e.ctx, ts.Schema(), e.getPartitalPlanID(workID)), dagPB: e.dagPBs[workID], startTS: e.startTS, + txnScope: e.txnScope, readReplicaScope: e.readReplicaScope, isStaleness: e.isStaleness, feedback: statistics.NewQueryFeedback(0, nil, 0, false), @@ -603,6 +605,7 @@ func (e *IndexMergeReaderExecutor) buildFinalTableReader(ctx context.Context, tb table: tbl, dagPB: e.tableRequest, startTS: e.startTS, + txnScope: e.txnScope, readReplicaScope: e.readReplicaScope, isStaleness: e.isStaleness, columns: e.columns, diff --git a/executor/mpp_gather.go b/executor/mpp_gather.go index a9a6032d1f779..42526774dbdd5 100644 --- a/executor/mpp_gather.go +++ b/executor/mpp_gather.go @@ -77,7 +77,9 @@ func (e *MPPGather) appendMPPDispatchReq(pf *plannercore.Fragment) error { if err != nil { return errors.Trace(err) } - logutil.BgLogger().Info("Dispatch mpp task", zap.Uint64("timestamp", mppTask.StartTs), zap.Int64("ID", mppTask.ID), zap.String("address", mppTask.Meta.GetAddress()), zap.String("plan", plannercore.ToString(pf.ExchangeSender))) + logutil.BgLogger().Info("Dispatch mpp task", zap.Uint64("timestamp", mppTask.StartTs), + zap.Int64("ID", mppTask.ID), zap.String("address", mppTask.Meta.GetAddress()), + zap.String("plan", plannercore.ToString(pf.ExchangeSender))) req := &kv.MPPDispatchRequest{ Data: pbData, Meta: mppTask.Meta, diff --git a/executor/plan_replayer.go b/executor/plan_replayer.go index 156e79b1306d8..99868e19d1d52 100644 --- a/executor/plan_replayer.go +++ b/executor/plan_replayer.go @@ -129,6 +129,7 @@ func (e *PlanReplayerSingleExec) dumpSingle(path string) (fileName string, err e // Generate key and create zip file time := time.Now().UnixNano() b := make([]byte, 16) + //nolint: gosec _, err = rand.Read(b) if err != nil { return "", err diff --git a/executor/point_get.go b/executor/point_get.go index 881aa758ecd1f..04687403c0d4e 100644 --- a/executor/point_get.go +++ b/executor/point_get.go @@ -57,6 +57,7 @@ func (b *executorBuilder) buildPointGet(p *plannercore.PointGetPlan) Executor { e := &PointGetExecutor{ baseExecutor: newBaseExecutor(b.ctx, p.Schema(), p.ID()), + txnScope: b.txnScope, readReplicaScope: b.readReplicaScope, isStaleness: b.isStaleness, } @@ -115,6 +116,7 @@ type PointGetExecutor struct { idxKey kv.Key handleVal []byte idxVals []types.Datum + txnScope string readReplicaScope string isStaleness bool txn kv.Transaction @@ -433,15 +435,10 @@ func (e *PointGetExecutor) get(ctx context.Context, key kv.Key) ([]byte, error) } func (e *PointGetExecutor) verifyTxnScope() error { - // Stale Read uses the calculated TSO for the read, - // so there is no need to check the TxnScope here. 
- if e.isStaleness { - return nil - } - txnScope := e.readReplicaScope - if txnScope == "" || txnScope == kv.GlobalTxnScope { + if e.txnScope == "" || e.txnScope == kv.GlobalTxnScope { return nil } + var tblID int64 var tblName string var partName string @@ -456,16 +453,16 @@ func (e *PointGetExecutor) verifyTxnScope() error { tblInfo, _ := is.TableByID(tblID) tblName = tblInfo.Meta().Name.String() } - valid := distsql.VerifyTxnScope(txnScope, tblID, is) + valid := distsql.VerifyTxnScope(e.txnScope, tblID, is) if valid { return nil } if len(partName) > 0 { return dbterror.ErrInvalidPlacementPolicyCheck.GenWithStackByArgs( - fmt.Sprintf("table %v's partition %v can not be read by %v txn_scope", tblName, partName, txnScope)) + fmt.Sprintf("table %v's partition %v can not be read by %v txn_scope", tblName, partName, e.txnScope)) } return dbterror.ErrInvalidPlacementPolicyCheck.GenWithStackByArgs( - fmt.Sprintf("table %v can not be read by %v txn_scope", tblName, txnScope)) + fmt.Sprintf("table %v can not be read by %v txn_scope", tblName, e.txnScope)) } // EncodeUniqueIndexKey encodes a unique index key. diff --git a/executor/prepared.go b/executor/prepared.go index abe974e899310..073b583150df6 100644 --- a/executor/prepared.go +++ b/executor/prepared.go @@ -32,7 +32,6 @@ import ( plannercore "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/sessiontxn" - "github.com/pingcap/tidb/sessiontxn/staleread" "github.com/pingcap/tidb/types" driver "github.com/pingcap/tidb/types/parser_driver" "github.com/pingcap/tidb/util" @@ -333,14 +332,11 @@ func (e *DeallocateExec) Next(ctx context.Context, req *chunk.Chunk) error { // CompileExecutePreparedStmt compiles a session Execute command to a stmt.Statement. func CompileExecutePreparedStmt(ctx context.Context, sctx sessionctx.Context, - execStmt *ast.ExecuteStmt, is infoschema.InfoSchema, snapshotTS uint64, replicaReadScope string, args []types.Datum) (*ExecStmt, bool, bool, error) { + execStmt *ast.ExecuteStmt, is infoschema.InfoSchema) (*ExecStmt, bool, bool, error) { startTime := time.Now() defer func() { sctx.GetSessionVars().DurationCompile = time.Since(startTime) }() - isStaleness := snapshotTS != 0 - sctx.GetSessionVars().StmtCtx.IsStaleness = isStaleness - execStmt.BinaryArgs = args execPlan, names, err := planner.Optimize(ctx, sctx, execStmt, is) if err != nil { return nil, false, false, err @@ -349,21 +345,16 @@ func CompileExecutePreparedStmt(ctx context.Context, sctx sessionctx.Context, failpoint.Inject("assertTxnManagerInCompile", func() { sessiontxn.RecordAssert(sctx, "assertTxnManagerInCompile", true) sessiontxn.AssertTxnManagerInfoSchema(sctx, is) - staleread.AssertStmtStaleness(sctx, snapshotTS != 0) - if snapshotTS != 0 { - sessiontxn.AssertTxnManagerReadTS(sctx, snapshotTS) - } }) stmt := &ExecStmt{ - GoCtx: ctx, - InfoSchema: is, - Plan: execPlan, - StmtNode: execStmt, - Ctx: sctx, - OutputNames: names, - Ti: &TelemetryInfo{}, - ReplicaReadScope: replicaReadScope, + GoCtx: ctx, + InfoSchema: is, + Plan: execPlan, + StmtNode: execStmt, + Ctx: sctx, + OutputNames: names, + Ti: &TelemetryInfo{}, } if preparedPointer, ok := sctx.GetSessionVars().PreparedStmts[execStmt.ExecID]; ok { preparedObj, ok := preparedPointer.(*plannercore.CachedPrepareStmt) diff --git a/executor/seqtest/BUILD.bazel b/executor/seqtest/BUILD.bazel index c248e2e1fd30a..50c4bbdb65c55 100644 --- a/executor/seqtest/BUILD.bazel +++ b/executor/seqtest/BUILD.bazel @@ -13,7 +13,6 @@ go_test( "//ddl/util", "//errno", 
"//executor", - "//infoschema", "//kv", "//meta/autoid", "//metrics", diff --git a/executor/seqtest/prepared_test.go b/executor/seqtest/prepared_test.go index b4b018e8ac3ee..4e1ff9ed02e4d 100644 --- a/executor/seqtest/prepared_test.go +++ b/executor/seqtest/prepared_test.go @@ -24,7 +24,6 @@ import ( "github.com/pingcap/tidb/executor" "github.com/pingcap/tidb/infoschema" - "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/metrics" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/mysql" @@ -158,10 +157,10 @@ func TestPrepared(t *testing.T) { require.NoError(t, err) tk.ResultSetToResult(rs, fmt.Sprintf("%v", rs)).Check(testkit.Rows()) - execStmt := &ast.ExecuteStmt{ExecID: stmtID} + execStmt := &ast.ExecuteStmt{ExecID: stmtID, BinaryArgs: []types.Datum{types.NewDatum(1)}} // Check that ast.Statement created by executor.CompileExecutePreparedStmt has query text. stmt, _, _, err := executor.CompileExecutePreparedStmt(context.TODO(), tk.Session(), execStmt, - tk.Session().GetInfoSchema().(infoschema.InfoSchema), 0, kv.GlobalReplicaScope, []types.Datum{types.NewDatum(1)}) + tk.Session().GetInfoSchema().(infoschema.InfoSchema)) require.NoError(t, err) require.Equal(t, query, stmt.OriginText()) diff --git a/executor/set_test.go b/executor/set_test.go index 4eefe6a4e4e57..f91d1f1e80388 100644 --- a/executor/set_test.go +++ b/executor/set_test.go @@ -1834,3 +1834,45 @@ func TestGcMaxWaitTime(t *testing.T) { tk.MustExec("set global tidb_gc_life_time = \"72h\"") tk.MustExec("set global tidb_gc_max_wait_time = 1000") } + +func TestTiFlashFineGrainedShuffle(t *testing.T) { + store, clean := testkit.CreateMockStore(t) + defer clean() + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + + // Default is -1. + tk.MustQuery("select @@tiflash_fine_grained_shuffle_stream_count;").Check(testkit.Rows("-1")) + + tk.MustExec("set @@tiflash_fine_grained_shuffle_stream_count = -1") + tk.MustQuery("select @@tiflash_fine_grained_shuffle_stream_count;").Check(testkit.Rows("-1")) + // Min val is -1. + tk.MustExec("set @@tiflash_fine_grained_shuffle_stream_count = -2") + tk.MustQuery("select @@tiflash_fine_grained_shuffle_stream_count;").Check(testkit.Rows("-1")) + + tk.MustExec("set @@tiflash_fine_grained_shuffle_stream_count = 0") + tk.MustQuery("select @@tiflash_fine_grained_shuffle_stream_count;").Check(testkit.Rows("0")) + + tk.MustExec("set @@tiflash_fine_grained_shuffle_stream_count = 1024") + tk.MustQuery("select @@tiflash_fine_grained_shuffle_stream_count;").Check(testkit.Rows("1024")) + // Max val is 1024. + tk.MustExec("set @@tiflash_fine_grained_shuffle_stream_count = 1025") + tk.MustQuery("select @@tiflash_fine_grained_shuffle_stream_count;").Check(testkit.Rows("1024")) + + // Default is 8192. + tk.MustQuery("select @@tiflash_fine_grained_shuffle_batch_size;").Check(testkit.Rows("8192")) + + // Min is 1. + tk.MustExec("set @@tiflash_fine_grained_shuffle_batch_size = 0") + tk.MustQuery("select @@tiflash_fine_grained_shuffle_batch_size;").Check(testkit.Rows("1")) + tk.MustExec("set @@tiflash_fine_grained_shuffle_batch_size = -1") + tk.MustQuery("select @@tiflash_fine_grained_shuffle_batch_size;").Check(testkit.Rows("1")) + + // Max is uint64_max. + tk.MustExec("set @@tiflash_fine_grained_shuffle_batch_size = 18446744073709551615") + tk.MustQuery("select @@tiflash_fine_grained_shuffle_batch_size;").Check(testkit.Rows("18446744073709551615")) + + // Test set global. 
+ tk.MustExec("set global tiflash_fine_grained_shuffle_stream_count = -1") + tk.MustExec("set global tiflash_fine_grained_shuffle_batch_size = 8192") +} diff --git a/executor/slow_query.go b/executor/slow_query.go index 5661fc343c8c1..c346b9323c6a9 100755 --- a/executor/slow_query.go +++ b/executor/slow_query.go @@ -855,6 +855,7 @@ func (e *slowQueryRetriever) getAllFiles(ctx context.Context, sctx sessionctx.Co } if e.extractor == nil || !e.extractor.Enable { totalFileNum = 1 + //nolint: gosec file, err := os.Open(logFilePath) if err != nil { if os.IsNotExist(err) { diff --git a/executor/table_reader.go b/executor/table_reader.go index 30fedbd85737d..2d6362569ac85 100644 --- a/executor/table_reader.go +++ b/executor/table_reader.go @@ -79,6 +79,7 @@ type TableReaderExecutor struct { kvRanges []kv.KeyRange dagPB *tipb.DAGRequest startTS uint64 + txnScope string readReplicaScope string isStaleness bool // columns are only required by union scan and virtual column. @@ -332,6 +333,7 @@ func (e *TableReaderExecutor) buildKVReqSeparately(ctx context.Context, ranges [ SetStartTS(e.startTS). SetDesc(e.desc). SetKeepOrder(e.keepOrder). + SetTxnScope(e.txnScope). SetReadReplicaScope(e.readReplicaScope). SetFromSessionVars(e.ctx.GetSessionVars()). SetFromInfoSchema(e.ctx.GetInfoSchema()). @@ -370,6 +372,7 @@ func (e *TableReaderExecutor) buildKVReqForPartitionTableScan(ctx context.Contex SetStartTS(e.startTS). SetDesc(e.desc). SetKeepOrder(e.keepOrder). + SetTxnScope(e.txnScope). SetReadReplicaScope(e.readReplicaScope). SetFromSessionVars(e.ctx.GetSessionVars()). SetFromInfoSchema(e.ctx.GetInfoSchema()). @@ -400,6 +403,7 @@ func (e *TableReaderExecutor) buildKVReq(ctx context.Context, ranges []*ranger.R SetStartTS(e.startTS). SetDesc(e.desc). SetKeepOrder(e.keepOrder). + SetTxnScope(e.txnScope). SetReadReplicaScope(e.readReplicaScope). SetIsStaleness(e.isStaleness). SetFromSessionVars(e.ctx.GetSessionVars()). 
diff --git a/executor/trace.go b/executor/trace.go index d5ed5128c9ba3..56d6d88170db9 100644 --- a/executor/trace.go +++ b/executor/trace.go @@ -362,6 +362,7 @@ func generateOptimizerTraceFile() (*os.File, string, error) { // Generate key and create zip file time := time.Now().UnixNano() b := make([]byte, 16) + //nolint: gosec _, err = rand.Read(b) if err != nil { return nil, "", errors.AddStack(err) diff --git a/expression/builtin_encryption.go b/expression/builtin_encryption.go index 2171cf92a6304..4229f03402422 100644 --- a/expression/builtin_encryption.go +++ b/expression/builtin_encryption.go @@ -581,6 +581,7 @@ func (b *builtinRandomBytesSig) evalString(row chunk.Row) (string, bool, error) return "", false, types.ErrOverflow.GenWithStackByArgs("length", "random_bytes") } buf := make([]byte, val) + //nolint: gosec if n, err := rand.Read(buf); err != nil { return "", true, err } else if int64(n) != val { diff --git a/go.mod b/go.mod index fdf5368b379f2..13aff07d173b8 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( cloud.google.com/go/storage v1.21.0 github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.12.0 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.2.0 - github.com/BurntSushi/toml v0.4.1 + github.com/BurntSushi/toml v1.1.0 github.com/DATA-DOG/go-sqlmock v1.5.0 github.com/Jeffail/gabs/v2 v2.5.1 github.com/Shopify/sarama v1.29.0 @@ -46,15 +46,15 @@ require ( github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c github.com/pingcap/failpoint v0.0.0-20220423142525-ae43b7f4e5c3 github.com/pingcap/fn v0.0.0-20200306044125-d5540d389059 - github.com/pingcap/kvproto v0.0.0-20220525022339-6aaebf466305 + github.com/pingcap/kvproto v0.0.0-20220705053936-aa9c2d20cd2a github.com/pingcap/log v1.1.0 github.com/pingcap/sysutil v0.0.0-20220114020952-ea68d2dbf5b4 github.com/pingcap/tidb/parser v0.0.0-20211011031125-9b13dc409c5e - github.com/pingcap/tipb v0.0.0-20220602075447-4847c5d68e73 + github.com/pingcap/tipb v0.0.0-20220704030114-0f4f873beca8 github.com/prometheus/client_golang v1.12.2 github.com/prometheus/client_model v0.2.0 github.com/prometheus/common v0.32.1 - github.com/shirou/gopsutil/v3 v3.21.12 + github.com/shirou/gopsutil/v3 v3.22.4 github.com/shurcooL/httpgzip v0.0.0-20190720172056-320755c1c1b0 github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546 // indirect github.com/soheilhy/cmux v0.1.5 @@ -100,6 +100,8 @@ require ( github.com/charithe/durationcheck v0.0.9 github.com/daixiang0/gci v0.3.4 github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a + github.com/golangci/golangci-lint v1.46.2 + github.com/golangci/gosec v0.0.0-20180901114220-8afd9cbb6cfb github.com/golangci/misspell v0.3.5 github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8 @@ -107,12 +109,13 @@ require ( github.com/kyoh86/exportloopref v0.1.8 github.com/nishanths/predeclared v0.2.2 github.com/tdakkota/asciicheck v0.1.1 - honnef.co/go/tools v0.0.1-2020.1.4 + honnef.co/go/tools v0.3.1 ) require ( github.com/hexops/gotextdiff v1.0.3 // indirect github.com/kisielk/gotool v1.0.0 // indirect + github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 // indirect ) require ( @@ -142,7 +145,6 @@ require ( github.com/eapache/queue v1.1.0 // indirect github.com/felixge/httpsnoop v1.0.1 // indirect github.com/form3tech-oss/jwt-go v3.2.5+incompatible // indirect - github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/go-ole/go-ole v1.2.6 // indirect github.com/golang/glog v1.0.0 // indirect 
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect @@ -190,8 +192,8 @@ require ( github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 // indirect github.com/sirupsen/logrus v1.8.1 // indirect github.com/stathat/consistent v1.0.0 // indirect - github.com/tklauser/go-sysconf v0.3.9 // indirect - github.com/tklauser/numcpus v0.3.0 // indirect + github.com/tklauser/go-sysconf v0.3.10 // indirect + github.com/tklauser/numcpus v0.4.0 // indirect github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 // indirect github.com/uber/jaeger-lib v2.4.1+incompatible // indirect github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect @@ -214,10 +216,10 @@ require ( golang.org/x/crypto v0.0.0-20220214200702-86341886e292 // indirect golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e // indirect golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect - golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect + golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20220216160803-4663080d8bc8 // indirect - google.golang.org/protobuf v1.27.1 // indirect + google.golang.org/protobuf v1.28.0 // indirect gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect sigs.k8s.io/yaml v1.2.0 // indirect diff --git a/go.sum b/go.sum index 5d700817a2652..e6c81e2bb20a7 100644 --- a/go.sum +++ b/go.sum @@ -65,8 +65,9 @@ github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.1/go.mod h1:KLF4gFr6DcKFZwSu github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.2.0 h1:62Ew5xXg5UCGIXDOM7+y4IL5/6mQJq1nenhBCJAeGX8= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.2.0/go.mod h1:eHWhQKXc1Gv1DvWH//UzgWjWFEo0Pp4pH2vBzjBw8Fc= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw= github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/toml v1.1.0 h1:ksErzDEI1khOiGPgpwuI7x2ebx/uXQNw7xJpn9Eq1+I= +github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/CloudyKit/fastprinter v0.0.0-20170127035650-74b38d55f37a/go.mod h1:EFZQ978U7x8IRnstaskI3IysnWY5Ao3QgZUKOXlsAdw= github.com/CloudyKit/jet v2.1.3-0.20180809161101-62edd43e4f88+incompatible/go.mod h1:HPYO+50pSWkPoj9Q/eq0aRGByCL6ScRlUmiEX5Zgm+w= @@ -253,8 +254,7 @@ github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebP github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= -github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= +github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= github.com/fsouza/fake-gcs-server v1.19.0 h1:XyaGOlqo+R5sjT03x2ymk0xepaQlgwhRLTT2IopW0zA= github.com/fsouza/fake-gcs-server v1.19.0/go.mod h1:JtXHY/QzHhtyIxsNfIuQ+XgHtRb5B/w8nqbL5O8zqo0= github.com/fzipp/gocyclo v0.3.1/go.mod 
h1:DJHO6AUmbdqj2ET4Z9iArSuwWgYDRryYt2wASxc7x3E= @@ -347,6 +347,10 @@ github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a h1:iR3fYXUjHCR97qWS8ch1y9zPNsgXThGwjKPrYfqMPks= github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= +github.com/golangci/golangci-lint v1.46.2 h1:o90t/Xa6dhJbvy8Bz2RpzUXqrkigp19DLStMolTZbyo= +github.com/golangci/golangci-lint v1.46.2/go.mod h1:3DkdHnxn9eoTTrpT2gB0TEv8KSziuoqe9FitgQLHvAY= +github.com/golangci/gosec v0.0.0-20180901114220-8afd9cbb6cfb h1:Bi7BYmZVg4C+mKGi8LeohcP2GGUl2XJD4xCkJoZSaYc= +github.com/golangci/gosec v0.0.0-20180901114220-8afd9cbb6cfb/go.mod h1:ON/c2UR0VAAv6ZEAFKhjCLplESSmRFfZcDLASbI1GWo= github.com/golangci/misspell v0.3.5 h1:pLzmVdl3VxTOncgzHcvLOKirdvcx/TydsClUQXTehjo= github.com/golangci/misspell v0.3.5/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA= github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21 h1:leSNB7iYzLYSSx3J/s5sVf4Drkc68W2wm4Ixh/mr0us= @@ -600,6 +604,8 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRW github.com/nats-io/nats.go v1.8.1/go.mod h1:BrFz9vVn0fU3AcH9Vn4Kd7W0NpJ651tD5omQ3M8LwxM= github.com/nats-io/nkeys v0.0.2/go.mod h1:dab7URMsZm6Z/jp9Z5UGa87Uutgc2mVpXLC4B7TDb/4= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 h1:4kuARK6Y6FxaNu/BnU2OAaLF86eTVhP2hjTB6iMvItA= +github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8= github.com/ncw/directio v1.0.4/go.mod h1:CKGdcN7StAaqjT7Qack3lAXeX4pjnyc46YeqZH1yWVY= github.com/ncw/directio v1.0.5 h1:JSUBhdjEvVaJvOoyPAbcW0fnd0tvRXD76wEfZ1KcQz4= github.com/ncw/directio v1.0.5/go.mod h1:rX/pKEYkOXBGOggmcyJeJGloCkleSvphPx2eV3t6ROk= @@ -661,8 +667,9 @@ github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989 h1:surzm05a8C9dN github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989/go.mod h1:O17XtbryoCJhkKGbT62+L2OlrniwqiGLSqrmdHCMzZw= github.com/pingcap/kvproto v0.0.0-20191211054548-3c6b38ea5107/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= github.com/pingcap/kvproto v0.0.0-20220302110454-c696585a961b/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= -github.com/pingcap/kvproto v0.0.0-20220525022339-6aaebf466305 h1:TZ0teMZoKHnZDlJxNkWrp5Sgv3w+ruNbrqtBYKsfaNw= github.com/pingcap/kvproto v0.0.0-20220525022339-6aaebf466305/go.mod h1:OYtxs0786qojVTmkVeufx93xe+jUgm56GUYRIKnmaGI= +github.com/pingcap/kvproto v0.0.0-20220705053936-aa9c2d20cd2a h1:nP2wmyw9JTRsk5rm+tZtfAso6c/1FvuaFNbXTaYz3FE= +github.com/pingcap/kvproto v0.0.0-20220705053936-aa9c2d20cd2a/go.mod h1:OYtxs0786qojVTmkVeufx93xe+jUgm56GUYRIKnmaGI= github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= github.com/pingcap/log v0.0.0-20200511115504-543df19646ad/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= github.com/pingcap/log v0.0.0-20210625125904-98ed8e2eb1c7/go.mod h1:8AanEdAHATuRurdGxZXBz0At+9avep+ub7U1AGYLIMM= @@ -671,8 +678,8 @@ github.com/pingcap/log v1.1.0 h1:ELiPxACz7vdo1qAvvaWJg1NrYFoY6gqAh/+Uo6aXdD8= github.com/pingcap/log v1.1.0/go.mod h1:DWQW5jICDR7UJh4HtxXSM20Churx4CQL0fwL/SoOSA4= github.com/pingcap/sysutil v0.0.0-20220114020952-ea68d2dbf5b4 
h1:HYbcxtnkN3s5tqrZ/z3eJS4j3Db8wMphEm1q10lY/TM= github.com/pingcap/sysutil v0.0.0-20220114020952-ea68d2dbf5b4/go.mod h1:sDCsM39cGiv2vwunZkaFA917vVkqDTGSPbbV7z4Oops= -github.com/pingcap/tipb v0.0.0-20220602075447-4847c5d68e73 h1:L4nZwfYSrIsWPAZR8zMwHaNQJy0Rjy3Od6Smj5mlOms= -github.com/pingcap/tipb v0.0.0-20220602075447-4847c5d68e73/go.mod h1:A7mrd7WHBl1o63LE2bIBGEJMTNWXqhgmYiOvMLxozfs= +github.com/pingcap/tipb v0.0.0-20220704030114-0f4f873beca8 h1:oYn6UiUSnVlMBr4rLOweNWtdAon5wCLnLGDSFf/8kMA= +github.com/pingcap/tipb v0.0.0-20220704030114-0f4f873beca8/go.mod h1:A7mrd7WHBl1o63LE2bIBGEJMTNWXqhgmYiOvMLxozfs= github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4 h1:49lOXmGaUpV9Fz3gd7TFZY106KVlPVa5jcYD1gaQf98= github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -733,8 +740,9 @@ github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFo github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/shirou/gopsutil/v3 v3.21.12 h1:VoGxEW2hpmz0Vt3wUvHIl9fquzYLNpVpgNNB7pGJimA= github.com/shirou/gopsutil/v3 v3.21.12/go.mod h1:BToYZVTlSVlfazpDDYFnsVZLaoRG+g8ufT6fPQLdJzA= +github.com/shirou/gopsutil/v3 v3.22.4 h1:srAQaiX6jX/cYL6q29aE0m8lOskT9CurZ9N61YR3yoI= +github.com/shirou/gopsutil/v3 v3.22.4/go.mod h1:D01hZJ4pVHPpCTZ3m3T2+wDF2YAGfd+H4ifUguaQzHM= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 h1:bUGsEnyNbVPw06Bs80sCeARAlK8lhwqGyi6UT8ymuGk= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/httpgzip v0.0.0-20190720172056-320755c1c1b0 h1:mj/nMDAwTBiaCqMEs4cYCqF7pO6Np7vhy1D1wcQGz+E= @@ -771,12 +779,14 @@ github.com/stathat/consistent v1.0.0 h1:ZFJ1QTRn8npNBKW065raSZ8xfOqhpb8vLOkfp4Cc github.com/stathat/consistent v1.0.0/go.mod h1:uajTPbgSygZBJ+V+0mY7meZ8i0XAcZs7AQ6V121XSxw= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2-0.20220504104629-106ec21d14df h1:rh3VYpfvzXRbJ90ymx1yfhGl/wq8ac2m/cUbao61kwY= github.com/stretchr/testify v1.7.2-0.20220504104629-106ec21d14df/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= @@ -788,10 +798,12 @@ github.com/tikv/client-go/v2 v2.0.1-0.20220627063500-947d923945fd h1:VAyYcN1Nw7R 
github.com/tikv/client-go/v2 v2.0.1-0.20220627063500-947d923945fd/go.mod h1:uoZHYWKB+PsDueEnZ0EvF5zvNJPEauEWN26Tgi7qvNI= github.com/tikv/pd/client v0.0.0-20220307081149-841fa61e9710 h1:jxgmKOscXSjaFEKQGRyY5qOpK8hLqxs2irb/uDJMtwk= github.com/tikv/pd/client v0.0.0-20220307081149-841fa61e9710/go.mod h1:AtvppPwkiyUgQlR1W9qSqfTB+OsOIu19jDCOxOsPkmU= -github.com/tklauser/go-sysconf v0.3.9 h1:JeUVdAOWhhxVcU6Eqr/ATFHgXk/mmiItdKeJPev3vTo= github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs= -github.com/tklauser/numcpus v0.3.0 h1:ILuRUQBtssgnxw0XXIjKUC56fgnOrFoQQ/4+DeU2biQ= +github.com/tklauser/go-sysconf v0.3.10 h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw= +github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk= github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8= +github.com/tklauser/numcpus v0.4.0 h1:E53Dm1HjH1/R2/aoCtXtPgzmElmn51aOkhCFSuZq//o= +github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 h1:uruHq4dN7GR16kFc5fp3d1RIYzJW5onx8Ybykw2YQFA= github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -1150,6 +1162,7 @@ golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220622161953-175b2fd9d664 h1:wEZYwx+kK+KlZ0hpvP2Ls1Xr4+RWnlzGFwPP0aiDjIU= golang.org/x/sys v0.0.0-20220622161953-175b2fd9d664/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1246,8 +1259,9 @@ golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f h1:GGU+dLjvlC3qDwqYgL6UgRmHXhOOgns0bZu2Ty5mm6U= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= @@ -1411,8 +1425,9 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj 
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/kv/kv.go b/kv/kv.go index 39dbcc7c9bf78..d65d6498c12d1 100644 --- a/kv/kv.go +++ b/kv/kv.go @@ -356,6 +356,8 @@ type Request struct { TaskID uint64 // TiDBServerID is the specified TiDB serverID to execute request. `0` means all TiDB instances. TiDBServerID uint64 + // TxnScope is the scope of the txn + TxnScope string // ReadReplicaScope is the scope of the read replica. ReadReplicaScope string // IsStaleness indicates whether the request read staleness data diff --git a/meta/meta.go b/meta/meta.go index 8a8ec96687404..0ee38539c85b5 100644 --- a/meta/meta.go +++ b/meta/meta.go @@ -19,7 +19,6 @@ import ( "encoding/json" "fmt" "math" - "sort" "strconv" "strings" "sync" @@ -1016,36 +1015,11 @@ func (m *Meta) GetHistoryDDLJob(id int64) (*model.Job, error) { return job, errors.Trace(err) } -// GetAllHistoryDDLJobs gets all history DDL jobs. -func (m *Meta) GetAllHistoryDDLJobs() ([]*model.Job, error) { - pairs, err := m.txn.HGetAll(mDDLJobHistoryKey) - if err != nil { - return nil, errors.Trace(err) - } - jobs, err := decodeJob(pairs) - if err != nil { - return nil, errors.Trace(err) - } - // sort job. - sorter := &jobsSorter{jobs: jobs} - sort.Sort(sorter) - return jobs, nil -} - // GetHistoryDDLCount the count of all history DDL jobs. func (m *Meta) GetHistoryDDLCount() (uint64, error) { return m.txn.HGetLen(mDDLJobHistoryKey) } -// GetLastNHistoryDDLJobs gets latest N history ddl jobs. -func (m *Meta) GetLastNHistoryDDLJobs(num int) ([]*model.Job, error) { - pairs, err := m.txn.HGetLastN(mDDLJobHistoryKey, num) - if err != nil { - return nil, errors.Trace(err) - } - return decodeJob(pairs) -} - // LastJobIterator is the iterator for gets latest history. type LastJobIterator interface { GetLastJobs(num int, jobs []*model.Job) ([]*model.Job, error) @@ -1089,36 +1063,6 @@ func (i *HLastJobIterator) GetLastJobs(num int, jobs []*model.Job) ([]*model.Job return jobs, nil } -func decodeJob(jobPairs []structure.HashPair) ([]*model.Job, error) { - jobs := make([]*model.Job, 0, len(jobPairs)) - for _, pair := range jobPairs { - job := &model.Job{} - err := job.Decode(pair.Value) - if err != nil { - return nil, errors.Trace(err) - } - jobs = append(jobs, job) - } - return jobs, nil -} - -// jobsSorter implements the sort.Interface interface. 
-type jobsSorter struct { - jobs []*model.Job -} - -func (s *jobsSorter) Swap(i, j int) { - s.jobs[i], s.jobs[j] = s.jobs[j], s.jobs[i] -} - -func (s *jobsSorter) Len() int { - return len(s.jobs) -} - -func (s *jobsSorter) Less(i, j int) bool { - return s.jobs[i].ID < s.jobs[j].ID -} - // GetBootstrapVersion returns the version of the server which bootstrap the store. // If the store is not bootstraped, the version will be zero. func (m *Meta) GetBootstrapVersion() (int64, error) { diff --git a/meta/meta_test.go b/meta/meta_test.go index 024e774615b1d..7174eae38c66c 100644 --- a/meta/meta_test.go +++ b/meta/meta_test.go @@ -23,6 +23,7 @@ import ( "time" "github.com/pingcap/errors" + "github.com/pingcap/tidb/ddl" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/meta" "github.com/pingcap/tidb/parser/model" @@ -564,7 +565,7 @@ func TestDDL(t *testing.T) { historyJob2.Args = append(job.Args, arg) err = m.AddHistoryDDLJob(historyJob2, false) require.NoError(t, err) - all, err := m.GetAllHistoryDDLJobs() + all, err := ddl.GetAllHistoryDDLJobs(m) require.NoError(t, err) var lastID int64 for _, job := range all { @@ -581,7 +582,7 @@ func TestDDL(t *testing.T) { } // Test for get last N history ddl jobs. - historyJobs, err := m.GetLastNHistoryDDLJobs(2) + historyJobs, err := ddl.GetLastNHistoryDDLJobs(m, 2) require.NoError(t, err) require.Len(t, historyJobs, 2) require.Equal(t, int64(1234), historyJobs[0].ID) diff --git a/parser/auth/mysql_native_password.go b/parser/auth/mysql_native_password.go index d781626a68c0f..05c6127c21991 100644 --- a/parser/auth/mysql_native_password.go +++ b/parser/auth/mysql_native_password.go @@ -15,7 +15,7 @@ package auth import ( "bytes" - "crypto/sha1" + "crypto/sha1" //nolint: gosec "encoding/hex" "fmt" @@ -39,6 +39,7 @@ import ( // check(candidate_hash2==hash_stage2) // // this three steps are done in check_scramble() func CheckScrambledPassword(salt, hpwd, auth []byte) bool { + //nolint: gosec crypt := sha1.New() _, err := crypt.Write(salt) terror.Log(errors.Trace(err)) @@ -58,6 +59,7 @@ func CheckScrambledPassword(salt, hpwd, auth []byte) bool { // Sha1Hash is an util function to calculate sha1 hash. func Sha1Hash(bs []byte) []byte { + //nolint: gosec crypt := sha1.New() _, err := crypt.Write(bs) terror.Log(errors.Trace(err)) diff --git a/parser/model/ddl.go b/parser/model/ddl.go index a365d0ae3b8d1..8d2a80c64ccdf 100644 --- a/parser/model/ddl.go +++ b/parser/model/ddl.go @@ -82,7 +82,7 @@ const ( __DEPRECATED_ActionAlterTableAlterPartition ActionType = 46 ActionRenameTables ActionType = 47 - ActionDropIndexes ActionType = 48 + ActionDropIndexes ActionType = 48 // Deprecated, we use ActionMultiSchemaChange instead. ActionAlterTableAttributes ActionType = 49 ActionAlterTablePartitionAttributes ActionType = 50 ActionCreatePlacementPolicy ActionType = 51 @@ -144,7 +144,6 @@ var actionMap = map[ActionType]string{ ActionAddCheckConstraint: "add check constraint", ActionDropCheckConstraint: "drop check constraint", ActionAlterCheckConstraint: "alter check constraint", - ActionDropIndexes: "drop multi-indexes", ActionAlterTableAttributes: "alter table attributes", ActionAlterTablePartitionPlacement: "alter table partition placement", ActionAlterTablePartitionAttributes: "alter table partition attributes", @@ -664,7 +663,7 @@ func (job *Job) MayNeedReorg() bool { // IsRollbackable checks whether the job can be rollback. 
func (job *Job) IsRollbackable() bool { switch job.Type { - case ActionDropIndex, ActionDropPrimaryKey, ActionDropIndexes: + case ActionDropIndex, ActionDropPrimaryKey: // We can't cancel if index current state is in StateDeleteOnly or StateDeleteReorganization or StateWriteOnly, otherwise there will be an inconsistent issue between record and index. // In WriteOnly state, we can rollback for normal index but can't rollback for expression index(need to drop hidden column). Since we can't // know the type of index here, we consider all indices except primary index as non-rollbackable. diff --git a/parser/model/model_test.go b/parser/model/model_test.go index d3058e6255285..480786ec14050 100644 --- a/parser/model/model_test.go +++ b/parser/model/model_test.go @@ -378,7 +378,6 @@ func TestString(t *testing.T) { {ActionAddColumn, "add column"}, {ActionDropColumn, "drop column"}, {ActionModifySchemaCharsetAndCollate, "modify schema charset and collate"}, - {ActionDropIndexes, "drop multi-indexes"}, {ActionAlterTablePlacement, "alter table placement"}, {ActionAlterTablePartitionPlacement, "alter table partition placement"}, {ActionAlterNoCacheTable, "alter table nocache"}, diff --git a/planner/core/explain.go b/planner/core/explain.go index 6d8512d5dd94a..6fb3f7d593e2f 100644 --- a/planner/core/explain.go +++ b/planner/core/explain.go @@ -518,7 +518,11 @@ func (p *PhysicalUnionScan) ExplainInfo() string { // ExplainInfo implements Plan interface. func (p *PhysicalSelection) ExplainInfo() string { - return string(expression.SortedExplainExpressionList(p.Conditions)) + exprStr := string(expression.SortedExplainExpressionList(p.Conditions)) + if p.TiFlashFineGrainedShuffleStreamCount > 0 { + exprStr += fmt.Sprintf(", stream_count: %d", p.TiFlashFineGrainedShuffleStreamCount) + } + return exprStr } // ExplainNormalizedInfo implements Plan interface. @@ -528,7 +532,11 @@ func (p *PhysicalSelection) ExplainNormalizedInfo() string { // ExplainInfo implements Plan interface. func (p *PhysicalProjection) ExplainInfo() string { - return expression.ExplainExpressionList(p.Exprs, p.schema) + exprStr := expression.ExplainExpressionList(p.Exprs, p.schema) + if p.TiFlashFineGrainedShuffleStreamCount > 0 { + exprStr += fmt.Sprintf(", stream_count: %d", p.TiFlashFineGrainedShuffleStreamCount) + } + return exprStr } // ExplainNormalizedInfo implements Plan interface. @@ -547,7 +555,11 @@ func (p *PhysicalTableDual) ExplainInfo() string { // ExplainInfo implements Plan interface. func (p *PhysicalSort) ExplainInfo() string { buffer := bytes.NewBufferString("") - return explainByItems(buffer, p.ByItems).String() + buffer = explainByItems(buffer, p.ByItems) + if p.TiFlashFineGrainedShuffleStreamCount > 0 { + buffer.WriteString(fmt.Sprintf(", stream_count: %d", p.TiFlashFineGrainedShuffleStreamCount)) + } + return buffer.String() } // ExplainInfo implements Plan interface. @@ -867,6 +879,9 @@ func (p *PhysicalWindow) ExplainInfo() string { p.formatFrameBound(buffer, p.Frame.End) } buffer.WriteString(")") + if p.TiFlashFineGrainedShuffleStreamCount > 0 { + buffer.WriteString(fmt.Sprintf(", stream_count: %d", p.TiFlashFineGrainedShuffleStreamCount)) + } return buffer.String() } @@ -995,9 +1010,20 @@ func (p *PhysicalExchangeSender) ExplainInfo() string { } fmt.Fprintf(buffer, "]") } + if p.TiFlashFineGrainedShuffleStreamCount > 0 { + buffer.WriteString(fmt.Sprintf(", stream_count: %d", p.TiFlashFineGrainedShuffleStreamCount)) + } return buffer.String() } +// ExplainInfo implements Plan interface. 
+func (p *PhysicalExchangeReceiver) ExplainInfo() (res string) { + if p.TiFlashFineGrainedShuffleStreamCount > 0 { + res = fmt.Sprintf("stream_count: %d", p.TiFlashFineGrainedShuffleStreamCount) + } + return res +} + // ExplainInfo implements Plan interface. func (p *LogicalUnionScan) ExplainInfo() string { buffer := bytes.NewBufferString("") diff --git a/planner/core/integration_test.go b/planner/core/integration_test.go index 326755419ae26..63e9d118507a3 100644 --- a/planner/core/integration_test.go +++ b/planner/core/integration_test.go @@ -17,6 +17,7 @@ package core_test import ( "bytes" "fmt" + "regexp" "strconv" "strings" "testing" @@ -6547,6 +6548,107 @@ func TestTiFlashPartitionTableScan(t *testing.T) { tk.MustExec("drop table hp_t;") } +func TestTiFlashFineGrainedShuffle(t *testing.T) { + store, dom, clean := testkit.CreateMockStoreAndDomain(t) + defer clean() + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("set @@tidb_isolation_read_engines = 'tiflash'") + tk.MustExec("set @@tidb_enforce_mpp = on") + tk.MustExec("drop table if exists t1;") + tk.MustExec("create table t1(c1 int, c2 int)") + + tbl1, err := dom.InfoSchema().TableByName(model.CIStr{O: "test", L: "test"}, model.CIStr{O: "t1", L: "t1"}) + require.NoError(t, err) + // Set the hacked TiFlash replica for explain tests. + tbl1.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{Count: 1, Available: true} + var input []string + var output []struct { + SQL string + Plan []string + } + integrationSuiteData := core.GetIntegrationSuiteData() + integrationSuiteData.GetTestCases(t, &input, &output) + for i, tt := range input { + testdata.OnRecord(func() { + output[i].SQL = tt + output[i].Plan = testdata.ConvertRowsToStrings(tk.MustQuery(tt).Rows()) + }) + tk.MustQuery(tt).Check(testkit.Rows(output[i].Plan...)) + } +} + +func TestTiFlashFineGrainedShuffleWithMaxTiFlashThreads(t *testing.T) { + store, dom, clean := testkit.CreateMockStoreAndDomain(t) + defer clean() + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("set @@tidb_isolation_read_engines = 'tiflash'") + tk.MustExec("set @@tidb_enforce_mpp = on") + tk.MustExec("drop table if exists t1;") + tk.MustExec("create table t1(c1 int, c2 int)") + tbl1, err := dom.InfoSchema().TableByName(model.CIStr{O: "test", L: "test"}, model.CIStr{O: "t1", L: "t1"}) + require.NoError(t, err) + // Set the hacked TiFlash replica for explain tests. + tbl1.Meta().TiFlashReplica = &model.TiFlashReplicaInfo{Count: 1, Available: true} + + sql := "explain select row_number() over w1 from t1 window w1 as (partition by c1);" + + getStreamCountFromExplain := func(rows [][]interface{}) (res []uint64) { + re := regexp.MustCompile("stream_count: ([0-9]+)") + for _, row := range rows { + buf := bytes.NewBufferString("") + _, _ = fmt.Fprintf(buf, "%s\n", row) + if matched := re.FindStringSubmatch(buf.String()); matched != nil { + require.Equal(t, len(matched), 2) + c, err := strconv.ParseUint(matched[1], 10, 64) + require.NoError(t, err) + res = append(res, c) + } + } + return res + } + + // tiflash_fine_grained_shuffle_stream_count should be same with tidb_max_tiflash_threads. 
+ tk.MustExec("set @@tiflash_fine_grained_shuffle_stream_count = -1") + tk.MustExec("set @@tidb_max_tiflash_threads = 10") + rows := tk.MustQuery(sql).Rows() + streamCount := getStreamCountFromExplain(rows) + // require.Equal(t, len(streamCount), 1) + require.Equal(t, uint64(10), streamCount[0]) + + // tiflash_fine_grained_shuffle_stream_count should be default value when tidb_max_tiflash_threads is -1. + tk.MustExec("set @@tiflash_fine_grained_shuffle_stream_count = -1") + tk.MustExec("set @@tidb_max_tiflash_threads = -1") + rows = tk.MustQuery(sql).Rows() + streamCount = getStreamCountFromExplain(rows) + // require.Equal(t, len(streamCount), 1) + require.Equal(t, uint64(variable.DefStreamCountWhenMaxThreadsNotSet), streamCount[0]) + + // tiflash_fine_grained_shuffle_stream_count should be default value when tidb_max_tiflash_threads is 0. + tk.MustExec("set @@tiflash_fine_grained_shuffle_stream_count = -1") + tk.MustExec("set @@tidb_max_tiflash_threads = 0") + rows = tk.MustQuery(sql).Rows() + streamCount = getStreamCountFromExplain(rows) + // require.Equal(t, len(streamCount), 1) + require.Equal(t, uint64(variable.DefStreamCountWhenMaxThreadsNotSet), streamCount[0]) + + // Disabled when tiflash_fine_grained_shuffle_stream_count is 0. + tk.MustExec("set @@tiflash_fine_grained_shuffle_stream_count = 0") + tk.MustExec("set @@tidb_max_tiflash_threads = 10") + rows = tk.MustQuery(sql).Rows() + streamCount = getStreamCountFromExplain(rows) + require.Equal(t, len(streamCount), 0) + + // Test when tiflash_fine_grained_shuffle_stream_count is greater than 0. + tk.MustExec("set @@tiflash_fine_grained_shuffle_stream_count = 16") + tk.MustExec("set @@tidb_max_tiflash_threads = 10") + rows = tk.MustQuery(sql).Rows() + streamCount = getStreamCountFromExplain(rows) + // require.Equal(t, len(streamCount), 1) + require.Equal(t, uint64(16), streamCount[0]) +} + func TestIssue33175(t *testing.T) { store, _, clean := testkit.CreateMockStoreAndDomain(t) defer clean() diff --git a/planner/core/logical_plan_builder.go b/planner/core/logical_plan_builder.go index 69f067e10fec7..92c2a3dbcfd80 100644 --- a/planner/core/logical_plan_builder.go +++ b/planner/core/logical_plan_builder.go @@ -5179,18 +5179,21 @@ func CheckUpdateList(assignFlags []int, updt *Update, newTblID2Table map[int64]t } for i, col := range tbl.WritableCols() { - if flags[i] >= 0 && col.State != model.StatePublic { + if flags[i] < 0 { + continue + } + + if col.State != model.StatePublic { return ErrUnknownColumn.GenWithStackByArgs(col.Name, clauseMsg[fieldList]) } - if flags[i] >= 0 { - update = true - if mysql.HasPriKeyFlag(col.GetFlag()) { - updatePK = true - } - for _, partColName := range partitionColumnNames { - if col.Name.L == partColName.L { - updatePartitionCol = true - } + + update = true + if mysql.HasPriKeyFlag(col.GetFlag()) { + updatePK = true + } + for _, partColName := range partitionColumnNames { + if col.Name.L == partColName.L { + updatePartitionCol = true } } } diff --git a/planner/core/logical_plan_test.go b/planner/core/logical_plan_test.go index b6f279f81c4e5..0a73522baa9a3 100644 --- a/planner/core/logical_plan_test.go +++ b/planner/core/logical_plan_test.go @@ -55,6 +55,7 @@ func createPlannerSuite() (s *plannerSuite) { MockRangePartitionTable(), MockHashPartitionTable(), MockListPartitionTable(), + MockStateNoneColumnTable(), } id := int64(0) for _, tblInfo := range tblInfos { @@ -911,6 +912,10 @@ func TestValidate(t *testing.T) { sql: "select a+1 from t having t.a", err: ErrUnknownColumn, }, + { + sql: "update 
T_StateNoneColumn set c = 1 where a = 1", + err: ErrUnknownColumn, + }, } s := createPlannerSuite() diff --git a/planner/core/mock.go b/planner/core/mock.go index 159eb67ee44bf..fb554878862c1 100644 --- a/planner/core/mock.go +++ b/planner/core/mock.go @@ -553,3 +553,55 @@ func MockListPartitionTable() *model.TableInfo { tableInfo.Partition = partition return tableInfo } + +// MockStateNoneColumnTable is only used for plan related tests. +func MockStateNoneColumnTable() *model.TableInfo { + // column: a, b + // PK: a + // indeices: b + indices := []*model.IndexInfo{ + { + Name: model.NewCIStr("b"), + Columns: []*model.IndexColumn{ + { + Name: model.NewCIStr("b"), + Length: types.UnspecifiedLength, + Offset: 1, + }, + }, + State: model.StatePublic, + Unique: true, + }, + } + pkColumn := &model.ColumnInfo{ + State: model.StatePublic, + Offset: 0, + Name: model.NewCIStr("a"), + FieldType: newLongType(), + ID: 1, + } + col0 := &model.ColumnInfo{ + State: model.StatePublic, + Offset: 1, + Name: model.NewCIStr("b"), + FieldType: newLongType(), + ID: 2, + } + col1 := &model.ColumnInfo{ + State: model.StateNone, + Offset: 2, + Name: model.NewCIStr("c"), + FieldType: newLongType(), + ID: 3, + } + pkColumn.SetFlag(mysql.PriKeyFlag | mysql.NotNullFlag | mysql.UnsignedFlag) + col0.SetFlag(mysql.NotNullFlag) + col1.SetFlag(mysql.UnsignedFlag) + table := &model.TableInfo{ + Columns: []*model.ColumnInfo{pkColumn, col0, col1}, + Indices: indices, + Name: model.NewCIStr("T_StateNoneColumn"), + PKIsHandle: true, + } + return table +} diff --git a/planner/core/optimizer.go b/planner/core/optimizer.go index fde76b3a41eec..20d4fd598e701 100644 --- a/planner/core/optimizer.go +++ b/planner/core/optimizer.go @@ -37,6 +37,7 @@ import ( "github.com/pingcap/tidb/util/logutil" "github.com/pingcap/tidb/util/set" "github.com/pingcap/tidb/util/tracing" + "github.com/pingcap/tipb/go-tipb" "go.uber.org/atomic" "go.uber.org/zap" "golang.org/x/exp/slices" @@ -373,10 +374,139 @@ func postOptimize(sctx sessionctx.Context, plan PhysicalPlan) PhysicalPlan { mergeContinuousSelections(plan) plan = eliminateUnionScanAndLock(sctx, plan) plan = enableParallelApply(sctx, plan) + handleFineGrainedShuffle(sctx, plan) checkPlanCacheable(sctx, plan) return plan } +// Only for MPP(Window<-[Sort]<-ExchangeReceiver<-ExchangeSender). +// TiFlashFineGrainedShuffleStreamCount: +// == 0: fine grained shuffle is disabled. +// > 0: use TiFlashFineGrainedShuffleStreamCount as stream count. +// < 0: use TiFlashMaxThreads as stream count when it's greater than 0. Otherwise use DefStreamCountWhenMaxThreadsNotSet. 
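The comment block above defines how the effective stream count is chosen. As an illustration only (the helper name below is hypothetical and not part of this change; the real logic lives in handleFineGrainedShuffle that follows), the rule can be restated as a standalone Go sketch:

// resolveStreamCount restates the rule documented above:
//   == 0 -> fine grained shuffle is disabled
//    > 0 -> use the session value directly
//    < 0 -> fall back to TiFlashMaxThreads when it is positive, otherwise use the default
func resolveStreamCount(streamCount, maxThreads, defStreamCount int64) (count uint64, enabled bool) {
	if streamCount == 0 {
		return 0, false
	}
	if streamCount < 0 {
		if maxThreads > 0 {
			streamCount = maxThreads
		} else {
			streamCount = defStreamCount
		}
	}
	return uint64(streamCount), true
}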
+func handleFineGrainedShuffle(sctx sessionctx.Context, plan PhysicalPlan) { + streamCount := sctx.GetSessionVars().TiFlashFineGrainedShuffleStreamCount + if streamCount == 0 { + return + } + if streamCount < 0 { + if sctx.GetSessionVars().TiFlashMaxThreads > 0 { + streamCount = sctx.GetSessionVars().TiFlashMaxThreads + } else { + streamCount = variable.DefStreamCountWhenMaxThreadsNotSet + } + } + setupFineGrainedShuffle(uint64(streamCount), plan) +} + +func setupFineGrainedShuffle(streamCount uint64, plan PhysicalPlan) { + if tableReader, ok := plan.(*PhysicalTableReader); ok { + if _, isExchangeSender := tableReader.tablePlan.(*PhysicalExchangeSender); isExchangeSender { + helper := fineGrainedShuffleHelper{shuffleTarget: unknown, plans: make([]*basePhysicalPlan, 1)} + setupFineGrainedShuffleInternal(tableReader.tablePlan, &helper, streamCount) + } + } else { + for _, child := range plan.Children() { + setupFineGrainedShuffle(streamCount, child) + } + } +} + +type shuffleTarget uint8 + +const ( + unknown shuffleTarget = iota + window + joinBuild +) + +type fineGrainedShuffleHelper struct { + shuffleTarget shuffleTarget + plans []*basePhysicalPlan +} + +func (h *fineGrainedShuffleHelper) clear() { + h.shuffleTarget = unknown + h.plans = h.plans[:0] +} + +func (h *fineGrainedShuffleHelper) updateTarget(t shuffleTarget, p *basePhysicalPlan) { + h.shuffleTarget = t + h.plans = append(h.plans, p) +} + +func setupFineGrainedShuffleInternal(plan PhysicalPlan, helper *fineGrainedShuffleHelper, streamCount uint64) { + switch x := plan.(type) { + case *PhysicalWindow: + // Do not clear the plans because window executor will keep the data partition. + // For non hash partition window function, there will be a passthrough ExchangeSender to collect data, + // which will break data partition. + helper.updateTarget(window, &x.basePhysicalPlan) + setupFineGrainedShuffleInternal(x.children[0], helper, streamCount) + case *PhysicalSort: + if x.IsPartialSort { + // Partial sort will keep the data partition. + helper.plans = append(helper.plans, &x.basePhysicalPlan) + } else { + // Global sort will break the data partition. + helper.clear() + } + setupFineGrainedShuffleInternal(x.children[0], helper, streamCount) + case *PhysicalSelection: + helper.plans = append(helper.plans, &x.basePhysicalPlan) + setupFineGrainedShuffleInternal(x.children[0], helper, streamCount) + case *PhysicalProjection: + helper.plans = append(helper.plans, &x.basePhysicalPlan) + setupFineGrainedShuffleInternal(x.children[0], helper, streamCount) + case *PhysicalExchangeReceiver: + helper.plans = append(helper.plans, &x.basePhysicalPlan) + setupFineGrainedShuffleInternal(x.children[0], helper, streamCount) + case *PhysicalHashAgg: + // HashAgg is not implemented for now. + helper.clear() + setupFineGrainedShuffleInternal(x.children[0], helper, streamCount) + case *PhysicalHashJoin: + child0 := x.children[0] + child1 := x.children[1] + if x.InnerChildIdx == 0 { + // Child0 is build side. + child0Helper := fineGrainedShuffleHelper{shuffleTarget: joinBuild, plans: []*basePhysicalPlan{}} + setupFineGrainedShuffleInternal(child0, &child0Helper, streamCount) + + // HashJoin is not implemented for now. + helper.clear() + setupFineGrainedShuffleInternal(child1, helper, streamCount) + } else { + // Child1 is build side. + child1Helper := fineGrainedShuffleHelper{shuffleTarget: joinBuild, plans: []*basePhysicalPlan{}} + setupFineGrainedShuffleInternal(child1, &child1Helper, streamCount) + + // HashJoin is not implemented for now. 
+ helper.clear() + setupFineGrainedShuffleInternal(child0, helper, streamCount) + } + case *PhysicalExchangeSender: + if x.ExchangeType == tipb.ExchangeType_Hash { + if helper.shuffleTarget == window { + // Set up stream count for all plans based on shuffle target type. + // Currently, only enable fine grained shuffle if the shuffle target is window. + x.TiFlashFineGrainedShuffleStreamCount = streamCount + for _, p := range helper.plans { + p.TiFlashFineGrainedShuffleStreamCount = streamCount + } + } + } + // exchange sender will break the data partition. + helper.clear() + setupFineGrainedShuffleInternal(x.children[0], helper, streamCount) + default: + for _, child := range x.Children() { + childHelper := fineGrainedShuffleHelper{shuffleTarget: unknown, plans: []*basePhysicalPlan{}} + setupFineGrainedShuffleInternal(child, &childHelper, streamCount) + } + } +} + // checkPlanCacheable used to check whether a plan can be cached. Plans that // meet the following characteristics cannot be cached: // 1. Use the TiFlash engine. diff --git a/planner/core/optimizer_test.go b/planner/core/optimizer_test.go index cc742c747b406..dd8a41bbab1f3 100644 --- a/planner/core/optimizer_test.go +++ b/planner/core/optimizer_test.go @@ -15,10 +15,13 @@ package core import ( + "reflect" "testing" "github.com/pingcap/tidb/parser/mysql" + "github.com/pingcap/tidb/planner/property" "github.com/pingcap/tidb/types" + "github.com/pingcap/tipb/go-tipb" "github.com/stretchr/testify/require" ) @@ -102,3 +105,187 @@ func TestMPPJoinKeyTypeConvert(t *testing.T) { testJoinKeyTypeConvert(t, unsignedBigIntType, bigIntType, decimalType, true, true) testJoinKeyTypeConvert(t, bigIntType, unsignedBigIntType, decimalType, true, true) } + +// Test for core.handleFineGrainedShuffle() +func TestHandleFineGrainedShuffle(t *testing.T) { + sortItem := property.SortItem{ + Col: nil, + Desc: true, + } + var plans []*basePhysicalPlan + tableReader := &PhysicalTableReader{} + partWindow := &PhysicalWindow{ + // Meaningless sort item, just for test. 
+ PartitionBy: []property.SortItem{sortItem}, + } + partialSort := &PhysicalSort{ + IsPartialSort: true, + } + sort := &PhysicalSort{} + recv := &PhysicalExchangeReceiver{} + passSender := &PhysicalExchangeSender{ + ExchangeType: tipb.ExchangeType_PassThrough, + } + hashSender := &PhysicalExchangeSender{ + ExchangeType: tipb.ExchangeType_Hash, + } + tableScan := &PhysicalTableScan{} + plans = append(plans, &partWindow.basePhysicalPlan) + plans = append(plans, &partialSort.basePhysicalPlan) + plans = append(plans, &sort.basePhysicalPlan) + plans = append(plans, &recv.basePhysicalPlan) + plans = append(plans, &hashSender.basePhysicalPlan) + clear := func(plans []*basePhysicalPlan) { + for _, p := range plans { + p.children = nil + p.TiFlashFineGrainedShuffleStreamCount = 0 + } + } + var check func(p PhysicalPlan, expStreamCount int64, expChildCount int, curChildCount int) + check = func(p PhysicalPlan, expStreamCount int64, expChildCount int, curChildCount int) { + if len(p.Children()) == 0 { + require.Equal(t, expChildCount, curChildCount) + _, isTableScan := p.(*PhysicalTableScan) + require.True(t, isTableScan) + return + } + val := reflect.ValueOf(p) + actStreamCount := reflect.Indirect(val).FieldByName("TiFlashFineGrainedShuffleStreamCount").Interface().(uint64) + require.Equal(t, uint64(expStreamCount), actStreamCount) + for _, child := range p.Children() { + check(child, expStreamCount, expChildCount, curChildCount+1) + } + } + + const expStreamCount int64 = 8 + sctx := MockContext() + sctx.GetSessionVars().TiFlashFineGrainedShuffleStreamCount = expStreamCount + + start := func(p PhysicalPlan, expStreamCount int64, expChildCount int, curChildCount int) { + handleFineGrainedShuffle(sctx, tableReader) + check(p, expStreamCount, expChildCount, curChildCount) + clear(plans) + } + + // Window <- Sort <- ExchangeReceiver <- ExchangeSender + tableReader.tablePlan = passSender + passSender.children = []PhysicalPlan{partWindow} + partWindow.children = []PhysicalPlan{partialSort} + partialSort.children = []PhysicalPlan{recv} + recv.children = []PhysicalPlan{hashSender} + hashSender.children = []PhysicalPlan{tableScan} + start(partWindow, expStreamCount, 4, 0) + + // Window <- ExchangeReceiver <- ExchangeSender + tableReader.tablePlan = passSender + passSender.children = []PhysicalPlan{partWindow} + partWindow.children = []PhysicalPlan{recv} + recv.children = []PhysicalPlan{hashSender} + hashSender.children = []PhysicalPlan{tableScan} + start(partWindow, expStreamCount, 3, 0) + + // Window <- Sort(x) <- ExchangeReceiver <- ExchangeSender + // Fine-grained shuffle is disabled because sort is not partial. + tableReader.tablePlan = passSender + passSender.children = []PhysicalPlan{partWindow} + partWindow.children = []PhysicalPlan{sort} + sort.children = []PhysicalPlan{recv} + recv.children = []PhysicalPlan{hashSender} + hashSender.children = []PhysicalPlan{tableScan} + start(partWindow, 0, 4, 0) + + // Window <- Sort <- Window <- Sort <- ExchangeReceiver <- ExchangeSender + partWindow1 := &PhysicalWindow{ + // Meaningless sort item, just for test. 
+ PartitionBy: []property.SortItem{sortItem}, + } + partialSort1 := &PhysicalSort{ + IsPartialSort: true, + } + tableReader.tablePlan = passSender + passSender.children = []PhysicalPlan{partWindow} + partWindow.children = []PhysicalPlan{partialSort} + partialSort.children = []PhysicalPlan{partWindow1} + partWindow1.children = []PhysicalPlan{partialSort1} + partialSort1.children = []PhysicalPlan{recv} + recv.children = []PhysicalPlan{hashSender} + hashSender.children = []PhysicalPlan{tableScan} + start(partWindow, expStreamCount, 6, 0) + + // Window <- Sort <- Window(x) <- Sort <- ExchangeReceiver <- ExchangeSender(x) + // Fine-grained shuffle is disabled because Window is not hash partition. + nonPartWindow := &PhysicalWindow{} + partialSort1 = &PhysicalSort{ + IsPartialSort: true, + } + tableReader.tablePlan = passSender + passSender.children = []PhysicalPlan{partWindow} + partWindow.children = []PhysicalPlan{partialSort} + partialSort.children = []PhysicalPlan{nonPartWindow} + nonPartWindow.children = []PhysicalPlan{partialSort1} + partialSort1.children = []PhysicalPlan{recv} + recv.children = []PhysicalPlan{passSender} + passSender.children = []PhysicalPlan{tableScan} + start(partWindow, 0, 6, 0) + + // HashAgg <- Window <- ExchangeReceiver <- ExchangeSender + hashAgg := &PhysicalHashAgg{} + tableReader.tablePlan = passSender + passSender.children = []PhysicalPlan{hashAgg} + hashAgg.children = []PhysicalPlan{partWindow} + partWindow.children = []PhysicalPlan{recv} + recv.children = []PhysicalPlan{hashSender} + hashSender.children = []PhysicalPlan{tableScan} + require.Equal(t, uint64(0), hashAgg.TiFlashFineGrainedShuffleStreamCount) + start(partWindow, expStreamCount, 3, 0) + + // Window <- HashAgg(x) <- ExchangeReceiver <- ExchangeSender + tableReader.tablePlan = passSender + passSender.children = []PhysicalPlan{partWindow} + hashAgg = &PhysicalHashAgg{} + partWindow.children = []PhysicalPlan{hashAgg} + hashAgg.children = []PhysicalPlan{recv} + recv.children = []PhysicalPlan{hashSender} + hashSender.children = []PhysicalPlan{tableScan} + start(partWindow, 0, 4, 0) + + // Window <- Join(x) <- ExchangeReceiver <- ExchangeSender + // <- ExchangeReceiver <- ExchangeSender + tableReader.tablePlan = passSender + passSender.children = []PhysicalPlan{partWindow} + hashJoin := &PhysicalHashJoin{} + recv1 := &PhysicalExchangeReceiver{} + tableScan1 := &PhysicalTableScan{} + partWindow.children = []PhysicalPlan{hashJoin} + hashSender1 := &PhysicalExchangeSender{ + ExchangeType: tipb.ExchangeType_Hash, + } + hashJoin.children = []PhysicalPlan{recv, recv1} + recv.children = []PhysicalPlan{hashSender} + recv1.children = []PhysicalPlan{hashSender1} + hashSender.children = []PhysicalPlan{tableScan} + hashSender1.children = []PhysicalPlan{tableScan1} + start(partWindow, 0, 4, 0) + + // Join <- ExchangeReceiver <- ExchangeSender <- Window <- ExchangeReceiver(2) <- ExchangeSender(2) + // <- ExchangeReceiver(1) <- ExchangeSender(1) + tableReader.tablePlan = passSender + passSender.children = []PhysicalPlan{partWindow} + hashJoin = &PhysicalHashJoin{} + recv1 = &PhysicalExchangeReceiver{} + hashJoin.children = []PhysicalPlan{recv, recv1} + recv.children = []PhysicalPlan{hashSender} + hashSender.children = []PhysicalPlan{partWindow} + recv2 := &PhysicalExchangeReceiver{} + hashSender2 := &PhysicalExchangeSender{ + ExchangeType: tipb.ExchangeType_Hash, + } + tableScan2 := &PhysicalTableScan{} + partWindow.children = []PhysicalPlan{recv2} + recv2.children = []PhysicalPlan{hashSender2} + hashSender2.children = 
[]PhysicalPlan{tableScan2} + recv1.children = []PhysicalPlan{hashSender1} + tableScan1 = &PhysicalTableScan{} + hashSender1.children = []PhysicalPlan{tableScan1} + start(partWindow, expStreamCount, 3, 0) +} diff --git a/planner/core/plan.go b/planner/core/plan.go index aad8d06b68a7e..1dedfd05cf7e2 100644 --- a/planner/core/plan.go +++ b/planner/core/plan.go @@ -427,6 +427,11 @@ type basePhysicalPlan struct { // used by the new cost interface planCostInit bool planCost float64 + + // Only for MPP. If TiFlashFineGrainedShuffleStreamCount > 0: + // 1. For ExchangeSender, means its output will be partitioned by hash key. + // 2. For ExchangeReceiver/Window/Sort, means its input is already partitioned. + TiFlashFineGrainedShuffleStreamCount uint64 } // Cost implements PhysicalPlan interface. @@ -441,8 +446,9 @@ func (p *basePhysicalPlan) SetCost(cost float64) { func (p *basePhysicalPlan) cloneWithSelf(newSelf PhysicalPlan) (*basePhysicalPlan, error) { base := &basePhysicalPlan{ - basePlan: p.basePlan, - self: newSelf, + basePlan: p.basePlan, + self: newSelf, + TiFlashFineGrainedShuffleStreamCount: p.TiFlashFineGrainedShuffleStreamCount, } for _, child := range p.children { cloned, err := child.Clone() diff --git a/planner/core/plan_test.go b/planner/core/plan_test.go index e9ec780bbfd13..003ca690a206d 100644 --- a/planner/core/plan_test.go +++ b/planner/core/plan_test.go @@ -893,6 +893,36 @@ func TestIssue34863(t *testing.T) { tk.MustQuery("select count(o.c_id) from c right join o on c.c_id=o.c_id;").Check(testkit.Rows("5")) } +func TestCloneFineGrainedShuffleStreamCount(t *testing.T) { + window := &core.PhysicalWindow{} + newPlan, err := window.Clone() + require.NoError(t, err) + newWindow, ok := newPlan.(*core.PhysicalWindow) + require.Equal(t, ok, true) + require.Equal(t, window.TiFlashFineGrainedShuffleStreamCount, newWindow.TiFlashFineGrainedShuffleStreamCount) + + window.TiFlashFineGrainedShuffleStreamCount = 8 + newPlan, err = window.Clone() + require.NoError(t, err) + newWindow, ok = newPlan.(*core.PhysicalWindow) + require.Equal(t, ok, true) + require.Equal(t, window.TiFlashFineGrainedShuffleStreamCount, newWindow.TiFlashFineGrainedShuffleStreamCount) + + sort := &core.PhysicalSort{} + newPlan, err = sort.Clone() + require.NoError(t, err) + newSort, ok := newPlan.(*core.PhysicalSort) + require.Equal(t, ok, true) + require.Equal(t, sort.TiFlashFineGrainedShuffleStreamCount, newSort.TiFlashFineGrainedShuffleStreamCount) + + sort.TiFlashFineGrainedShuffleStreamCount = 8 + newPlan, err = sort.Clone() + require.NoError(t, err) + newSort, ok = newPlan.(*core.PhysicalSort) + require.Equal(t, ok, true) + require.Equal(t, sort.TiFlashFineGrainedShuffleStreamCount, newSort.TiFlashFineGrainedShuffleStreamCount) +} + // https://github.com/pingcap/tidb/issues/35527. 
func TestTableDualAsSubQuery(t *testing.T) { store, clean := testkit.CreateMockStore(t) diff --git a/planner/core/plan_to_pb.go b/planner/core/plan_to_pb.go index 7f93dd440b3fe..dbea51006c1dd 100644 --- a/planner/core/plan_to_pb.go +++ b/planner/core/plan_to_pb.go @@ -295,9 +295,11 @@ func (e *PhysicalExchangeSender) ToPB(ctx sessionctx.Context, storeType kv.Store } executorID := e.ExplainID().String() return &tipb.Executor{ - Tp: tipb.ExecType_TypeExchangeSender, - ExchangeSender: ecExec, - ExecutorId: &executorID, + Tp: tipb.ExecType_TypeExchangeSender, + ExchangeSender: ecExec, + ExecutorId: &executorID, + FineGrainedShuffleStreamCount: e.TiFlashFineGrainedShuffleStreamCount, + FineGrainedShuffleBatchSize: ctx.GetSessionVars().TiFlashFineGrainedShuffleBatchSize, }, nil } @@ -327,9 +329,11 @@ func (e *PhysicalExchangeReceiver) ToPB(ctx sessionctx.Context, storeType kv.Sto } executorID := e.ExplainID().String() return &tipb.Executor{ - Tp: tipb.ExecType_TypeExchangeReceiver, - ExchangeReceiver: ecExec, - ExecutorId: &executorID, + Tp: tipb.ExecType_TypeExchangeReceiver, + ExchangeReceiver: ecExec, + ExecutorId: &executorID, + FineGrainedShuffleStreamCount: e.TiFlashFineGrainedShuffleStreamCount, + FineGrainedShuffleBatchSize: ctx.GetSessionVars().TiFlashFineGrainedShuffleBatchSize, }, nil } @@ -540,7 +544,13 @@ func (p *PhysicalWindow) ToPB(ctx sessionctx.Context, storeType kv.StoreType) (* return nil, errors.Trace(err) } executorID := p.ExplainID().String() - return &tipb.Executor{Tp: tipb.ExecType_TypeWindow, Window: windowExec, ExecutorId: &executorID}, nil + return &tipb.Executor{ + Tp: tipb.ExecType_TypeWindow, + Window: windowExec, + ExecutorId: &executorID, + FineGrainedShuffleStreamCount: p.TiFlashFineGrainedShuffleStreamCount, + FineGrainedShuffleBatchSize: ctx.GetSessionVars().TiFlashFineGrainedShuffleBatchSize, + }, nil } // ToPB implements PhysicalPlan ToPB interface. @@ -565,7 +575,13 @@ func (p *PhysicalSort) ToPB(ctx sessionctx.Context, storeType kv.StoreType) (*ti return nil, errors.Trace(err) } executorID := p.ExplainID().String() - return &tipb.Executor{Tp: tipb.ExecType_TypeSort, Sort: sortExec, ExecutorId: &executorID}, nil + return &tipb.Executor{ + Tp: tipb.ExecType_TypeSort, + Sort: sortExec, + ExecutorId: &executorID, + FineGrainedShuffleStreamCount: p.TiFlashFineGrainedShuffleStreamCount, + FineGrainedShuffleBatchSize: ctx.GetSessionVars().TiFlashFineGrainedShuffleBatchSize, + }, nil } // SetPBColumnsDefaultValue sets the default values of tipb.ColumnInfos. diff --git a/planner/core/preprocess.go b/planner/core/preprocess.go index df47167354c2a..1b5d016606b30 100644 --- a/planner/core/preprocess.go +++ b/planner/core/preprocess.go @@ -21,7 +21,6 @@ import ( "strings" "github.com/pingcap/errors" - "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/ddl" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/infoschema" @@ -156,9 +155,8 @@ type PreprocessorReturn struct { SnapshotTSEvaluator func(sessionctx.Context) (uint64, error) // LastSnapshotTS is the last evaluated snapshotTS if any // otherwise it defaults to zero - LastSnapshotTS uint64 - InfoSchema infoschema.InfoSchema - ReadReplicaScope string + LastSnapshotTS uint64 + InfoSchema infoschema.InfoSchema } // preprocessWith is used to record info from WITH statements like CTE name. @@ -1659,18 +1657,6 @@ func (p *preprocessor) updateStateFromStaleReadProcessor() error { } } } - - // It is a little hacking for the below codes. 
`ReadReplicaScope` is used both by stale read's closest read and local txn. - // They are different features and the value for `ReadReplicaScope` will be conflicted in some scenes. - // But because local txn is still an experimental feature, we should make stale read work first. - if p.IsStaleness || p.ctx.GetSessionVars().GetReplicaRead().IsClosestRead() { - // When stale read or closet read is set, we read the tidb's locality as the read replica scope - p.ReadReplicaScope = config.GetTxnScopeFromConfig() - } else { - // Otherwise, use the scope from TxnCtx for local txn validation - p.ReadReplicaScope = p.ctx.GetSessionVars().TxnCtx.TxnScope - } - p.initedLastSnapshotTS = true return nil } diff --git a/planner/core/testdata/integration_suite_in.json b/planner/core/testdata/integration_suite_in.json index e66c8ebc2fac3..dc2b7b07239e1 100644 --- a/planner/core/testdata/integration_suite_in.json +++ b/planner/core/testdata/integration_suite_in.json @@ -938,5 +938,34 @@ "explain format = 'brief' select count(*) from rp_t where a = 1 or a = 20", "explain format = 'brief' select count(*) from hp_t where a = 1 or a = 20" ] + }, + { + "name": "TestTiFlashFineGrainedShuffle", + "cases": [ + // 1. Can use fine grained shuffle. + "explain format = 'brief' select row_number() over w1 from t1 window w1 as (partition by c1 order by c1);", + // Test two window function. + "explain format = 'brief' select row_number() over w1, rank() over w2 from t1 window w1 as (partition by c1 order by c1), w2 as (partition by c2);", + // Limit + Order. + "explain format = 'brief' select row_number() over w1, rank() over w2 from t1 window w1 as (partition by c1 order by c1), w2 as (partition by c2) order by 1, 2 limit 10;", + // // No partition by key in w2, so disabled. But w1 is still enabled. BUG: https://github.com/pingcap/tidb/pull/35256#discussion_r913324160 + // "explain format = 'brief' select row_number() over w1, row_number() over w2 from t1 window w1 as (partition by c1 order by c1), w2 as (order by c1);", + // GroupBy key and window function partition key are not same. + "explain format = 'brief' select row_number() over w1, count(c2) from t1 group by c1 having c1 > 10 window w1 as (partition by c2 order by c2);", + "explain format = 'brief' select row_number() over w1, count(c1) from t1 group by c2 having c2 > 10 window w1 as (partition by c1 order by c2);", + // Join, same as GroupBy. + "explain format = 'brief' select row_number() over w1 from t1 a join t1 b on a.c1 = b.c2 window w1 as (partition by a.c1);", + // Selection. + "explain format = 'brief' select row_number() over w1 from t1 where c1 < 100 window w1 as (partition by c1 order by c1);", + + // 2. Cannot use fine grained shuffle. + // No window function, so disabled. + "explain format = 'brief' select * from t1;", + // No partition key in window function, so disabled. + "explain format = 'brief' select row_number() over w1 from t1 window w1 as (order by c1);", + // GroupBy key is same with window function partition key, so they are in one fragment. + // But fine grained shuffle doesn't support group by for now. 
+ "explain format = 'brief' select row_number() over w1, count(c2) from t1 group by c1 having c1 > 10 window w1 as (partition by c1 order by c2);" + ] } ] diff --git a/planner/core/testdata/integration_suite_out.json b/planner/core/testdata/integration_suite_out.json index d305f810334b1..aeff80fd103ea 100644 --- a/planner/core/testdata/integration_suite_out.json +++ b/planner/core/testdata/integration_suite_out.json @@ -7031,5 +7031,165 @@ ] } ] + }, + { + "Name": "TestTiFlashFineGrainedShuffle", + "Cases": [ + { + "SQL": "explain format = 'brief' select row_number() over w1 from t1 window w1 as (partition by c1 order by c1);", + "Plan": [ + "TableReader 10000.00 root data:ExchangeSender", + "└─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 10000.00 mpp[tiflash] Column#5, stream_count: 8", + " └─Window 10000.00 mpp[tiflash] row_number()->Column#5 over(partition by test.t1.c1 order by test.t1.c1 rows between current row and current row), stream_count: 8", + " └─Sort 10000.00 mpp[tiflash] test.t1.c1, test.t1.c1, stream_count: 8", + " └─ExchangeReceiver 10000.00 mpp[tiflash] stream_count: 8", + " └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.t1.c1, collate: binary], stream_count: 8", + " └─TableFullScan 10000.00 mpp[tiflash] table:t1 keep order:false, stats:pseudo" + ] + }, + { + "SQL": "explain format = 'brief' select row_number() over w1, rank() over w2 from t1 window w1 as (partition by c1 order by c1), w2 as (partition by c2);", + "Plan": [ + "TableReader 10000.00 root data:ExchangeSender", + "└─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 10000.00 mpp[tiflash] Column#7, Column#6, stream_count: 8", + " └─Window 10000.00 mpp[tiflash] row_number()->Column#7 over(partition by test.t1.c1 order by test.t1.c1 rows between current row and current row), stream_count: 8", + " └─Sort 10000.00 mpp[tiflash] test.t1.c1, test.t1.c1, stream_count: 8", + " └─ExchangeReceiver 10000.00 mpp[tiflash] stream_count: 8", + " └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.t1.c1, collate: binary], stream_count: 8", + " └─Window 10000.00 mpp[tiflash] rank()->Column#6 over(partition by test.t1.c2), stream_count: 8", + " └─Sort 10000.00 mpp[tiflash] test.t1.c2, stream_count: 8", + " └─ExchangeReceiver 10000.00 mpp[tiflash] stream_count: 8", + " └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.t1.c2, collate: binary], stream_count: 8", + " └─TableFullScan 10000.00 mpp[tiflash] table:t1 keep order:false, stats:pseudo" + ] + }, + { + "SQL": "explain format = 'brief' select row_number() over w1, rank() over w2 from t1 window w1 as (partition by c1 order by c1), w2 as (partition by c2) order by 1, 2 limit 10;", + "Plan": [ + "Projection 10.00 root Column#7, Column#6", + "└─TopN 10.00 root Column#7, Column#6, offset:0, count:10", + " └─TableReader 10.00 root data:ExchangeSender", + " └─ExchangeSender 10.00 mpp[tiflash] ExchangeType: PassThrough", + " └─TopN 10.00 mpp[tiflash] Column#7, Column#6, offset:0, count:10", + " └─Window 10000.00 mpp[tiflash] row_number()->Column#7 over(partition by test.t1.c1 order by test.t1.c1 rows between current row and current row), stream_count: 8", + " └─Sort 10000.00 mpp[tiflash] test.t1.c1, test.t1.c1, stream_count: 8", + " └─ExchangeReceiver 10000.00 mpp[tiflash] stream_count: 8", + " └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.t1.c1, 
collate: binary], stream_count: 8", + " └─Window 10000.00 mpp[tiflash] rank()->Column#6 over(partition by test.t1.c2), stream_count: 8", + " └─Sort 10000.00 mpp[tiflash] test.t1.c2, stream_count: 8", + " └─ExchangeReceiver 10000.00 mpp[tiflash] stream_count: 8", + " └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.t1.c2, collate: binary], stream_count: 8", + " └─TableFullScan 10000.00 mpp[tiflash] table:t1 keep order:false, stats:pseudo" + ] + }, + { + "SQL": "explain format = 'brief' select row_number() over w1, count(c2) from t1 group by c1 having c1 > 10 window w1 as (partition by c2 order by c2);", + "Plan": [ + "TableReader 2666.67 root data:ExchangeSender", + "└─ExchangeSender 2666.67 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 2666.67 mpp[tiflash] Column#6, Column#4, stream_count: 8", + " └─Window 2666.67 mpp[tiflash] row_number()->Column#6 over(partition by test.t1.c2 order by test.t1.c2 rows between current row and current row), stream_count: 8", + " └─Sort 2666.67 mpp[tiflash] test.t1.c2, test.t1.c2, stream_count: 8", + " └─ExchangeReceiver 2666.67 mpp[tiflash] stream_count: 8", + " └─ExchangeSender 2666.67 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.t1.c2, collate: binary], stream_count: 8", + " └─Projection 2666.67 mpp[tiflash] Column#4, test.t1.c2", + " └─HashAgg 2666.67 mpp[tiflash] group by:test.t1.c1, funcs:count(test.t1.c2)->Column#4, funcs:firstrow(test.t1.c2)->test.t1.c2", + " └─ExchangeReceiver 3333.33 mpp[tiflash] ", + " └─ExchangeSender 3333.33 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.t1.c1, collate: binary]", + " └─Selection 3333.33 mpp[tiflash] gt(test.t1.c1, 10)", + " └─TableFullScan 10000.00 mpp[tiflash] table:t1 keep order:false, stats:pseudo" + ] + }, + { + "SQL": "explain format = 'brief' select row_number() over w1, count(c1) from t1 group by c2 having c2 > 10 window w1 as (partition by c1 order by c2);", + "Plan": [ + "TableReader 2666.67 root data:ExchangeSender", + "└─ExchangeSender 2666.67 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 2666.67 mpp[tiflash] Column#6, Column#4, stream_count: 8", + " └─Window 2666.67 mpp[tiflash] row_number()->Column#6 over(partition by test.t1.c1 order by test.t1.c2 rows between current row and current row), stream_count: 8", + " └─Sort 2666.67 mpp[tiflash] test.t1.c1, test.t1.c2, stream_count: 8", + " └─ExchangeReceiver 2666.67 mpp[tiflash] stream_count: 8", + " └─ExchangeSender 2666.67 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.t1.c1, collate: binary], stream_count: 8", + " └─Projection 2666.67 mpp[tiflash] Column#4, test.t1.c1, test.t1.c2", + " └─HashAgg 2666.67 mpp[tiflash] group by:test.t1.c2, funcs:count(test.t1.c1)->Column#4, funcs:firstrow(test.t1.c1)->test.t1.c1, funcs:firstrow(test.t1.c2)->test.t1.c2", + " └─ExchangeReceiver 3333.33 mpp[tiflash] ", + " └─ExchangeSender 3333.33 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.t1.c2, collate: binary]", + " └─Selection 3333.33 mpp[tiflash] gt(test.t1.c2, 10)", + " └─TableFullScan 10000.00 mpp[tiflash] table:t1 keep order:false, stats:pseudo" + ] + }, + { + "SQL": "explain format = 'brief' select row_number() over w1 from t1 a join t1 b on a.c1 = b.c2 window w1 as (partition by a.c1);", + "Plan": [ + "TableReader 12487.50 root data:ExchangeSender", + "└─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 12487.50 mpp[tiflash] Column#8, stream_count: 8", + " └─Window 12487.50 mpp[tiflash] 
row_number()->Column#8 over(partition by test.t1.c1 rows between current row and current row), stream_count: 8", + " └─Sort 12487.50 mpp[tiflash] test.t1.c1, stream_count: 8", + " └─ExchangeReceiver 12487.50 mpp[tiflash] stream_count: 8", + " └─ExchangeSender 12487.50 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.t1.c1, collate: binary], stream_count: 8", + " └─HashJoin 12487.50 mpp[tiflash] inner join, equal:[eq(test.t1.c1, test.t1.c2)]", + " ├─ExchangeReceiver(Build) 9990.00 mpp[tiflash] ", + " │ └─ExchangeSender 9990.00 mpp[tiflash] ExchangeType: Broadcast", + " │ └─Selection 9990.00 mpp[tiflash] not(isnull(test.t1.c1))", + " │ └─TableFullScan 10000.00 mpp[tiflash] table:a keep order:false, stats:pseudo", + " └─Selection(Probe) 9990.00 mpp[tiflash] not(isnull(test.t1.c2))", + " └─TableFullScan 10000.00 mpp[tiflash] table:b keep order:false, stats:pseudo" + ] + }, + { + "SQL": "explain format = 'brief' select row_number() over w1 from t1 where c1 < 100 window w1 as (partition by c1 order by c1);", + "Plan": [ + "TableReader 3323.33 root data:ExchangeSender", + "└─ExchangeSender 3323.33 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 3323.33 mpp[tiflash] Column#5, stream_count: 8", + " └─Window 3323.33 mpp[tiflash] row_number()->Column#5 over(partition by test.t1.c1 order by test.t1.c1 rows between current row and current row), stream_count: 8", + " └─Sort 3323.33 mpp[tiflash] test.t1.c1, test.t1.c1, stream_count: 8", + " └─ExchangeReceiver 3323.33 mpp[tiflash] stream_count: 8", + " └─ExchangeSender 3323.33 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.t1.c1, collate: binary], stream_count: 8", + " └─Selection 3323.33 mpp[tiflash] lt(test.t1.c1, 100)", + " └─TableFullScan 10000.00 mpp[tiflash] table:t1 keep order:false, stats:pseudo" + ] + }, + { + "SQL": "explain format = 'brief' select * from t1;", + "Plan": [ + "TableReader 10000.00 root data:ExchangeSender", + "└─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: PassThrough", + " └─TableFullScan 10000.00 mpp[tiflash] table:t1 keep order:false, stats:pseudo" + ] + }, + { + "SQL": "explain format = 'brief' select row_number() over w1 from t1 window w1 as (order by c1);", + "Plan": [ + "TableReader 10000.00 root data:ExchangeSender", + "└─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 10000.00 mpp[tiflash] Column#5", + " └─Window 10000.00 mpp[tiflash] row_number()->Column#5 over(order by test.t1.c1 rows between current row and current row)", + " └─Sort 10000.00 mpp[tiflash] test.t1.c1", + " └─ExchangeReceiver 10000.00 mpp[tiflash] ", + " └─ExchangeSender 10000.00 mpp[tiflash] ExchangeType: PassThrough", + " └─TableFullScan 10000.00 mpp[tiflash] table:t1 keep order:false, stats:pseudo" + ] + }, + { + "SQL": "explain format = 'brief' select row_number() over w1, count(c2) from t1 group by c1 having c1 > 10 window w1 as (partition by c1 order by c2);", + "Plan": [ + "TableReader 2666.67 root data:ExchangeSender", + "└─ExchangeSender 2666.67 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection 2666.67 mpp[tiflash] Column#6, Column#4", + " └─Window 2666.67 mpp[tiflash] row_number()->Column#6 over(partition by test.t1.c1 order by test.t1.c2 rows between current row and current row)", + " └─Sort 2666.67 mpp[tiflash] test.t1.c1, test.t1.c2", + " └─Projection 2666.67 mpp[tiflash] Column#4, test.t1.c1, test.t1.c2", + " └─HashAgg 2666.67 mpp[tiflash] group by:test.t1.c1, funcs:count(test.t1.c2)->Column#4, funcs:firstrow(test.t1.c1)->test.t1.c1, 
funcs:firstrow(test.t1.c2)->test.t1.c2", + " └─ExchangeReceiver 3333.33 mpp[tiflash] ", + " └─ExchangeSender 3333.33 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.t1.c1, collate: binary]", + " └─Selection 3333.33 mpp[tiflash] gt(test.t1.c1, 10)", + " └─TableFullScan 10000.00 mpp[tiflash] table:t1 keep order:false, stats:pseudo" + ] + } + ] } ] diff --git a/planner/core/testdata/window_push_down_suite_out.json b/planner/core/testdata/window_push_down_suite_out.json index 2b7b7b893cda4..085d1326f3daa 100644 --- a/planner/core/testdata/window_push_down_suite_out.json +++ b/planner/core/testdata/window_push_down_suite_out.json @@ -37,10 +37,10 @@ "Plan": [ "TableReader_24 10000.00 root data:ExchangeSender_23", "└─ExchangeSender_23 10000.00 mpp[tiflash] ExchangeType: PassThrough", - " └─Window_22 10000.00 mpp[tiflash] row_number()->Column#6 over(partition by test.employee.deptid rows between current row and current row)", - " └─Sort_13 10000.00 mpp[tiflash] test.employee.deptid", - " └─ExchangeReceiver_12 10000.00 mpp[tiflash] ", - " └─ExchangeSender_11 10000.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.employee.deptid, collate: binary]", + " └─Window_22 10000.00 mpp[tiflash] row_number()->Column#6 over(partition by test.employee.deptid rows between current row and current row), stream_count: 8", + " └─Sort_13 10000.00 mpp[tiflash] test.employee.deptid, stream_count: 8", + " └─ExchangeReceiver_12 10000.00 mpp[tiflash] stream_count: 8", + " └─ExchangeSender_11 10000.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.employee.deptid, collate: binary], stream_count: 8", " └─TableFullScan_10 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo" ], "Warn": null @@ -50,11 +50,11 @@ "Plan": [ "TableReader_30 10000.00 root data:ExchangeSender_29", "└─ExchangeSender_29 10000.00 mpp[tiflash] ExchangeType: PassThrough", - " └─Projection_7 10000.00 mpp[tiflash] test.employee.empid, test.employee.deptid, test.employee.salary, Column#7", - " └─Window_28 10000.00 mpp[tiflash] row_number()->Column#7 over(partition by Column#6 rows between current row and current row)", - " └─Sort_14 10000.00 mpp[tiflash] Column#6", - " └─ExchangeReceiver_13 10000.00 mpp[tiflash] ", - " └─ExchangeSender_12 10000.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: Column#6, collate: binary]", + " └─Projection_7 10000.00 mpp[tiflash] test.employee.empid, test.employee.deptid, test.employee.salary, Column#7, stream_count: 8", + " └─Window_28 10000.00 mpp[tiflash] row_number()->Column#7 over(partition by Column#6 rows between current row and current row), stream_count: 8", + " └─Sort_14 10000.00 mpp[tiflash] Column#6, stream_count: 8", + " └─ExchangeReceiver_13 10000.00 mpp[tiflash] stream_count: 8", + " └─ExchangeSender_12 10000.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: Column#6, collate: binary], stream_count: 8", " └─Projection_10 10000.00 mpp[tiflash] test.employee.empid, test.employee.deptid, test.employee.salary, plus(test.employee.deptid, 1)->Column#6", " └─TableFullScan_11 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo" ], @@ -65,10 +65,10 @@ "Plan": [ "TableReader_24 10000.00 root data:ExchangeSender_23", "└─ExchangeSender_23 10000.00 mpp[tiflash] ExchangeType: PassThrough", - " └─Window_22 10000.00 mpp[tiflash] row_number()->Column#6 over(partition by test.employee.deptid order by test.employee.salary desc rows between current row and current row)", - " └─Sort_13 10000.00 mpp[tiflash] 
test.employee.deptid, test.employee.salary:desc", - " └─ExchangeReceiver_12 10000.00 mpp[tiflash] ", - " └─ExchangeSender_11 10000.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.employee.deptid, collate: binary]", + " └─Window_22 10000.00 mpp[tiflash] row_number()->Column#6 over(partition by test.employee.deptid order by test.employee.salary desc rows between current row and current row), stream_count: 8", + " └─Sort_13 10000.00 mpp[tiflash] test.employee.deptid, test.employee.salary:desc, stream_count: 8", + " └─ExchangeReceiver_12 10000.00 mpp[tiflash] stream_count: 8", + " └─ExchangeSender_11 10000.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.employee.deptid, collate: binary], stream_count: 8", " └─TableFullScan_10 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo" ], "Warn": null @@ -78,10 +78,10 @@ "Plan": [ "TableReader_24 10000.00 root data:ExchangeSender_23", "└─ExchangeSender_23 10000.00 mpp[tiflash] ExchangeType: PassThrough", - " └─Window_22 10000.00 mpp[tiflash] rank()->Column#7, dense_rank()->Column#8 over(partition by test.employee.deptid)", - " └─Sort_13 10000.00 mpp[tiflash] test.employee.deptid", - " └─ExchangeReceiver_12 10000.00 mpp[tiflash] ", - " └─ExchangeSender_11 10000.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.employee.deptid, collate: binary]", + " └─Window_22 10000.00 mpp[tiflash] rank()->Column#7, dense_rank()->Column#8 over(partition by test.employee.deptid), stream_count: 8", + " └─Sort_13 10000.00 mpp[tiflash] test.employee.deptid, stream_count: 8", + " └─ExchangeReceiver_12 10000.00 mpp[tiflash] stream_count: 8", + " └─ExchangeSender_11 10000.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.employee.deptid, collate: binary], stream_count: 8", " └─TableFullScan_10 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo" ], "Warn": null @@ -91,12 +91,12 @@ "Plan": [ "TableReader_36 10000.00 root data:ExchangeSender_35", "└─ExchangeSender_35 10000.00 mpp[tiflash] ExchangeType: PassThrough", - " └─Projection_9 10000.00 mpp[tiflash] test.employee.empid, test.employee.deptid, test.employee.salary, Column#8, Column#7", - " └─Window_34 10000.00 mpp[tiflash] row_number()->Column#8 over(partition by test.employee.deptid rows between current row and current row)", - " └─Window_12 10000.00 mpp[tiflash] rank()->Column#7 over(partition by test.employee.deptid)", - " └─Sort_17 10000.00 mpp[tiflash] test.employee.deptid", - " └─ExchangeReceiver_16 10000.00 mpp[tiflash] ", - " └─ExchangeSender_15 10000.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.employee.deptid, collate: binary]", + " └─Projection_9 10000.00 mpp[tiflash] test.employee.empid, test.employee.deptid, test.employee.salary, Column#8, Column#7, stream_count: 8", + " └─Window_34 10000.00 mpp[tiflash] row_number()->Column#8 over(partition by test.employee.deptid rows between current row and current row), stream_count: 8", + " └─Window_12 10000.00 mpp[tiflash] rank()->Column#7 over(partition by test.employee.deptid), stream_count: 8", + " └─Sort_17 10000.00 mpp[tiflash] test.employee.deptid, stream_count: 8", + " └─ExchangeReceiver_16 10000.00 mpp[tiflash] stream_count: 8", + " └─ExchangeSender_15 10000.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.employee.deptid, collate: binary], stream_count: 8", " └─TableFullScan_14 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo" ], "Warn": null @@ -119,10 +119,10 @@ "Plan": [ "TableReader_36 
10000.00 root data:ExchangeSender_35", "└─ExchangeSender_35 10000.00 mpp[tiflash] ExchangeType: PassThrough", - " └─Window_34 10000.00 mpp[tiflash] rank()->Column#8 over(partition by test.employee.deptid)", - " └─Sort_20 10000.00 mpp[tiflash] test.employee.deptid", - " └─ExchangeReceiver_19 10000.00 mpp[tiflash] ", - " └─ExchangeSender_18 10000.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.employee.deptid, collate: binary]", + " └─Window_34 10000.00 mpp[tiflash] rank()->Column#8 over(partition by test.employee.deptid), stream_count: 8", + " └─Sort_20 10000.00 mpp[tiflash] test.employee.deptid, stream_count: 8", + " └─ExchangeReceiver_19 10000.00 mpp[tiflash] stream_count: 8", + " └─ExchangeSender_18 10000.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.employee.deptid, collate: binary], stream_count: 8", " └─Window_14 10000.00 mpp[tiflash] row_number()->Column#6 over(rows between current row and current row)", " └─ExchangeReceiver_17 10000.00 mpp[tiflash] ", " └─ExchangeSender_16 10000.00 mpp[tiflash] ExchangeType: PassThrough", @@ -285,10 +285,10 @@ "Plan": [ "TableReader_24 10000.00 root data:ExchangeSender_23", "└─ExchangeSender_23 10000.00 mpp[tiflash] ExchangeType: PassThrough", - " └─Window_22 10000.00 mpp[tiflash] row_number()->Column#6 over(partition by test.employee.empid order by test.employee.salary rows between current row and current row)", - " └─Sort_13 10000.00 mpp[tiflash] test.employee.empid, test.employee.salary", - " └─ExchangeReceiver_12 10000.00 mpp[tiflash] ", - " └─ExchangeSender_11 10000.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.employee.empid, collate: binary]", + " └─Window_22 10000.00 mpp[tiflash] row_number()->Column#6 over(partition by test.employee.empid order by test.employee.salary rows between current row and current row), stream_count: 8", + " └─Sort_13 10000.00 mpp[tiflash] test.employee.empid, test.employee.salary, stream_count: 8", + " └─ExchangeReceiver_12 10000.00 mpp[tiflash] stream_count: 8", + " └─ExchangeSender_11 10000.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.employee.empid, collate: binary], stream_count: 8", " └─TableFullScan_10 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo" ], "Warn": [ @@ -353,10 +353,10 @@ "Plan": [ "TableReader_45 1.00 root data:ExchangeSender_44", "└─ExchangeSender_44 1.00 mpp[tiflash] ExchangeType: PassThrough", - " └─Window_43 1.00 mpp[tiflash] row_number()->Column#7 over(partition by Column#5 rows between current row and current row)", - " └─Sort_20 1.00 mpp[tiflash] Column#5", - " └─ExchangeReceiver_19 1.00 mpp[tiflash] ", - " └─ExchangeSender_18 1.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: Column#5, collate: binary]", + " └─Window_43 1.00 mpp[tiflash] row_number()->Column#7 over(partition by Column#5 rows between current row and current row), stream_count: 8", + " └─Sort_20 1.00 mpp[tiflash] Column#5, stream_count: 8", + " └─ExchangeReceiver_19 1.00 mpp[tiflash] stream_count: 8", + " └─ExchangeSender_18 1.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: Column#5, collate: binary], stream_count: 8", " └─Projection_14 1.00 mpp[tiflash] Column#5", " └─HashAgg_15 1.00 mpp[tiflash] funcs:count(distinct test.employee.empid)->Column#5", " └─ExchangeReceiver_17 1.00 mpp[tiflash] ", @@ -405,10 +405,10 @@ " └─ExchangeReceiver_43 1.00 mpp[tiflash] ", " └─ExchangeSender_42 1.00 mpp[tiflash] ExchangeType: PassThrough", " └─HashAgg_39 1.00 mpp[tiflash] group by:test.employee.empid, 
", - " └─Window_27 10000.00 mpp[tiflash] row_number()->Column#6 over(partition by test.employee.deptid rows between current row and current row)", - " └─Sort_18 10000.00 mpp[tiflash] test.employee.deptid", - " └─ExchangeReceiver_17 10000.00 mpp[tiflash] ", - " └─ExchangeSender_16 10000.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.employee.deptid, collate: binary]", + " └─Window_27 10000.00 mpp[tiflash] row_number()->Column#6 over(partition by test.employee.deptid rows between current row and current row), stream_count: 8", + " └─Sort_18 10000.00 mpp[tiflash] test.employee.deptid, stream_count: 8", + " └─ExchangeReceiver_17 10000.00 mpp[tiflash] stream_count: 8", + " └─ExchangeSender_16 10000.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.employee.deptid, collate: binary], stream_count: 8", " └─TableFullScan_15 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo" ], "Warn": null @@ -436,10 +436,10 @@ " └─HashAgg_46 10000.00 mpp[tiflash] group by:Column#6, funcs:count(test.employee.empid)->Column#7", " └─ExchangeReceiver_32 10000.00 mpp[tiflash] ", " └─ExchangeSender_31 10000.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: Column#6, collate: binary]", - " └─Window_30 10000.00 mpp[tiflash] row_number()->Column#6 over(partition by test.employee.deptid rows between current row and current row)", - " └─Sort_21 10000.00 mpp[tiflash] test.employee.deptid", - " └─ExchangeReceiver_20 10000.00 mpp[tiflash] ", - " └─ExchangeSender_19 10000.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.employee.deptid, collate: binary]", + " └─Window_30 10000.00 mpp[tiflash] row_number()->Column#6 over(partition by test.employee.deptid rows between current row and current row), stream_count: 8", + " └─Sort_21 10000.00 mpp[tiflash] test.employee.deptid, stream_count: 8", + " └─ExchangeReceiver_20 10000.00 mpp[tiflash] stream_count: 8", + " └─ExchangeSender_19 10000.00 mpp[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.employee.deptid, collate: binary], stream_count: 8", " └─TableFullScan_18 10000.00 mpp[tiflash] table:employee keep order:false, stats:pseudo" ], "Warn": null diff --git a/server/http_handler.go b/server/http_handler.go index 73d3020fbec52..331f0206ef6dd 100644 --- a/server/http_handler.go +++ b/server/http_handler.go @@ -1016,6 +1016,7 @@ func getSchemaTablesStorageInfo(h *schemaStorageHandler, schema *model.CIStr, ta sql := `select TABLE_SCHEMA,TABLE_NAME,TABLE_ROWS,AVG_ROW_LENGTH,DATA_LENGTH,MAX_DATA_LENGTH,INDEX_LENGTH,DATA_FREE from INFORMATION_SCHEMA.TABLES` if len(condition) > 0 { + //nolint: gosec sql += ` WHERE ` + strings.Join(condition, ` AND `) } var results sqlexec.RecordSet diff --git a/server/plan_replayer.go b/server/plan_replayer.go index b783fad87f66f..39938e867f32e 100644 --- a/server/plan_replayer.go +++ b/server/plan_replayer.go @@ -81,6 +81,7 @@ func handleDownloadFile(handler downloadFileHandler, w http.ResponseWriter, req return } if exist { + //nolint: gosec file, err := os.Open(path) if err != nil { writeError(w, err) diff --git a/session/BUILD.bazel b/session/BUILD.bazel index b75f26dc03634..864e2d563679d 100644 --- a/session/BUILD.bazel +++ b/session/BUILD.bazel @@ -123,7 +123,6 @@ go_test( "//domain", "//errno", "//executor", - "//infoschema", "//kv", "//meta", "//parser/ast", diff --git a/session/bench_test.go b/session/bench_test.go index 75be9443cf7e6..f66838c0598ba 100644 --- a/session/bench_test.go +++ b/session/bench_test.go @@ -1813,9 +1813,9 @@ func 
BenchmarkCompileExecutePreparedStmt(b *testing.B) { is := se.GetInfoSchema() b.ResetTimer() - stmtExec := &ast.ExecuteStmt{ExecID: stmtID} + stmtExec := &ast.ExecuteStmt{ExecID: stmtID, BinaryArgs: args} for i := 0; i < b.N; i++ { - _, _, _, err := executor.CompileExecutePreparedStmt(context.Background(), se, stmtExec, is.(infoschema.InfoSchema), 0, kv.GlobalTxnScope, args) + _, _, _, err := executor.CompileExecutePreparedStmt(context.Background(), se, stmtExec, is.(infoschema.InfoSchema)) if err != nil { b.Fatal(err) } diff --git a/session/session.go b/session/session.go index 5fe688785a286..284fbcf671e61 100644 --- a/session/session.go +++ b/session/session.go @@ -2235,19 +2235,21 @@ func (s *session) PrepareStmt(sql string) (stmtID uint32, paramCount int, fields return prepareExec.ID, prepareExec.ParamCount, prepareExec.Fields, nil } -func (s *session) preparedStmtExec(ctx context.Context, - is infoschema.InfoSchema, snapshotTS uint64, - execStmt *ast.ExecuteStmt, prepareStmt *plannercore.CachedPrepareStmt, replicaReadScope string, args []types.Datum) (sqlexec.RecordSet, error) { - +func (s *session) preparedStmtExec(ctx context.Context, execStmt *ast.ExecuteStmt, prepareStmt *plannercore.CachedPrepareStmt) (sqlexec.RecordSet, error) { failpoint.Inject("assertTxnManagerInPreparedStmtExec", func() { sessiontxn.RecordAssert(s, "assertTxnManagerInPreparedStmtExec", true) - sessiontxn.AssertTxnManagerInfoSchema(s, is) - if snapshotTS != 0 { - sessiontxn.AssertTxnManagerReadTS(s, snapshotTS) + if prepareStmt.SnapshotTSEvaluator != nil { + staleread.AssertStmtStaleness(s, true) + ts, err := prepareStmt.SnapshotTSEvaluator(s) + if err != nil { + panic(err) + } + sessiontxn.AssertTxnManagerReadTS(s, ts) } }) - st, tiFlashPushDown, tiFlashExchangePushDown, err := executor.CompileExecutePreparedStmt(ctx, s, execStmt, is, snapshotTS, replicaReadScope, args) + is := sessiontxn.GetTxnManager(s).GetTxnInfoSchema() + st, tiFlashPushDown, tiFlashExchangePushDown, err := executor.CompileExecutePreparedStmt(ctx, s, execStmt, is) if err != nil { return nil, err } @@ -2267,18 +2269,17 @@ func (s *session) preparedStmtExec(ctx context.Context, // cachedPointPlanExec is a short path currently ONLY for cached "point select plan" execution func (s *session) cachedPointPlanExec(ctx context.Context, - is infoschema.InfoSchema, execAst *ast.ExecuteStmt, prepareStmt *plannercore.CachedPrepareStmt, replicaReadScope string, args []types.Datum) (sqlexec.RecordSet, bool, error) { + execAst *ast.ExecuteStmt, prepareStmt *plannercore.CachedPrepareStmt) (sqlexec.RecordSet, bool, error) { prepared := prepareStmt.PreparedAst failpoint.Inject("assertTxnManagerInCachedPlanExec", func() { sessiontxn.RecordAssert(s, "assertTxnManagerInCachedPlanExec", true) - sessiontxn.AssertTxnManagerInfoSchema(s, is) // stale read should not reach here staleread.AssertStmtStaleness(s, false) }) - execAst.BinaryArgs = args + is := sessiontxn.GetTxnManager(s).GetTxnInfoSchema() execPlan, err := planner.OptimizeExecStmt(ctx, s, execAst, is) if err != nil { return nil, false, err @@ -2290,15 +2291,14 @@ func (s *session) cachedPointPlanExec(ctx context.Context, stmtCtx := s.GetSessionVars().StmtCtx stmt := &executor.ExecStmt{ - GoCtx: ctx, - InfoSchema: is, - Plan: execPlan, - StmtNode: execAst, - Ctx: s, - OutputNames: execPlan.OutputNames(), - PsStmt: prepareStmt, - Ti: &executor.TelemetryInfo{}, - ReplicaReadScope: replicaReadScope, + GoCtx: ctx, + InfoSchema: is, + Plan: execPlan, + StmtNode: execAst, + Ctx: s, + OutputNames: 
execPlan.OutputNames(), + PsStmt: prepareStmt, + Ti: &executor.TelemetryInfo{}, } compileDuration := time.Since(s.sessionVars.StartTime) sessionExecuteCompileDurationGeneral.Observe(compileDuration.Seconds()) @@ -2325,7 +2325,7 @@ func (s *session) cachedPointPlanExec(ctx context.Context, var resultSet sqlexec.RecordSet switch execPlan.(type) { case *plannercore.PointGetPlan: - resultSet, err = stmt.PointGet(ctx, is) + resultSet, err = stmt.PointGet(ctx) s.txn.changeToInvalid() case *plannercore.Update: stmtCtx.Priority = kv.PriorityHigh @@ -2342,9 +2342,9 @@ func (s *session) cachedPointPlanExec(ctx context.Context, // IsCachedExecOk check if we can execute using plan cached in prepared structure // Be careful with the short path, current precondition is ths cached plan satisfying // IsPointGetWithPKOrUniqueKeyByAutoCommit -func (s *session) IsCachedExecOk(ctx context.Context, preparedStmt *plannercore.CachedPrepareStmt, isStaleness bool) (bool, error) { +func (s *session) IsCachedExecOk(preparedStmt *plannercore.CachedPrepareStmt) (bool, error) { prepared := preparedStmt.PreparedAst - if prepared.CachedPlan == nil || isStaleness { + if prepared.CachedPlan == nil || staleread.IsStmtStaleness(s) { return false, nil } // check auto commit @@ -2397,22 +2397,25 @@ func (s *session) ExecutePreparedStmt(ctx context.Context, stmtID uint32, args [ return nil, errors.Errorf("invalid CachedPrepareStmt type") } - var snapshotTS uint64 - replicaReadScope := oracle.GlobalTxnScope + execStmt := &ast.ExecuteStmt{ExecID: stmtID, BinaryArgs: args} + if err := executor.ResetContextOfStmt(s, execStmt); err != nil { + return nil, err + } staleReadProcessor := staleread.NewStaleReadProcessor(s) if err = staleReadProcessor.OnExecutePreparedStmt(preparedStmt.SnapshotTSEvaluator); err != nil { return nil, err } - txnManager := sessiontxn.GetTxnManager(s) if staleReadProcessor.IsStaleness() { - snapshotTS = staleReadProcessor.GetStalenessReadTS() - is := staleReadProcessor.GetStalenessInfoSchema() - replicaReadScope = config.GetTxnScopeFromConfig() - err = txnManager.EnterNewTxn(ctx, &sessiontxn.EnterNewTxnRequest{ - Type: sessiontxn.EnterNewTxnWithReplaceProvider, - Provider: staleread.NewStalenessTxnContextProvider(s, snapshotTS, is), + s.sessionVars.StmtCtx.IsStaleness = true + err = sessiontxn.GetTxnManager(s).EnterNewTxn(ctx, &sessiontxn.EnterNewTxnRequest{ + Type: sessiontxn.EnterNewTxnWithReplaceProvider, + Provider: staleread.NewStalenessTxnContextProvider( + s, + staleReadProcessor.GetStalenessReadTS(), + staleReadProcessor.GetStalenessInfoSchema(), + ), }) if err != nil { @@ -2420,20 +2423,14 @@ func (s *session) ExecutePreparedStmt(ctx context.Context, stmtID uint32, args [ } } - staleness := snapshotTS > 0 executor.CountStmtNode(preparedStmt.PreparedAst.Stmt, s.sessionVars.InRestrictedSQL) - ok, err = s.IsCachedExecOk(ctx, preparedStmt, staleness) + cacheExecOk, err := s.IsCachedExecOk(preparedStmt) if err != nil { return nil, err } s.txn.onStmtStart(preparedStmt.SQLDigest.String()) defer s.txn.onStmtEnd() - execStmt := &ast.ExecuteStmt{ExecID: stmtID} - if err := executor.ResetContextOfStmt(s, execStmt); err != nil { - return nil, err - } - if err = s.onTxnManagerStmtStartOrRetry(ctx, execStmt); err != nil { return nil, err } @@ -2441,8 +2438,8 @@ func (s *session) ExecutePreparedStmt(ctx context.Context, stmtID uint32, args [ // even the txn is valid, still need to set session variable for coprocessor usage. 
s.sessionVars.RequestSourceType = preparedStmt.PreparedAst.StmtType - if ok { - rs, ok, err := s.cachedPointPlanExec(ctx, txnManager.GetTxnInfoSchema(), execStmt, preparedStmt, replicaReadScope, args) + if cacheExecOk { + rs, ok, err := s.cachedPointPlanExec(ctx, execStmt, preparedStmt) if err != nil { return nil, err } @@ -2450,7 +2447,7 @@ func (s *session) ExecutePreparedStmt(ctx context.Context, stmtID uint32, args [ return rs, nil } } - return s.preparedStmtExec(ctx, txnManager.GetTxnInfoSchema(), snapshotTS, execStmt, preparedStmt, replicaReadScope, args) + return s.preparedStmtExec(ctx, execStmt, preparedStmt) } func (s *session) DropPreparedStmt(stmtID uint32) error { @@ -2540,7 +2537,7 @@ func (s *session) NewStaleTxnWithStartTS(ctx context.Context, startTS uint64) er if err := s.checkBeforeNewTxn(ctx); err != nil { return err } - txnScope := config.GetTxnScopeFromConfig() + txnScope := kv.GlobalTxnScope txn, err := s.store.Begin(tikv.WithTxnScope(txnScope), tikv.WithStartTS(startTS)) if err != nil { return err diff --git a/session/txnmanager.go b/session/txnmanager.go index 63c5340e41e3a..9e5e9848b171c 100644 --- a/session/txnmanager.go +++ b/session/txnmanager.go @@ -94,14 +94,30 @@ func (m *txnManager) GetStmtForUpdateTS() (uint64, error) { return ts, nil } -func (m *txnManager) GetReadSnapshot() (kv.Snapshot, error) { +func (m *txnManager) GetTxnScope() string { + if m.ctxProvider == nil { + return kv.GlobalTxnScope + } + return m.ctxProvider.GetTxnScope() +} + +func (m *txnManager) GetReadReplicaScope() string { + if m.ctxProvider == nil { + return kv.GlobalReplicaScope + } + return m.ctxProvider.GetReadReplicaScope() +} + +// GetSnapshotWithStmtReadTS gets snapshot with read ts +func (m *txnManager) GetSnapshotWithStmtReadTS() (kv.Snapshot, error) { if m.ctxProvider == nil { return nil, errors.New("context provider not set") } return m.ctxProvider.GetSnapshotWithStmtReadTS() } -func (m *txnManager) GetForUpdateSnapshot() (kv.Snapshot, error) { +// GetSnapshotWithStmtForUpdateTS gets snapshot with for update ts +func (m *txnManager) GetSnapshotWithStmtForUpdateTS() (kv.Snapshot, error) { if m.ctxProvider == nil { return nil, errors.New("context provider not set") } diff --git a/sessionctx/variable/session.go b/sessionctx/variable/session.go index 4ed0b20abedea..6c1e2f894a2e3 100644 --- a/sessionctx/variable/session.go +++ b/sessionctx/variable/session.go @@ -1159,6 +1159,10 @@ type SessionVars struct { // MaxAllowedPacket indicates the maximum size of a packet for the MySQL protocol. MaxAllowedPacket uint64 + // TiFlash related optimization, only for MPP. + TiFlashFineGrainedShuffleStreamCount int64 + TiFlashFineGrainedShuffleBatchSize uint64 + // RequestSourceType is the type of inner request. 
RequestSourceType string } diff --git a/sessionctx/variable/sysvar.go b/sessionctx/variable/sysvar.go index 2e7865ac57040..c4e0086374f6e 100644 --- a/sessionctx/variable/sysvar.go +++ b/sessionctx/variable/sysvar.go @@ -1662,6 +1662,16 @@ var defaultSysVars = []*SysVar{ return nil }, }, + {Scope: ScopeGlobal | ScopeSession, Name: TiFlashFineGrainedShuffleStreamCount, Value: strconv.Itoa(DefTiFlashFineGrainedShuffleStreamCount), Type: TypeInt, MinValue: -1, MaxValue: 1024, + SetSession: func(s *SessionVars, val string) error { + s.TiFlashFineGrainedShuffleStreamCount = TidbOptInt64(val, DefTiFlashFineGrainedShuffleStreamCount) + return nil + }}, + {Scope: ScopeGlobal | ScopeSession, Name: TiFlashFineGrainedShuffleBatchSize, Value: strconv.Itoa(DefTiFlashFineGrainedShuffleBatchSize), Type: TypeUnsigned, MinValue: 1, MaxValue: math.MaxUint64, + SetSession: func(s *SessionVars, val string) error { + s.TiFlashFineGrainedShuffleBatchSize = uint64(TidbOptInt64(val, DefTiFlashFineGrainedShuffleBatchSize)) + return nil + }}, {Scope: ScopeGlobal, Name: TiDBSimplifiedMetrics, Value: BoolToOnOff(DefTiDBSimplifiedMetrics), Type: TypeBool, SetGlobal: func(vars *SessionVars, s string) error { metrics.ToggleSimplifiedMode(TiDBOptOn(s)) diff --git a/sessionctx/variable/tidb_vars.go b/sessionctx/variable/tidb_vars.go index 529c31b6e560d..2e55dfdb2353d 100644 --- a/sessionctx/variable/tidb_vars.go +++ b/sessionctx/variable/tidb_vars.go @@ -678,6 +678,10 @@ const ( // When set to true, a non-transactional DML finishes all batches even if errors are met in some batches. TiDBNonTransactionalIgnoreError = "tidb_nontransactional_ignore_error" + // Fine grained shuffle is disabled when TiFlashFineGrainedShuffleStreamCount is zero. + TiFlashFineGrainedShuffleStreamCount = "tiflash_fine_grained_shuffle_stream_count" + TiFlashFineGrainedShuffleBatchSize = "tiflash_fine_grained_shuffle_batch_size" + // TiDBSimplifiedMetrics controls whether to unregister some unused metrics. TiDBSimplifiedMetrics = "tidb_simplified_metrics" ) @@ -752,196 +756,199 @@ const ( // Default TiDB system variable values. 
const ( - DefHostname = "localhost" - DefIndexLookupConcurrency = ConcurrencyUnset - DefIndexLookupJoinConcurrency = ConcurrencyUnset - DefIndexSerialScanConcurrency = 1 - DefIndexJoinBatchSize = 25000 - DefIndexLookupSize = 20000 - DefDistSQLScanConcurrency = 15 - DefBuildStatsConcurrency = 4 - DefAutoAnalyzeRatio = 0.5 - DefAutoAnalyzeStartTime = "00:00 +0000" - DefAutoAnalyzeEndTime = "23:59 +0000" - DefAutoIncrementIncrement = 1 - DefAutoIncrementOffset = 1 - DefChecksumTableConcurrency = 4 - DefSkipUTF8Check = false - DefSkipASCIICheck = false - DefOptAggPushDown = false - DefOptCartesianBCJ = 1 - DefOptMPPOuterJoinFixedBuildSide = false - DefOptWriteRowID = false - DefOptEnableCorrelationAdjustment = true - DefOptLimitPushDownThreshold = 100 - DefOptCorrelationThreshold = 0.9 - DefOptCorrelationExpFactor = 1 - DefOptCPUFactor = 3.0 - DefOptCopCPUFactor = 3.0 - DefOptTiFlashConcurrencyFactor = 24.0 - DefOptNetworkFactor = 1.0 - DefOptScanFactor = 1.5 - DefOptDescScanFactor = 3.0 - DefOptSeekFactor = 20.0 - DefOptMemoryFactor = 0.001 - DefOptDiskFactor = 1.5 - DefOptConcurrencyFactor = 3.0 - DefOptCPUFactorV2 = 30.0 - DefOptCopCPUFactorV2 = 30.0 - DefOptTiFlashCPUFactorV2 = 2.0 - DefOptNetworkFactorV2 = 4.0 - DefOptScanFactorV2 = 100.0 - DefOptDescScanFactorV2 = 150.0 - DefOptTiFlashScanFactorV2 = 15.0 - DefOptSeekFactorV2 = 9500000.0 - DefOptMemoryFactorV2 = 0.001 - DefOptDiskFactorV2 = 1.5 - DefOptConcurrencyFactorV2 = 3.0 - DefOptInSubqToJoinAndAgg = true - DefOptPreferRangeScan = false - DefBatchInsert = false - DefBatchDelete = false - DefBatchCommit = false - DefCurretTS = 0 - DefInitChunkSize = 32 - DefMaxChunkSize = 1024 - DefDMLBatchSize = 0 - DefMaxPreparedStmtCount = -1 - DefWaitTimeout = 28800 - DefTiDBMemQuotaApplyCache = 32 << 20 // 32MB. - DefTiDBMemQuotaBindingCache = 64 << 20 // 64MB. 
- DefTiDBGeneralLog = false - DefTiDBPProfSQLCPU = 0 - DefTiDBRetryLimit = 10 - DefTiDBDisableTxnAutoRetry = true - DefTiDBConstraintCheckInPlace = false - DefTiDBHashJoinConcurrency = ConcurrencyUnset - DefTiDBProjectionConcurrency = ConcurrencyUnset - DefBroadcastJoinThresholdSize = 100 * 1024 * 1024 - DefBroadcastJoinThresholdCount = 10 * 1024 - DefTiDBOptimizerSelectivityLevel = 0 - DefTiDBOptimizerEnableNewOFGB = false - DefTiDBEnableOuterJoinReorder = true - DefTiDBAllowBatchCop = 1 - DefTiDBAllowMPPExecution = true - DefTiDBHashExchangeWithNewCollation = true - DefTiDBEnforceMPPExecution = false - DefTiFlashMaxThreads = -1 - DefTiDBMPPStoreFailTTL = "60s" - DefTiDBTxnMode = "" - DefTiDBRowFormatV1 = 1 - DefTiDBRowFormatV2 = 2 - DefTiDBDDLReorgWorkerCount = 4 - DefTiDBDDLReorgBatchSize = 256 - DefTiDBDDLErrorCountLimit = 512 - DefTiDBMaxDeltaSchemaCount = 1024 - DefTiDBChangeMultiSchema = false - DefTiDBPointGetCache = false - DefTiDBPlacementMode = PlacementModeStrict - DefTiDBEnableAutoIncrementInGenerated = false - DefTiDBHashAggPartialConcurrency = ConcurrencyUnset - DefTiDBHashAggFinalConcurrency = ConcurrencyUnset - DefTiDBWindowConcurrency = ConcurrencyUnset - DefTiDBMergeJoinConcurrency = 1 // disable optimization by default - DefTiDBStreamAggConcurrency = 1 - DefTiDBForcePriority = mysql.NoPriority - DefEnableWindowFunction = true - DefEnablePipelinedWindowFunction = true - DefEnableStrictDoubleTypeCheck = true - DefEnableVectorizedExpression = true - DefTiDBOptJoinReorderThreshold = 0 - DefTiDBDDLSlowOprThreshold = 300 - DefTiDBUseFastAnalyze = false - DefTiDBSkipIsolationLevelCheck = false - DefTiDBExpensiveQueryTimeThreshold = 60 // 60s - DefTiDBScatterRegion = false - DefTiDBWaitSplitRegionFinish = true - DefWaitSplitRegionTimeout = 300 // 300s - DefTiDBEnableNoopFuncs = Off - DefTiDBEnableNoopVariables = true - DefTiDBAllowRemoveAutoInc = false - DefTiDBUsePlanBaselines = true - DefTiDBEvolvePlanBaselines = false - DefTiDBEvolvePlanTaskMaxTime = 600 // 600s - DefTiDBEvolvePlanTaskStartTime = "00:00 +0000" - DefTiDBEvolvePlanTaskEndTime = "23:59 +0000" - DefInnodbLockWaitTimeout = 50 // 50s - DefTiDBStoreLimit = 0 - DefTiDBMetricSchemaStep = 60 // 60s - DefTiDBMetricSchemaRangeDuration = 60 // 60s - DefTiDBFoundInPlanCache = false - DefTiDBFoundInBinding = false - DefTiDBEnableCollectExecutionInfo = true - DefTiDBAllowAutoRandExplicitInsert = false - DefTiDBEnableClusteredIndex = ClusteredIndexDefModeIntOnly - DefTiDBRedactLog = false - DefTiDBRestrictedReadOnly = false - DefTiDBSuperReadOnly = false - DefTiDBShardAllocateStep = math.MaxInt64 - DefTiDBEnableTelemetry = true - DefTiDBEnableParallelApply = false - DefTiDBEnableAmendPessimisticTxn = false - DefTiDBPartitionPruneMode = "static" - DefTiDBEnableRateLimitAction = true - DefTiDBEnableAsyncCommit = false - DefTiDBEnable1PC = false - DefTiDBGuaranteeLinearizability = true - DefTiDBAnalyzeVersion = 2 - DefTiDBEnableIndexMergeJoin = false - DefTiDBTrackAggregateMemoryUsage = true - DefTiDBEnableExchangePartition = false - DefCTEMaxRecursionDepth = 1000 - DefTiDBTmpTableMaxSize = 64 << 20 // 64MB. 
- DefTiDBEnableLocalTxn = false - DefTiDBTSOClientBatchMaxWaitTime = 0.0 // 0ms - DefTiDBEnableTSOFollowerProxy = false - DefTiDBEnableOrderedResultMode = false - DefTiDBEnablePseudoForOutdatedStats = true - DefTiDBRegardNULLAsPoint = true - DefEnablePlacementCheck = true - DefTimestamp = "0" - DefTiDBEnableStmtSummary = true - DefTiDBStmtSummaryInternalQuery = false - DefTiDBStmtSummaryRefreshInterval = 1800 - DefTiDBStmtSummaryHistorySize = 24 - DefTiDBStmtSummaryMaxStmtCount = 3000 - DefTiDBStmtSummaryMaxSQLLength = 4096 - DefTiDBCapturePlanBaseline = Off - DefTiDBEnableIndexMerge = true - DefEnableLegacyInstanceScope = true - DefTiDBTableCacheLease = 3 // 3s - DefTiDBPersistAnalyzeOptions = true - DefTiDBEnableColumnTracking = false - DefTiDBStatsLoadSyncWait = 0 - DefTiDBStatsLoadPseudoTimeout = false - DefSysdateIsNow = false - DefTiDBEnableMutationChecker = false - DefTiDBTxnAssertionLevel = AssertionOffStr - DefTiDBIgnorePreparedCacheCloseStmt = false - DefTiDBBatchPendingTiFlashCount = 4000 - DefRCReadCheckTS = false - DefTiDBRemoveOrderbyInSubquery = false - DefTiDBReadStaleness = 0 - DefTiDBGCMaxWaitTime = 24 * 60 * 60 - DefMaxAllowedPacket uint64 = 67108864 - DefTiDBEnableBatchDML = false - DefTiDBMemQuotaQuery = 1073741824 // 1GB - DefTiDBStatsCacheMemQuota = 0 - MaxTiDBStatsCacheMemQuota = 1024 * 1024 * 1024 * 1024 // 1TB - DefTiDBQueryLogMaxLen = 4096 - DefRequireSecureTransport = false - DefTiDBCommitterConcurrency = 128 - DefTiDBBatchDMLIgnoreError = false - DefTiDBMemQuotaAnalyze = -1 - DefTiDBEnableAutoAnalyze = true - DefTiDBMemOOMAction = "CANCEL" - DefTiDBMaxAutoAnalyzeTime = 12 * 60 * 60 - DefTiDBEnablePrepPlanCache = true - DefTiDBPrepPlanCacheSize = 100 - DefTiDBPrepPlanCacheMemoryGuardRatio = 0.1 - DefTiDBEnableConcurrentDDL = true - DefTiDBSimplifiedMetrics = false - DefTiDBEnablePaging = true + DefHostname = "localhost" + DefIndexLookupConcurrency = ConcurrencyUnset + DefIndexLookupJoinConcurrency = ConcurrencyUnset + DefIndexSerialScanConcurrency = 1 + DefIndexJoinBatchSize = 25000 + DefIndexLookupSize = 20000 + DefDistSQLScanConcurrency = 15 + DefBuildStatsConcurrency = 4 + DefAutoAnalyzeRatio = 0.5 + DefAutoAnalyzeStartTime = "00:00 +0000" + DefAutoAnalyzeEndTime = "23:59 +0000" + DefAutoIncrementIncrement = 1 + DefAutoIncrementOffset = 1 + DefChecksumTableConcurrency = 4 + DefSkipUTF8Check = false + DefSkipASCIICheck = false + DefOptAggPushDown = false + DefOptCartesianBCJ = 1 + DefOptMPPOuterJoinFixedBuildSide = false + DefOptWriteRowID = false + DefOptEnableCorrelationAdjustment = true + DefOptLimitPushDownThreshold = 100 + DefOptCorrelationThreshold = 0.9 + DefOptCorrelationExpFactor = 1 + DefOptCPUFactor = 3.0 + DefOptCopCPUFactor = 3.0 + DefOptTiFlashConcurrencyFactor = 24.0 + DefOptNetworkFactor = 1.0 + DefOptScanFactor = 1.5 + DefOptDescScanFactor = 3.0 + DefOptSeekFactor = 20.0 + DefOptMemoryFactor = 0.001 + DefOptDiskFactor = 1.5 + DefOptConcurrencyFactor = 3.0 + DefOptCPUFactorV2 = 30.0 + DefOptCopCPUFactorV2 = 30.0 + DefOptTiFlashCPUFactorV2 = 2.0 + DefOptNetworkFactorV2 = 4.0 + DefOptScanFactorV2 = 100.0 + DefOptDescScanFactorV2 = 150.0 + DefOptTiFlashScanFactorV2 = 15.0 + DefOptSeekFactorV2 = 9500000.0 + DefOptMemoryFactorV2 = 0.001 + DefOptDiskFactorV2 = 1.5 + DefOptConcurrencyFactorV2 = 3.0 + DefOptInSubqToJoinAndAgg = true + DefOptPreferRangeScan = false + DefBatchInsert = false + DefBatchDelete = false + DefBatchCommit = false + DefCurretTS = 0 + DefInitChunkSize = 32 + DefMaxChunkSize = 1024 + DefDMLBatchSize = 0 + DefMaxPreparedStmtCount = 
-1 + DefWaitTimeout = 28800 + DefTiDBMemQuotaApplyCache = 32 << 20 // 32MB. + DefTiDBMemQuotaBindingCache = 64 << 20 // 64MB. + DefTiDBGeneralLog = false + DefTiDBPProfSQLCPU = 0 + DefTiDBRetryLimit = 10 + DefTiDBDisableTxnAutoRetry = true + DefTiDBConstraintCheckInPlace = false + DefTiDBHashJoinConcurrency = ConcurrencyUnset + DefTiDBProjectionConcurrency = ConcurrencyUnset + DefBroadcastJoinThresholdSize = 100 * 1024 * 1024 + DefBroadcastJoinThresholdCount = 10 * 1024 + DefTiDBOptimizerSelectivityLevel = 0 + DefTiDBOptimizerEnableNewOFGB = false + DefTiDBEnableOuterJoinReorder = true + DefTiDBAllowBatchCop = 1 + DefTiDBAllowMPPExecution = true + DefTiDBHashExchangeWithNewCollation = true + DefTiDBEnforceMPPExecution = false + DefTiFlashMaxThreads = -1 + DefTiDBMPPStoreFailTTL = "60s" + DefTiDBTxnMode = "" + DefTiDBRowFormatV1 = 1 + DefTiDBRowFormatV2 = 2 + DefTiDBDDLReorgWorkerCount = 4 + DefTiDBDDLReorgBatchSize = 256 + DefTiDBDDLErrorCountLimit = 512 + DefTiDBMaxDeltaSchemaCount = 1024 + DefTiDBChangeMultiSchema = false + DefTiDBPointGetCache = false + DefTiDBPlacementMode = PlacementModeStrict + DefTiDBEnableAutoIncrementInGenerated = false + DefTiDBHashAggPartialConcurrency = ConcurrencyUnset + DefTiDBHashAggFinalConcurrency = ConcurrencyUnset + DefTiDBWindowConcurrency = ConcurrencyUnset + DefTiDBMergeJoinConcurrency = 1 // disable optimization by default + DefTiDBStreamAggConcurrency = 1 + DefTiDBForcePriority = mysql.NoPriority + DefEnableWindowFunction = true + DefEnablePipelinedWindowFunction = true + DefEnableStrictDoubleTypeCheck = true + DefEnableVectorizedExpression = true + DefTiDBOptJoinReorderThreshold = 0 + DefTiDBDDLSlowOprThreshold = 300 + DefTiDBUseFastAnalyze = false + DefTiDBSkipIsolationLevelCheck = false + DefTiDBExpensiveQueryTimeThreshold = 60 // 60s + DefTiDBScatterRegion = false + DefTiDBWaitSplitRegionFinish = true + DefWaitSplitRegionTimeout = 300 // 300s + DefTiDBEnableNoopFuncs = Off + DefTiDBEnableNoopVariables = true + DefTiDBAllowRemoveAutoInc = false + DefTiDBUsePlanBaselines = true + DefTiDBEvolvePlanBaselines = false + DefTiDBEvolvePlanTaskMaxTime = 600 // 600s + DefTiDBEvolvePlanTaskStartTime = "00:00 +0000" + DefTiDBEvolvePlanTaskEndTime = "23:59 +0000" + DefInnodbLockWaitTimeout = 50 // 50s + DefTiDBStoreLimit = 0 + DefTiDBMetricSchemaStep = 60 // 60s + DefTiDBMetricSchemaRangeDuration = 60 // 60s + DefTiDBFoundInPlanCache = false + DefTiDBFoundInBinding = false + DefTiDBEnableCollectExecutionInfo = true + DefTiDBAllowAutoRandExplicitInsert = false + DefTiDBEnableClusteredIndex = ClusteredIndexDefModeIntOnly + DefTiDBRedactLog = false + DefTiDBRestrictedReadOnly = false + DefTiDBSuperReadOnly = false + DefTiDBShardAllocateStep = math.MaxInt64 + DefTiDBEnableTelemetry = true + DefTiDBEnableParallelApply = false + DefTiDBEnableAmendPessimisticTxn = false + DefTiDBPartitionPruneMode = "static" + DefTiDBEnableRateLimitAction = true + DefTiDBEnableAsyncCommit = false + DefTiDBEnable1PC = false + DefTiDBGuaranteeLinearizability = true + DefTiDBAnalyzeVersion = 2 + DefTiDBEnableIndexMergeJoin = false + DefTiDBTrackAggregateMemoryUsage = true + DefTiDBEnableExchangePartition = false + DefCTEMaxRecursionDepth = 1000 + DefTiDBTmpTableMaxSize = 64 << 20 // 64MB. 
+ DefTiDBEnableLocalTxn = false + DefTiDBTSOClientBatchMaxWaitTime = 0.0 // 0ms + DefTiDBEnableTSOFollowerProxy = false + DefTiDBEnableOrderedResultMode = false + DefTiDBEnablePseudoForOutdatedStats = true + DefTiDBRegardNULLAsPoint = true + DefEnablePlacementCheck = true + DefTimestamp = "0" + DefTiDBEnableStmtSummary = true + DefTiDBStmtSummaryInternalQuery = false + DefTiDBStmtSummaryRefreshInterval = 1800 + DefTiDBStmtSummaryHistorySize = 24 + DefTiDBStmtSummaryMaxStmtCount = 3000 + DefTiDBStmtSummaryMaxSQLLength = 4096 + DefTiDBCapturePlanBaseline = Off + DefTiDBEnableIndexMerge = true + DefEnableLegacyInstanceScope = true + DefTiDBTableCacheLease = 3 // 3s + DefTiDBPersistAnalyzeOptions = true + DefTiDBEnableColumnTracking = false + DefTiDBStatsLoadSyncWait = 0 + DefTiDBStatsLoadPseudoTimeout = false + DefSysdateIsNow = false + DefTiDBEnableMutationChecker = false + DefTiDBTxnAssertionLevel = AssertionOffStr + DefTiDBIgnorePreparedCacheCloseStmt = false + DefTiDBBatchPendingTiFlashCount = 4000 + DefRCReadCheckTS = false + DefTiDBRemoveOrderbyInSubquery = false + DefTiDBReadStaleness = 0 + DefTiDBGCMaxWaitTime = 24 * 60 * 60 + DefMaxAllowedPacket uint64 = 67108864 + DefTiDBEnableBatchDML = false + DefTiDBMemQuotaQuery = 1073741824 // 1GB + DefTiDBStatsCacheMemQuota = 0 + MaxTiDBStatsCacheMemQuota = 1024 * 1024 * 1024 * 1024 // 1TB + DefTiDBQueryLogMaxLen = 4096 + DefRequireSecureTransport = false + DefTiDBCommitterConcurrency = 128 + DefTiDBBatchDMLIgnoreError = false + DefTiDBMemQuotaAnalyze = -1 + DefTiDBEnableAutoAnalyze = true + DefTiDBMemOOMAction = "CANCEL" + DefTiDBMaxAutoAnalyzeTime = 12 * 60 * 60 + DefTiDBEnablePrepPlanCache = true + DefTiDBPrepPlanCacheSize = 100 + DefTiDBPrepPlanCacheMemoryGuardRatio = 0.1 + DefTiDBEnableConcurrentDDL = true + DefTiDBSimplifiedMetrics = false + DefTiDBEnablePaging = true + DefTiFlashFineGrainedShuffleStreamCount = -1 + DefStreamCountWhenMaxThreadsNotSet = 8 + DefTiFlashFineGrainedShuffleBatchSize = 8192 ) // Process global variables. diff --git a/sessiontxn/interface.go b/sessiontxn/interface.go index 6d809fa923c38..60795aca52fd4 100644 --- a/sessiontxn/interface.go +++ b/sessiontxn/interface.go @@ -117,13 +117,17 @@ type TxnContextProvider interface { TxnAdvisable // GetTxnInfoSchema returns the information schema used by txn GetTxnInfoSchema() infoschema.InfoSchema - // GetStmtReadTS returns the read timestamp used by select statement (not for select ... for update) + // GetTxnScope returns the current txn scope + GetTxnScope() string + // GetReadReplicaScope returns the read replica scope + GetReadReplicaScope() string + //GetStmtReadTS returns the read timestamp used by select statement (not for select ... for update) GetStmtReadTS() (uint64, error) // GetStmtForUpdateTS returns the read timestamp used by update/insert/delete or select ... 
for update GetStmtForUpdateTS() (uint64, error) - // GetSnapshotWithStmtReadTS get snapshot with read ts + // GetSnapshotWithStmtReadTS gets snapshot with read ts GetSnapshotWithStmtReadTS() (kv.Snapshot, error) - // GetSnapshotWithStmtForUpdateTS get snapshot with for update ts + // GetSnapshotWithStmtForUpdateTS gets snapshot with for update ts GetSnapshotWithStmtForUpdateTS() (kv.Snapshot, error) // OnInitialize is the hook that should be called when enter a new txn with this provider @@ -145,16 +149,20 @@ type TxnManager interface { // If the session is not in any transaction, for example: between two autocommit statements, // this method will return the latest information schema in session that is same with `sessionctx.GetDomainInfoSchema()` GetTxnInfoSchema() infoschema.InfoSchema + // GetTxnScope returns the current txn scope + GetTxnScope() string + // GetReadReplicaScope returns the read replica scope + GetReadReplicaScope() string // GetStmtReadTS returns the read timestamp used by select statement (not for select ... for update) GetStmtReadTS() (uint64, error) // GetStmtForUpdateTS returns the read timestamp used by update/insert/delete or select ... for update GetStmtForUpdateTS() (uint64, error) // GetContextProvider returns the current TxnContextProvider GetContextProvider() TxnContextProvider - // GetReadSnapshot get snapshot with read ts - GetReadSnapshot() (kv.Snapshot, error) - // GetForUpdateSnapshot get snapshot with for update ts - GetForUpdateSnapshot() (kv.Snapshot, error) + // GetSnapshotWithStmtReadTS gets snapshot with read ts + GetSnapshotWithStmtReadTS() (kv.Snapshot, error) + // GetSnapshotWithStmtForUpdateTS gets snapshot with for update ts + GetSnapshotWithStmtForUpdateTS() (kv.Snapshot, error) // EnterNewTxn enters a new transaction. 
EnterNewTxn(ctx context.Context, req *EnterNewTxnRequest) error diff --git a/sessiontxn/isolation/BUILD.bazel b/sessiontxn/isolation/BUILD.bazel index be023d95df2bb..a05b08583768a 100644 --- a/sessiontxn/isolation/BUILD.bazel +++ b/sessiontxn/isolation/BUILD.bazel @@ -12,6 +12,7 @@ go_library( importpath = "github.com/pingcap/tidb/sessiontxn/isolation", visibility = ["//visibility:public"], deps = [ + "//config", "//infoschema", "//kv", "//parser/ast", @@ -53,6 +54,7 @@ go_test( "//sessionctx", "//sessiontxn", "//testkit", + "//testkit/testfork", "//testkit/testsetup", "//types", "@com_github_pingcap_errors//:errors", @@ -61,6 +63,7 @@ go_test( "@com_github_stretchr_testify//require", "@com_github_tikv_client_go_v2//error", "@com_github_tikv_client_go_v2//oracle", + "@com_github_tikv_client_go_v2//tikv", "@org_uber_go_goleak//:goleak", ], ) diff --git a/sessiontxn/isolation/base.go b/sessiontxn/isolation/base.go index 53386fa32c47c..877adebd564ca 100644 --- a/sessiontxn/isolation/base.go +++ b/sessiontxn/isolation/base.go @@ -19,6 +19,7 @@ import ( "time" "github.com/pingcap/errors" + "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/ast" @@ -117,6 +118,25 @@ func (p *baseTxnContextProvider) GetTxnInfoSchema() infoschema.InfoSchema { return p.infoSchema } +func (p *baseTxnContextProvider) GetTxnScope() string { + return p.sctx.GetSessionVars().TxnCtx.TxnScope +} + +func (p *baseTxnContextProvider) GetReadReplicaScope() string { + if txnScope := p.GetTxnScope(); txnScope != kv.GlobalTxnScope && txnScope != "" { + // In local txn, we should use txnScope as the readReplicaScope + return txnScope + } + + if p.sctx.GetSessionVars().GetReplicaRead().IsClosestRead() { + // If closest read is set, we should use the scope where the instance is located.
+ return config.GetTxnScopeFromConfig() + } + + // When it is neither a local txn nor closest read, we should use the global scope + return kv.GlobalReplicaScope + } + func (p *baseTxnContextProvider) GetStmtReadTS() (uint64, error) { if _, err := p.ActivateTxn(); err != nil { return 0, err @@ -288,7 +308,7 @@ func (p *baseTxnContextProvider) AdviseOptimizeWithPlan(_ interface{}) error { return nil } -// GetSnapshotWithStmtReadTS get snapshot with read ts +// GetSnapshotWithStmtReadTS gets snapshot with read ts func (p *baseTxnContextProvider) GetSnapshotWithStmtReadTS() (kv.Snapshot, error) { ts, err := p.GetStmtReadTS() if err != nil { @@ -298,7 +318,7 @@ func (p *baseTxnContextProvider) GetSnapshotWithStmtReadTS() (kv.Snapshot, error return p.getSnapshotByTS(ts) } -// GetSnapshotWithStmtForUpdateTS get snapshot with for update ts +// GetSnapshotWithStmtForUpdateTS gets snapshot with for update ts func (p *baseTxnContextProvider) GetSnapshotWithStmtForUpdateTS() (kv.Snapshot, error) { ts, err := p.GetStmtForUpdateTS() if err != nil { diff --git a/sessiontxn/isolation/main_test.go b/sessiontxn/isolation/main_test.go index 4c04d03243363..be85f098e31b6 100644 --- a/sessiontxn/isolation/main_test.go +++ b/sessiontxn/isolation/main_test.go @@ -16,21 +16,27 @@ package isolation_test import ( "context" + "fmt" "testing" "time" + "github.com/pingcap/failpoint" "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/sessiontxn" + "github.com/pingcap/tidb/testkit" + "github.com/pingcap/tidb/testkit/testfork" "github.com/pingcap/tidb/testkit/testsetup" "github.com/stretchr/testify/require" "github.com/tikv/client-go/v2/oracle" + "github.com/tikv/client-go/v2/tikv" "go.uber.org/goleak" ) func TestMain(m *testing.M) { testsetup.SetupForCommonTest() + tikv.EnableFailpoints() opts := []goleak.Option{ goleak.IgnoreTopFunction("github.com/golang/glog.(*loggingT).flushDaemon"), goleak.IgnoreTopFunction("go.etcd.io/etcd/client/pkg/v3/logutil.(*MergeLogger).outputLoop"), @@ -39,7 +45,7 @@ func TestMain(m *testing.M) { goleak.VerifyTestMain(m, opts...)
} -func getOracleTS(t *testing.T, sctx sessionctx.Context) uint64 { +func getOracleTS(t testing.TB, sctx sessionctx.Context) uint64 { ts, err := sctx.GetStore().GetOracle().GetTimestamp(context.TODO(), &oracle.Option{TxnScope: oracle.GlobalTxnScope}) require.NoError(t, err) return ts @@ -57,7 +63,7 @@ type txnAssert[T sessiontxn.TxnContextProvider] struct { couldRetry bool } -func (a *txnAssert[T]) Check(t *testing.T) { +func (a *txnAssert[T]) Check(t testing.TB) { provider := sessiontxn.GetTxnManager(a.sctx).GetContextProvider() sessVars := a.sctx.GetSessionVars() txnCtx := sessVars.TxnCtx @@ -76,6 +82,9 @@ func (a *txnAssert[T]) Check(t *testing.T) { require.Equal(t, a.inTxn, sessVars.InTxn()) require.Equal(t, a.inTxn, txnCtx.IsExplicit) require.Equal(t, a.couldRetry, txnCtx.CouldRetry) + require.Equal(t, assertTxnScope, txnCtx.TxnScope) + require.Equal(t, assertTxnScope, provider.GetTxnScope()) + require.Equal(t, assertReplicaReadScope, provider.GetReadReplicaScope()) txn, err := a.sctx.Txn(false) require.NoError(t, err) @@ -111,7 +120,53 @@ func activeSnapshotTxnAssert(sctx sessionctx.Context, ts uint64, isolation strin } } -func (a *txnAssert[T]) CheckAndGetProvider(t *testing.T) T { +func (a *txnAssert[T]) CheckAndGetProvider(t testing.TB) T { a.Check(t) return sessiontxn.GetTxnManager(a.sctx).GetContextProvider().(T) } + +var assertTxnScope = kv.GlobalTxnScope +var assertReplicaReadScope = kv.GlobalReplicaScope + +func forkScopeSettings(t *testfork.T, store kv.Storage) func() { + tk := testkit.NewTestKit(t, store) + failPointEnabled := false + clearFunc := func() { + assertTxnScope = kv.GlobalTxnScope + assertReplicaReadScope = kv.GlobalReplicaScope + tk.MustExec("set @@global.tidb_replica_read='leader'") + tk.MustExec("set @@global.tidb_enable_local_txn=0") + if failPointEnabled { + require.NoError(t, failpoint.Disable("tikvclient/injectTxnScope")) + } + } + + clearFunc() + success := false + defer func() { + if !success { + clearFunc() + } + }() + + zone := testfork.PickEnum(t, "", "bj") + if zone != "" { + require.NoError(t, failpoint.Enable("tikvclient/injectTxnScope", fmt.Sprintf(`return("%v")`, zone))) + failPointEnabled = true + if testfork.PickEnum(t, "", "enableLocalTxn") != "" { + tk.MustExec("set @@global.tidb_enable_local_txn=1") + assertTxnScope = zone + assertReplicaReadScope = zone + } + } + + if testfork.PickEnum(t, "", "closetRead") != "" { + tk.MustExec("set @@global.tidb_replica_read='closest-replicas'") + if zone != "" { + assertReplicaReadScope = zone + } + } + + success = true + return clearFunc +} diff --git a/sessiontxn/isolation/optimistic_test.go b/sessiontxn/isolation/optimistic_test.go index d6b4d9b3b7d26..4d6c7f56fc23b 100644 --- a/sessiontxn/isolation/optimistic_test.go +++ b/sessiontxn/isolation/optimistic_test.go @@ -33,6 +33,7 @@ import ( "github.com/pingcap/tidb/sessiontxn" "github.com/pingcap/tidb/sessiontxn/isolation" "github.com/pingcap/tidb/testkit" + "github.com/pingcap/tidb/testkit/testfork" "github.com/stretchr/testify/require" tikverr "github.com/tikv/client-go/v2/error" ) @@ -208,85 +209,76 @@ func TestOptimisticHandleError(t *testing.T) { func TestOptimisticProviderInitialize(t *testing.T) { store, _, clean := testkit.CreateMockStoreAndDomain(t) defer clean() - - tk := testkit.NewTestKit(t, store) - se := tk.Session() - - // begin outside a txn - assert := activeOptimisticTxnAssert(t, se, true) - tk.MustExec("begin") - assert.Check(t) - - // begin in a txn - assert = activeOptimisticTxnAssert(t, se, true) - tk.MustExec("begin") - 
assert.Check(t) - - // begin outside a txn when tidb_disable_txn_auto_retry=0 - tk.MustExec("set @@tidb_disable_txn_auto_retry=0") - tk.MustExec("rollback") - assert = activeOptimisticTxnAssert(t, se, true) - assert.couldRetry = true - tk.MustExec("begin") - assert.Check(t) - - // START TRANSACTION WITH CAUSAL CONSISTENCY ONLY - assert = activeOptimisticTxnAssert(t, se, true) - assert.causalConsistencyOnly = true - assert.couldRetry = true - tk.MustExec("START TRANSACTION WITH CAUSAL CONSISTENCY ONLY") - assert.Check(t) - - // EnterNewTxnDefault will create an active txn, but not explicit - assert = activeOptimisticTxnAssert(t, se, false) - require.NoError(t, sessiontxn.GetTxnManager(se).EnterNewTxn(context.TODO(), &sessiontxn.EnterNewTxnRequest{ - Type: sessiontxn.EnterNewTxnDefault, - TxnMode: ast.Optimistic, - })) - assert.Check(t) - - tk.MustExec("rollback") - require.NoError(t, sessiontxn.GetTxnManager(se).EnterNewTxn(context.TODO(), &sessiontxn.EnterNewTxnRequest{ - Type: sessiontxn.EnterNewTxnDefault, - })) - assert.Check(t) - - // non-active txn and then active it - cases := []struct { - disableTxnAutoRetry bool - autocommit bool - }{ - { - true, true, - }, - { - true, false, - }, - { - false, true, - }, - { - false, false, - }, - } - for _, c := range cases { - t.Run(fmt.Sprintf("disableAutRetry: %v, autoCommit: %v", c.disableTxnAutoRetry, c.autocommit), func(t *testing.T) { - tk.MustExec("rollback") - defer tk.MustExec("rollback") - tk.MustExec(fmt.Sprintf("set @@autocommit=%v", c.autocommit)) - tk.MustExec(fmt.Sprintf("set @@tidb_disable_txn_auto_retry=%v", c.disableTxnAutoRetry)) - assert = inactiveOptimisticTxnAssert(se) - assertAfterActive := activeOptimisticTxnAssert(t, se, !c.autocommit) - assertAfterActive.couldRetry = c.autocommit || !c.disableTxnAutoRetry - require.NoError(t, se.PrepareTxnCtx(context.TODO())) - provider := assert.CheckAndGetProvider(t) - require.NoError(t, provider.OnStmtStart(context.TODO(), nil)) - ts, err := provider.GetStmtReadTS() - require.NoError(t, err) - assertAfterActive.Check(t) - require.Equal(t, ts, se.GetSessionVars().TxnCtx.StartTS) - }) - } + testfork.RunTest(t, func(t *testfork.T) { + clearScopeSettings := forkScopeSettings(t, store) + defer clearScopeSettings() + + tk := testkit.NewTestKit(t, store) + se := tk.Session() + + // begin outside a txn + assert := activeOptimisticTxnAssert(t, se, true) + tk.MustExec("begin") + assert.Check(t) + + // begin in a txn + assert = activeOptimisticTxnAssert(t, se, true) + tk.MustExec("begin") + assert.Check(t) + + // begin outside a txn when tidb_disable_txn_auto_retry=0 + tk.MustExec("set @@tidb_disable_txn_auto_retry=0") + tk.MustExec("rollback") + assert = activeOptimisticTxnAssert(t, se, true) + assert.couldRetry = true + tk.MustExec("begin") + assert.Check(t) + + // START TRANSACTION WITH CAUSAL CONSISTENCY ONLY + assert = activeOptimisticTxnAssert(t, se, true) + assert.causalConsistencyOnly = true + assert.couldRetry = true + tk.MustExec("START TRANSACTION WITH CAUSAL CONSISTENCY ONLY") + assert.Check(t) + + // EnterNewTxnDefault will create an active txn, but not explicit + assert = activeOptimisticTxnAssert(t, se, false) + require.NoError(t, sessiontxn.GetTxnManager(se).EnterNewTxn(context.TODO(), &sessiontxn.EnterNewTxnRequest{ + Type: sessiontxn.EnterNewTxnDefault, + TxnMode: ast.Optimistic, + })) + assert.Check(t) + + tk.MustExec("rollback") + require.NoError(t, sessiontxn.GetTxnManager(se).EnterNewTxn(context.TODO(), &sessiontxn.EnterNewTxnRequest{ + Type: sessiontxn.EnterNewTxnDefault, 
+ })) + assert.Check(t) + + // non-active txn and then active it + disableTxnAutoRetry := true + if testfork.PickEnum(t, "enableTxnAutoRetry", "") != "" { + disableTxnAutoRetry = false + } + autocommit := true + if testfork.PickEnum(t, "noAutocommit", "") != "" { + autocommit = false + } + tk.MustExec("rollback") + defer tk.MustExec("rollback") + tk.MustExec(fmt.Sprintf("set @@autocommit=%v", autocommit)) + tk.MustExec(fmt.Sprintf("set @@tidb_disable_txn_auto_retry=%v", disableTxnAutoRetry)) + assert = inactiveOptimisticTxnAssert(se) + assertAfterActive := activeOptimisticTxnAssert(t, se, !autocommit) + assertAfterActive.couldRetry = autocommit || !disableTxnAutoRetry + require.NoError(t, se.PrepareTxnCtx(context.TODO())) + provider := assert.CheckAndGetProvider(t) + require.NoError(t, provider.OnStmtStart(context.TODO(), nil)) + ts, err := provider.GetStmtReadTS() + require.NoError(t, err) + assertAfterActive.Check(t) + require.Equal(t, ts, se.GetSessionVars().TxnCtx.StartTS) + }) } func TestTidbSnapshotVarInOptimisticTxn(t *testing.T) { @@ -375,7 +367,7 @@ func TestTidbSnapshotVarInOptimisticTxn(t *testing.T) { } } -func activeOptimisticTxnAssert(t *testing.T, sctx sessionctx.Context, inTxn bool) *txnAssert[*isolation.OptimisticTxnContextProvider] { +func activeOptimisticTxnAssert(t testing.TB, sctx sessionctx.Context, inTxn bool) *txnAssert[*isolation.OptimisticTxnContextProvider] { return &txnAssert[*isolation.OptimisticTxnContextProvider]{ sctx: sctx, minStartTime: time.Now(), @@ -393,7 +385,7 @@ func inactiveOptimisticTxnAssert(sctx sessionctx.Context) *txnAssert[*isolation. } } -func initializeOptimisticProvider(t *testing.T, tk *testkit.TestKit, withExplicitBegin bool) *isolation.OptimisticTxnContextProvider { +func initializeOptimisticProvider(t testing.TB, tk *testkit.TestKit, withExplicitBegin bool) *isolation.OptimisticTxnContextProvider { tk.MustExec("commit") if withExplicitBegin { assert := activeOptimisticTxnAssert(t, tk.Session(), true) diff --git a/sessiontxn/isolation/readcommitted.go b/sessiontxn/isolation/readcommitted.go index d2afad4c2ea26..06adc9fcba4fc 100644 --- a/sessiontxn/isolation/readcommitted.go +++ b/sessiontxn/isolation/readcommitted.go @@ -258,7 +258,7 @@ func (p *PessimisticRCTxnContextProvider) AdviseOptimizeWithPlan(val interface{} return nil } -// GetSnapshotWithStmtReadTS get snapshot with read ts +// GetSnapshotWithStmtReadTS gets snapshot with read ts func (p *PessimisticRCTxnContextProvider) GetSnapshotWithStmtReadTS() (kv.Snapshot, error) { snapshot, err := p.baseTxnContextProvider.GetSnapshotWithStmtForUpdateTS() if err != nil { diff --git a/sessiontxn/isolation/readcommitted_test.go b/sessiontxn/isolation/readcommitted_test.go index a01066f3588b1..a0211d2cf83f0 100644 --- a/sessiontxn/isolation/readcommitted_test.go +++ b/sessiontxn/isolation/readcommitted_test.go @@ -34,6 +34,7 @@ import ( "github.com/pingcap/tidb/sessiontxn" "github.com/pingcap/tidb/sessiontxn/isolation" "github.com/pingcap/tidb/testkit" + "github.com/pingcap/tidb/testkit/testfork" "github.com/pingcap/tidb/types" "github.com/stretchr/testify/require" tikverr "github.com/tikv/client-go/v2/error" @@ -287,61 +288,66 @@ func TestRCProviderInitialize(t *testing.T) { store, _, clean := testkit.CreateMockStoreAndDomain(t) defer clean() - tk := testkit.NewTestKit(t, store) - se := tk.Session() - tk.MustExec("set @@tx_isolation = 'READ-COMMITTED'") - tk.MustExec("set @@tidb_txn_mode='pessimistic'") - - // begin outside a txn - assert := activeRCTxnAssert(t, se, true) - 
tk.MustExec("begin") - assert.Check(t) - - // begin in a txn - assert = activeRCTxnAssert(t, se, true) - tk.MustExec("begin") - assert.Check(t) - - // START TRANSACTION WITH CAUSAL CONSISTENCY ONLY - assert = activeRCTxnAssert(t, se, true) - assert.causalConsistencyOnly = true - tk.MustExec("START TRANSACTION WITH CAUSAL CONSISTENCY ONLY") - assert.Check(t) - - // EnterNewTxnDefault will create an active txn, but not explicit - assert = activeRCTxnAssert(t, se, false) - require.NoError(t, sessiontxn.GetTxnManager(se).EnterNewTxn(context.TODO(), &sessiontxn.EnterNewTxnRequest{ - Type: sessiontxn.EnterNewTxnDefault, - TxnMode: ast.Pessimistic, - })) - assert.Check(t) - - // non-active txn and then active it - tk.MustExec("rollback") - tk.MustExec("set @@autocommit=0") - assert = inactiveRCTxnAssert(se) - assertAfterActive := activeRCTxnAssert(t, se, true) - require.NoError(t, se.PrepareTxnCtx(context.TODO())) - provider := assert.CheckAndGetProvider(t) - require.NoError(t, provider.OnStmtStart(context.TODO(), nil)) - ts, err := provider.GetStmtReadTS() - require.NoError(t, err) - assertAfterActive.Check(t) - require.Equal(t, ts, se.GetSessionVars().TxnCtx.StartTS) - tk.MustExec("rollback") - - // Case Pessimistic Autocommit - config.GetGlobalConfig().PessimisticTxn.PessimisticAutoCommit.Store(true) - assert = inactiveRCTxnAssert(se) - assertAfterActive = activeRCTxnAssert(t, se, true) - require.NoError(t, se.PrepareTxnCtx(context.TODO())) - provider = assert.CheckAndGetProvider(t) - require.NoError(t, provider.OnStmtStart(context.TODO(), nil)) - ts, err = provider.GetStmtReadTS() - require.NoError(t, err) - assertAfterActive.Check(t) - require.Equal(t, ts, se.GetSessionVars().TxnCtx.StartTS) - tk.MustExec("rollback") + testfork.RunTest(t, func(t *testfork.T) { + clearScopeSettings := forkScopeSettings(t, store) + defer clearScopeSettings() + + tk := testkit.NewTestKit(t, store) + se := tk.Session() + tk.MustExec("set @@tx_isolation = 'READ-COMMITTED'") + tk.MustExec("set @@tidb_txn_mode='pessimistic'") + + // begin outside a txn + assert := activeRCTxnAssert(t, se, true) + tk.MustExec("begin") + assert.Check(t) + + // begin in a txn + assert = activeRCTxnAssert(t, se, true) + tk.MustExec("begin") + assert.Check(t) + + // START TRANSACTION WITH CAUSAL CONSISTENCY ONLY + assert = activeRCTxnAssert(t, se, true) + assert.causalConsistencyOnly = true + tk.MustExec("START TRANSACTION WITH CAUSAL CONSISTENCY ONLY") + assert.Check(t) + + // EnterNewTxnDefault will create an active txn, but not explicit + assert = activeRCTxnAssert(t, se, false) + require.NoError(t, sessiontxn.GetTxnManager(se).EnterNewTxn(context.TODO(), &sessiontxn.EnterNewTxnRequest{ + Type: sessiontxn.EnterNewTxnDefault, + TxnMode: ast.Pessimistic, + })) + assert.Check(t) + + // non-active txn and then active it + tk.MustExec("rollback") + tk.MustExec("set @@autocommit=0") + assert = inactiveRCTxnAssert(se) + assertAfterActive := activeRCTxnAssert(t, se, true) + require.NoError(t, se.PrepareTxnCtx(context.TODO())) + provider := assert.CheckAndGetProvider(t) + require.NoError(t, provider.OnStmtStart(context.TODO(), nil)) + ts, err := provider.GetStmtReadTS() + require.NoError(t, err) + assertAfterActive.Check(t) + require.Equal(t, ts, se.GetSessionVars().TxnCtx.StartTS) + tk.MustExec("rollback") + + // Case Pessimistic Autocommit + config.GetGlobalConfig().PessimisticTxn.PessimisticAutoCommit.Store(true) + assert = inactiveRCTxnAssert(se) + assertAfterActive = activeRCTxnAssert(t, se, true) + require.NoError(t, 
se.PrepareTxnCtx(context.TODO())) + provider = assert.CheckAndGetProvider(t) + require.NoError(t, provider.OnStmtStart(context.TODO(), nil)) + ts, err = provider.GetStmtReadTS() + require.NoError(t, err) + assertAfterActive.Check(t) + require.Equal(t, ts, se.GetSessionVars().TxnCtx.StartTS) + tk.MustExec("rollback") + }) } func TestTidbSnapshotVarInRC(t *testing.T) { @@ -517,7 +523,7 @@ func TestConflictErrorsInRC(t *testing.T) { require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/executor/assertPessimisticLockErr")) } -func activeRCTxnAssert(t *testing.T, sctx sessionctx.Context, inTxn bool) *txnAssert[*isolation.PessimisticRCTxnContextProvider] { +func activeRCTxnAssert(t testing.TB, sctx sessionctx.Context, inTxn bool) *txnAssert[*isolation.PessimisticRCTxnContextProvider] { return &txnAssert[*isolation.PessimisticRCTxnContextProvider]{ sctx: sctx, isolation: "READ-COMMITTED", @@ -537,7 +543,7 @@ func inactiveRCTxnAssert(sctx sessionctx.Context) *txnAssert[*isolation.Pessimis } } -func initializePessimisticRCProvider(t *testing.T, tk *testkit.TestKit) *isolation.PessimisticRCTxnContextProvider { +func initializePessimisticRCProvider(t testing.TB, tk *testkit.TestKit) *isolation.PessimisticRCTxnContextProvider { tk.MustExec("set @@tx_isolation = 'READ-COMMITTED'") assert := activeRCTxnAssert(t, tk.Session(), true) tk.MustExec("begin pessimistic") diff --git a/sessiontxn/isolation/repeatable_read_test.go b/sessiontxn/isolation/repeatable_read_test.go index c1487a1bb0ae7..f8fc70fe8315a 100644 --- a/sessiontxn/isolation/repeatable_read_test.go +++ b/sessiontxn/isolation/repeatable_read_test.go @@ -33,6 +33,7 @@ import ( "github.com/pingcap/tidb/sessiontxn" "github.com/pingcap/tidb/sessiontxn/isolation" "github.com/pingcap/tidb/testkit" + "github.com/pingcap/tidb/testkit/testfork" "github.com/stretchr/testify/require" tikverr "github.com/tikv/client-go/v2/error" ) @@ -192,61 +193,66 @@ func TestRepeatableReadProviderInitialize(t *testing.T) { store, _, clean := testkit.CreateMockStoreAndDomain(t) defer clean() - tk := testkit.NewTestKit(t, store) - se := tk.Session() - tk.MustExec("set @@tx_isolation = 'REPEATABLE-READ'") - tk.MustExec("set @@tidb_txn_mode='pessimistic'") - - // begin outside a txn - assert := activePessimisticRRAssert(t, se, true) - tk.MustExec("begin") - assert.Check(t) - - // begin in a txn - assert = activePessimisticRRAssert(t, se, true) - tk.MustExec("begin") - assert.Check(t) - - // START TRANSACTION WITH CAUSAL CONSISTENCY ONLY - assert = activePessimisticRRAssert(t, se, true) - assert.causalConsistencyOnly = true - tk.MustExec("START TRANSACTION WITH CAUSAL CONSISTENCY ONLY") - assert.Check(t) - - // EnterNewTxnDefault will create an active txn, but not explicit - assert = activePessimisticRRAssert(t, se, false) - require.NoError(t, sessiontxn.GetTxnManager(se).EnterNewTxn(context.TODO(), &sessiontxn.EnterNewTxnRequest{ - Type: sessiontxn.EnterNewTxnDefault, - TxnMode: ast.Pessimistic, - })) - assert.Check(t) - - // non-active txn and then active it - tk.MustExec("rollback") - tk.MustExec("set @@autocommit=0") - assert = inactivePessimisticRRAssert(se) - assertAfterActive := activePessimisticRRAssert(t, se, true) - require.NoError(t, se.PrepareTxnCtx(context.TODO())) - provider := assert.CheckAndGetProvider(t) - require.NoError(t, provider.OnStmtStart(context.TODO(), nil)) - ts, err := provider.GetStmtReadTS() - require.NoError(t, err) - assertAfterActive.Check(t) - require.Equal(t, ts, se.GetSessionVars().TxnCtx.StartTS) - tk.MustExec("rollback") - - // 
Case Pessimistic Autocommit - config.GetGlobalConfig().PessimisticTxn.PessimisticAutoCommit.Store(true) - assert = inactivePessimisticRRAssert(se) - assertAfterActive = activePessimisticRRAssert(t, se, true) - require.NoError(t, se.PrepareTxnCtx(context.TODO())) - provider = assert.CheckAndGetProvider(t) - require.NoError(t, provider.OnStmtStart(context.TODO(), nil)) - ts, err = provider.GetStmtReadTS() - require.NoError(t, err) - assertAfterActive.Check(t) - require.Equal(t, ts, se.GetSessionVars().TxnCtx.StartTS) - tk.MustExec("rollback") + testfork.RunTest(t, func(t *testfork.T) { + clearScopeSettings := forkScopeSettings(t, store) + defer clearScopeSettings() + + tk := testkit.NewTestKit(t, store) + se := tk.Session() + tk.MustExec("set @@tx_isolation = 'REPEATABLE-READ'") + tk.MustExec("set @@tidb_txn_mode='pessimistic'") + + // begin outside a txn + assert := activePessimisticRRAssert(t, se, true) + tk.MustExec("begin") + assert.Check(t) + + // begin in a txn + assert = activePessimisticRRAssert(t, se, true) + tk.MustExec("begin") + assert.Check(t) + + // START TRANSACTION WITH CAUSAL CONSISTENCY ONLY + assert = activePessimisticRRAssert(t, se, true) + assert.causalConsistencyOnly = true + tk.MustExec("START TRANSACTION WITH CAUSAL CONSISTENCY ONLY") + assert.Check(t) + + // EnterNewTxnDefault will create an active txn, but not explicit + assert = activePessimisticRRAssert(t, se, false) + require.NoError(t, sessiontxn.GetTxnManager(se).EnterNewTxn(context.TODO(), &sessiontxn.EnterNewTxnRequest{ + Type: sessiontxn.EnterNewTxnDefault, + TxnMode: ast.Pessimistic, + })) + assert.Check(t) + + // non-active txn and then active it + tk.MustExec("rollback") + tk.MustExec("set @@autocommit=0") + assert = inactivePessimisticRRAssert(se) + assertAfterActive := activePessimisticRRAssert(t, se, true) + require.NoError(t, se.PrepareTxnCtx(context.TODO())) + provider := assert.CheckAndGetProvider(t) + require.NoError(t, provider.OnStmtStart(context.TODO(), nil)) + ts, err := provider.GetStmtReadTS() + require.NoError(t, err) + assertAfterActive.Check(t) + require.Equal(t, ts, se.GetSessionVars().TxnCtx.StartTS) + tk.MustExec("rollback") + + // Case Pessimistic Autocommit + config.GetGlobalConfig().PessimisticTxn.PessimisticAutoCommit.Store(true) + assert = inactivePessimisticRRAssert(se) + assertAfterActive = activePessimisticRRAssert(t, se, true) + require.NoError(t, se.PrepareTxnCtx(context.TODO())) + provider = assert.CheckAndGetProvider(t) + require.NoError(t, provider.OnStmtStart(context.TODO(), nil)) + ts, err = provider.GetStmtReadTS() + require.NoError(t, err) + assertAfterActive.Check(t) + require.Equal(t, ts, se.GetSessionVars().TxnCtx.StartTS) + tk.MustExec("rollback") + }) } func TestTidbSnapshotVarInPessimisticRepeatableRead(t *testing.T) { @@ -632,7 +638,7 @@ func TestConflictErrorInOtherQueryContainingPointGet(t *testing.T) { require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/executor/assertPessimisticLockErr")) } -func activePessimisticRRAssert(t *testing.T, sctx sessionctx.Context, +func activePessimisticRRAssert(t testing.TB, sctx sessionctx.Context, inTxn bool) *txnAssert[*isolation.PessimisticRRTxnContextProvider] { return &txnAssert[*isolation.PessimisticRRTxnContextProvider]{ sctx: sctx, diff --git a/sessiontxn/isolation/serializable_test.go b/sessiontxn/isolation/serializable_test.go index f192adf909369..90034e0934278 100644 --- a/sessiontxn/isolation/serializable_test.go +++ b/sessiontxn/isolation/serializable_test.go @@ -32,6 +32,7 @@ import ( 
"github.com/pingcap/tidb/sessiontxn" "github.com/pingcap/tidb/sessiontxn/isolation" "github.com/pingcap/tidb/testkit" + "github.com/pingcap/tidb/testkit/testfork" "github.com/stretchr/testify/require" tikverr "github.com/tikv/client-go/v2/error" ) @@ -110,62 +111,67 @@ func TestSerializableInitialize(t *testing.T) { store, _, clean := testkit.CreateMockStoreAndDomain(t) defer clean() - tk := testkit.NewTestKit(t, store) - se := tk.Session() - tk.MustExec("set tidb_skip_isolation_level_check = 1") - tk.MustExec("set @@tx_isolation = 'SERIALIZABLE'") - tk.MustExec("set @@tidb_txn_mode='pessimistic'") - - // begin outsize a txn - assert := activeSerializableAssert(t, se, true) - tk.MustExec("begin") - assert.Check(t) - - // begin outsize a txn - assert = activeSerializableAssert(t, se, true) - tk.MustExec("begin") - assert.Check(t) - - // START TRANSACTION WITH CAUSAL CONSISTENCY ONLY - assert = activeSerializableAssert(t, se, true) - assert.causalConsistencyOnly = true - tk.MustExec("START TRANSACTION WITH CAUSAL CONSISTENCY ONLY") - assert.Check(t) - - // EnterNewTxnDefault will create an active txn, but not explicit - assert = activeSerializableAssert(t, se, false) - require.NoError(t, sessiontxn.GetTxnManager(se).EnterNewTxn(context.TODO(), &sessiontxn.EnterNewTxnRequest{ - Type: sessiontxn.EnterNewTxnDefault, - TxnMode: ast.Pessimistic, - })) - assert.Check(t) - - // non-active txn and then active it - tk.MustExec("rollback") - tk.MustExec("set @@autocommit=0") - assert = inactiveSerializableAssert(se) - assertAfterActive := activeSerializableAssert(t, se, true) - require.NoError(t, se.PrepareTxnCtx(context.TODO())) - provider := assert.CheckAndGetProvider(t) - require.NoError(t, provider.OnStmtStart(context.TODO(), nil)) - ts, err := provider.GetStmtReadTS() - require.NoError(t, err) - assertAfterActive.Check(t) - require.Equal(t, ts, se.GetSessionVars().TxnCtx.StartTS) - tk.MustExec("rollback") - - // Case Pessimistic Autocommit - config.GetGlobalConfig().PessimisticTxn.PessimisticAutoCommit.Store(true) - assert = inactiveSerializableAssert(se) - assertAfterActive = activeSerializableAssert(t, se, true) - require.NoError(t, se.PrepareTxnCtx(context.TODO())) - provider = assert.CheckAndGetProvider(t) - require.NoError(t, provider.OnStmtStart(context.TODO(), nil)) - ts, err = provider.GetStmtReadTS() - require.NoError(t, err) - assertAfterActive.Check(t) - require.Equal(t, ts, se.GetSessionVars().TxnCtx.StartTS) - tk.MustExec("rollback") + testfork.RunTest(t, func(t *testfork.T) { + clearScopeSettings := forkScopeSettings(t, store) + defer clearScopeSettings() + + tk := testkit.NewTestKit(t, store) + se := tk.Session() + tk.MustExec("set tidb_skip_isolation_level_check = 1") + tk.MustExec("set @@tx_isolation = 'SERIALIZABLE'") + tk.MustExec("set @@tidb_txn_mode='pessimistic'") + + // begin outsize a txn + assert := activeSerializableAssert(t, se, true) + tk.MustExec("begin") + assert.Check(t) + + // begin outsize a txn + assert = activeSerializableAssert(t, se, true) + tk.MustExec("begin") + assert.Check(t) + + // START TRANSACTION WITH CAUSAL CONSISTENCY ONLY + assert = activeSerializableAssert(t, se, true) + assert.causalConsistencyOnly = true + tk.MustExec("START TRANSACTION WITH CAUSAL CONSISTENCY ONLY") + assert.Check(t) + + // EnterNewTxnDefault will create an active txn, but not explicit + assert = activeSerializableAssert(t, se, false) + require.NoError(t, sessiontxn.GetTxnManager(se).EnterNewTxn(context.TODO(), &sessiontxn.EnterNewTxnRequest{ + Type: 
sessiontxn.EnterNewTxnDefault, + TxnMode: ast.Pessimistic, + })) + assert.Check(t) + + // non-active txn and then active it + tk.MustExec("rollback") + tk.MustExec("set @@autocommit=0") + assert = inactiveSerializableAssert(se) + assertAfterActive := activeSerializableAssert(t, se, true) + require.NoError(t, se.PrepareTxnCtx(context.TODO())) + provider := assert.CheckAndGetProvider(t) + require.NoError(t, provider.OnStmtStart(context.TODO(), nil)) + ts, err := provider.GetStmtReadTS() + require.NoError(t, err) + assertAfterActive.Check(t) + require.Equal(t, ts, se.GetSessionVars().TxnCtx.StartTS) + tk.MustExec("rollback") + + // Case Pessimistic Autocommit + config.GetGlobalConfig().PessimisticTxn.PessimisticAutoCommit.Store(true) + assert = inactiveSerializableAssert(se) + assertAfterActive = activeSerializableAssert(t, se, true) + require.NoError(t, se.PrepareTxnCtx(context.TODO())) + provider = assert.CheckAndGetProvider(t) + require.NoError(t, provider.OnStmtStart(context.TODO(), nil)) + ts, err = provider.GetStmtReadTS() + require.NoError(t, err) + assertAfterActive.Check(t) + require.Equal(t, ts, se.GetSessionVars().TxnCtx.StartTS) + tk.MustExec("rollback") + }) } func TestTidbSnapshotVarInSerialize(t *testing.T) { @@ -257,7 +263,7 @@ func TestTidbSnapshotVarInSerialize(t *testing.T) { } } -func activeSerializableAssert(t *testing.T, sctx sessionctx.Context, +func activeSerializableAssert(t testing.TB, sctx sessionctx.Context, inTxn bool) *txnAssert[*isolation.PessimisticSerializableTxnContextProvider] { return &txnAssert[*isolation.PessimisticSerializableTxnContextProvider]{ sctx: sctx, diff --git a/sessiontxn/staleread/BUILD.bazel b/sessiontxn/staleread/BUILD.bazel index e6a5696974964..d6272550153af 100644 --- a/sessiontxn/staleread/BUILD.bazel +++ b/sessiontxn/staleread/BUILD.bazel @@ -12,6 +12,7 @@ go_library( importpath = "github.com/pingcap/tidb/sessiontxn/staleread", visibility = ["//visibility:public"], deps = [ + "//config", "//domain", "//errno", "//expression", @@ -32,20 +33,28 @@ go_library( go_test( name = "staleread_test", - srcs = ["processor_test.go"], + srcs = [ + "main_test.go", + "processor_test.go", + "provider_test.go", + ], deps = [ ":staleread", "//domain", "//infoschema", + "//kv", "//parser", "//parser/ast", "//sessionctx", + "//sessiontxn", "//table/temptable", "//testkit", "//testkit/testsetup", "@com_github_pingcap_errors//:errors", + "@com_github_pingcap_failpoint//:failpoint", "@com_github_stretchr_testify//require", "@com_github_tikv_client_go_v2//oracle", + "@com_github_tikv_client_go_v2//tikv", "@org_uber_go_goleak//:goleak", ], ) diff --git a/sessiontxn/staleread/main_test.go b/sessiontxn/staleread/main_test.go new file mode 100644 index 0000000000000..a60b5a95c9007 --- /dev/null +++ b/sessiontxn/staleread/main_test.go @@ -0,0 +1,33 @@ +// Copyright 2022 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+package staleread_test + +import ( + "testing" + + "github.com/pingcap/tidb/testkit/testsetup" + "github.com/tikv/client-go/v2/tikv" + "go.uber.org/goleak" +) + +func TestMain(m *testing.M) { + testsetup.SetupForCommonTest() + tikv.EnableFailpoints() + opts := []goleak.Option{ + goleak.IgnoreTopFunction("github.com/golang/glog.(*loggingT).flushDaemon"), + goleak.IgnoreTopFunction("go.etcd.io/etcd/client/pkg/v3/logutil.(*MergeLogger).outputLoop"), + goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"), + } + goleak.VerifyTestMain(m, opts...) +} diff --git a/sessiontxn/staleread/processor_test.go b/sessiontxn/staleread/processor_test.go index 0e399d3b78760..111b9088fb364 100644 --- a/sessiontxn/staleread/processor_test.go +++ b/sessiontxn/staleread/processor_test.go @@ -28,21 +28,10 @@ import ( "github.com/pingcap/tidb/sessiontxn/staleread" "github.com/pingcap/tidb/table/temptable" "github.com/pingcap/tidb/testkit" - "github.com/pingcap/tidb/testkit/testsetup" "github.com/stretchr/testify/require" "github.com/tikv/client-go/v2/oracle" - "go.uber.org/goleak" ) -func TestMain(m *testing.M) { - opts := []goleak.Option{ - goleak.IgnoreTopFunction("github.com/golang/glog.(*loggingT).flushDaemon"), - goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"), - } - testsetup.SetupForCommonTest() - goleak.VerifyTestMain(m, opts...) -} - type staleReadPoint struct { tk *testkit.TestKit ts uint64 diff --git a/sessiontxn/staleread/provider.go b/sessiontxn/staleread/provider.go index 289d4bcc024e8..417154b7ea420 100644 --- a/sessiontxn/staleread/provider.go +++ b/sessiontxn/staleread/provider.go @@ -18,6 +18,7 @@ import ( "context" "github.com/pingcap/errors" + "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/ast" @@ -49,6 +50,16 @@ func (p *StalenessTxnContextProvider) GetTxnInfoSchema() infoschema.InfoSchema { return p.is } +// GetTxnScope returns the current txn scope +func (p *StalenessTxnContextProvider) GetTxnScope() string { + return p.sctx.GetSessionVars().TxnCtx.TxnScope +} + +// GetReadReplicaScope returns the read replica scope +func (p *StalenessTxnContextProvider) GetReadReplicaScope() string { + return config.GetTxnScopeFromConfig() +} + // GetStmtReadTS returns the read timestamp func (p *StalenessTxnContextProvider) GetStmtReadTS() (uint64, error) { return p.ts, nil @@ -97,6 +108,7 @@ func (p *StalenessTxnContextProvider) enterNewStaleTxnWithReplaceProvider() erro } txnCtx := p.sctx.GetSessionVars().TxnCtx + txnCtx.TxnScope = kv.GlobalTxnScope txnCtx.IsStaleness = true txnCtx.InfoSchema = p.is return nil @@ -150,7 +162,7 @@ func (p *StalenessTxnContextProvider) AdviseOptimizeWithPlan(_ interface{}) erro return nil } -// GetSnapshotWithStmtReadTS get snapshot with read ts and set the transaction related options +// GetSnapshotWithStmtReadTS gets snapshot with read ts and set the transaction related options // before return func (p *StalenessTxnContextProvider) GetSnapshotWithStmtReadTS() (kv.Snapshot, error) { txn, err := p.sctx.Txn(false) @@ -174,7 +186,7 @@ func (p *StalenessTxnContextProvider) GetSnapshotWithStmtReadTS() (kv.Snapshot, return snapshot, nil } -// GetSnapshotWithStmtForUpdateTS get snapshot with for update ts +// GetSnapshotWithStmtForUpdateTS gets snapshot with for update ts func (p *StalenessTxnContextProvider) GetSnapshotWithStmtForUpdateTS() (kv.Snapshot, error) { return nil, errors.New("GetSnapshotWithStmtForUpdateTS not supported for 
stalenessTxnProvider") } diff --git a/sessiontxn/staleread/provider_test.go b/sessiontxn/staleread/provider_test.go new file mode 100644 index 0000000000000..0b50f06f41746 --- /dev/null +++ b/sessiontxn/staleread/provider_test.go @@ -0,0 +1,116 @@ +// Copyright 2022 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package staleread_test + +import ( + "context" + "fmt" + "testing" + + "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/sessiontxn" + "github.com/pingcap/tidb/sessiontxn/staleread" + "github.com/pingcap/tidb/testkit" + "github.com/stretchr/testify/require" + "github.com/tikv/client-go/v2/oracle" +) + +func TestStaleReadTxnScope(t *testing.T) { + store, _, clean := testkit.CreateMockStoreAndDomain(t) + defer clean() + + tk := testkit.NewTestKit(t, store) + + checkProviderTxnScope := func() { + provider := createStaleReadProvider(t, tk, false) + require.Equal(t, kv.GlobalTxnScope, provider.GetTxnScope()) + + provider = createStaleReadProvider(t, tk, true) + require.Equal(t, kv.GlobalTxnScope, provider.GetTxnScope()) + + tk.MustExec("rollback") + } + + checkProviderTxnScope() + + require.NoError(t, failpoint.Enable("tikvclient/injectTxnScope", fmt.Sprintf(`return("%v")`, "bj"))) + defer func() { + require.NoError(t, failpoint.Disable("tikvclient/injectTxnScope")) + }() + + checkProviderTxnScope() + + tk.MustExec("set @@global.tidb_enable_local_txn=1") + tk.MustExec("rollback") + tk = testkit.NewTestKit(t, store) + checkProviderTxnScope() +} + +func TestStaleReadReplicaReadScope(t *testing.T) { + store, _, clean := testkit.CreateMockStoreAndDomain(t) + defer clean() + + tk := testkit.NewTestKit(t, store) + + checkProviderReplicaReadScope := func(scope string) { + provider := createStaleReadProvider(t, tk, false) + require.Equal(t, scope, provider.GetReadReplicaScope()) + + provider = createStaleReadProvider(t, tk, true) + require.Equal(t, scope, provider.GetReadReplicaScope()) + + tk.MustExec("rollback") + } + + checkProviderReplicaReadScope(kv.GlobalReplicaScope) + + require.NoError(t, failpoint.Enable("tikvclient/injectTxnScope", fmt.Sprintf(`return("%v")`, "bj"))) + defer func() { + require.NoError(t, failpoint.Disable("tikvclient/injectTxnScope")) + }() + + checkProviderReplicaReadScope("bj") +} + +func createStaleReadProvider(t *testing.T, tk *testkit.TestKit, explicitTxn bool) *staleread.StalenessTxnContextProvider { + tk.MustExec("rollback") + require.NoError(t, tk.Session().PrepareTxnCtx(context.TODO())) + se := tk.Session() + ts := getOracleTS(t, se) + if explicitTxn { + err := sessiontxn.GetTxnManager(se).EnterNewTxn(context.TODO(), &sessiontxn.EnterNewTxnRequest{ + Type: sessiontxn.EnterNewTxnWithBeginStmt, + StaleReadTS: ts, + }) + require.NoError(t, err) + } else { + is, err := domain.GetDomain(se).GetSnapshotInfoSchema(ts) + require.NoError(t, err) + err = sessiontxn.GetTxnManager(se).EnterNewTxn(context.TODO(), 
&sessiontxn.EnterNewTxnRequest{ + Type: sessiontxn.EnterNewTxnWithReplaceProvider, + Provider: staleread.NewStalenessTxnContextProvider(se, ts, is), + }) + require.NoError(t, err) + } + return sessiontxn.GetTxnManager(se).GetContextProvider().(*staleread.StalenessTxnContextProvider) +} + +func getOracleTS(t testing.TB, sctx sessionctx.Context) uint64 { + ts, err := sctx.GetStore().GetOracle().GetTimestamp(context.TODO(), &oracle.Option{TxnScope: oracle.GlobalTxnScope}) + require.NoError(t, err) + return ts +} diff --git a/sessiontxn/txn_context_test.go b/sessiontxn/txn_context_test.go index 98aa0d7d2cd96..75a3b72f1ac38 100644 --- a/sessiontxn/txn_context_test.go +++ b/sessiontxn/txn_context_test.go @@ -587,13 +587,13 @@ func TestTxnContextForPrepareExecute(t *testing.T) { } func TestTxnContextForStaleReadInPrepare(t *testing.T) { - store, do, deferFunc := setupTxnContextTest(t) + store, _, deferFunc := setupTxnContextTest(t) defer deferFunc() tk := testkit.NewTestKit(t, store) tk.MustExec("use test") se := tk.Session() - is1 := do.InfoSchema() + is1 := se.GetDomainInfoSchema() tk.MustExec("do sleep(0.1)") tk.MustExec("set @a=now(6)") tk.MustExec("prepare s1 from 'select * from t1 where id=1'") @@ -660,6 +660,32 @@ func TestTxnContextForStaleReadInPrepare(t *testing.T) { doWithCheckPath(t, se, normalPathRecords, func() { tk.MustExec("execute s3") }) + se.SetValue(sessiontxn.AssertTxnInfoSchemaKey, nil) + + // stale read should not use plan cache + is2 := se.GetDomainInfoSchema() + se.SetValue(sessiontxn.AssertTxnInfoSchemaKey, nil) + tk.MustExec("set @@tx_read_ts=''") + tk.MustExec("do sleep(0.1)") + tk.MustExec("set @b=now(6)") + tk.MustExec("do sleep(0.1)") + tk.MustExec("update t1 set v=v+1 where id=1") + se.SetValue(sessiontxn.AssertTxnInfoSchemaKey, is2) + doWithCheckPath(t, se, path, func() { + rs, err := se.ExecutePreparedStmt(context.TODO(), stmtID1, nil) + require.NoError(t, err) + tk.ResultSetToResult(rs, fmt.Sprintf("%v", rs)).Check(testkit.Rows("1 12")) + }) + se.SetValue(sessiontxn.AssertTxnInfoSchemaKey, nil) + tk.MustExec("set @@tx_read_ts=@b") + se.SetValue(sessiontxn.AssertTxnInfoSchemaKey, is2) + doWithCheckPath(t, se, path, func() { + rs, err := se.ExecutePreparedStmt(context.TODO(), stmtID1, nil) + require.NoError(t, err) + tk.ResultSetToResult(rs, fmt.Sprintf("%v", rs)).Check(testkit.Rows("1 11")) + }) + se.SetValue(sessiontxn.AssertTxnInfoSchemaKey, nil) + tk.MustExec("set @@tx_read_ts=''") } func TestTxnContextPreparedStmtWithForUpdate(t *testing.T) { @@ -776,3 +802,83 @@ func TestStillWriteConflictAfterRetry(t *testing.T) { } }) } + +func TestOptimisticTxnRetryInPessimisticMode(t *testing.T) { + store, _, deferFunc := setupTxnContextTest(t) + defer deferFunc() + + queries := []string{ + "update t1 set v=v+1", + "update t1 set v=v+1 where id=1", + "update t1 set v=v+1 where id=1 and v>0", + "update t1 set v=v+1 where id in (1, 2, 3)", + "update t1 set v=v+1 where id in (1, 2, 3) and v>0", + } + + testfork.RunTest(t, func(t *testfork.T) { + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("truncate table t1") + tk.MustExec("insert into t1 values(1, 10)") + tk2 := testkit.NewSteppedTestKit(t, store) + defer tk2.MustExec("rollback") + + tk2.MustExec("use test") + tk2.MustExec("set @@tidb_txn_mode = 'pessimistic'") + tk2.MustExec("set autocommit = 1") + + // When autocommit meets write conflict, it will retry in pessimistic mode. + // conflictAfterTransfer being true means we encounter a write-conflict again during + // the pessimistic mode. 
+ // doubleConflictAfterTransfer being true means we encounter a write-conflict again + // during the pessimistic retry phase. + // And only conflictAfterTransfer being true allows doubleConflictAfterTransfer being true. + conflictAfterTransfer := testfork.PickEnum(t, true, false) + doubleConflictAfterTransfer := testfork.PickEnum(t, true, false) + if !conflictAfterTransfer && doubleConflictAfterTransfer { + return + } + + tk2.SetBreakPoints( + sessiontxn.BreakPointBeforeExecutorFirstRun, + sessiontxn.BreakPointOnStmtRetryAfterLockError, + ) + + query := testfork.Pick(t, queries) + + tk2.SteppedMustExec(query) + + // Pause the session before the executor first run and then update the record in another session + tk2.ExpectStopOnBreakPoint(sessiontxn.BreakPointBeforeExecutorFirstRun) + // After this update, tk2's statement will encounter write conflict. As it's an autocommit transaction, + // it will transfer to pessimistic transaction mode. + tk.MustExec("update t1 set v=v+1") + + if conflictAfterTransfer { + tk2.Continue().ExpectStopOnBreakPoint(sessiontxn.BreakPointBeforeExecutorFirstRun) + tk.MustExec("update t1 set v=v+1") + + if doubleConflictAfterTransfer { + // Session continues, it should get a lock error and retry, we pause the session before the executor's next run + // and then update the record in another session again. + tk2.Continue().ExpectStopOnBreakPoint(sessiontxn.BreakPointOnStmtRetryAfterLockError) + tk.MustExec("update t1 set v=v+1") + } + + // Because the record is updated by another session again, when this session continues, it will get a lock error again. + tk2.Continue().ExpectStopOnBreakPoint(sessiontxn.BreakPointOnStmtRetryAfterLockError) + tk2.Continue().ExpectIdle() + + if doubleConflictAfterTransfer { + tk2.MustQuery("select * from t1").Check(testkit.Rows("1 14")) + } else { + tk2.MustQuery("select * from t1").Check(testkit.Rows("1 13")) + } + } else { + tk2.Continue().ExpectStopOnBreakPoint(sessiontxn.BreakPointBeforeExecutorFirstRun) + tk2.Continue().ExpectIdle() + + tk2.MustQuery("select * from t1").Check(testkit.Rows("1 12")) + } + }) +} diff --git a/sessiontxn/txn_manager_test.go b/sessiontxn/txn_manager_test.go index 137a06bcd7ce1..983513fa44d03 100644 --- a/sessiontxn/txn_manager_test.go +++ b/sessiontxn/txn_manager_test.go @@ -307,7 +307,7 @@ func TestGetSnapshot(t *testing.T) { ts, err := mgr.GetStmtReadTS() require.NoError(t, err) compareSnap := sessiontxn.GetSnapshotWithTS(sctx, ts) - snap, err := mgr.GetReadSnapshot() + snap, err := mgr.GetSnapshotWithStmtReadTS() require.NoError(t, err) require.True(t, isSnapshotEqual(t, compareSnap, snap)) @@ -317,10 +317,10 @@ func TestGetSnapshot(t *testing.T) { ts, err = mgr.GetStmtForUpdateTS() require.NoError(t, err) compareSnap2 := sessiontxn.GetSnapshotWithTS(sctx, ts) - snap, err = mgr.GetReadSnapshot() + snap, err = mgr.GetSnapshotWithStmtReadTS() require.NoError(t, err) require.False(t, isSnapshotEqual(t, compareSnap2, snap)) - snap, err = mgr.GetForUpdateSnapshot() + snap, err = mgr.GetSnapshotWithStmtForUpdateTS() require.NoError(t, err) require.True(t, isSnapshotEqual(t, compareSnap2, snap)) @@ -337,7 +337,7 @@ func TestGetSnapshot(t *testing.T) { ts, err := mgr.GetStmtReadTS() require.NoError(t, err) compareSnap := sessiontxn.GetSnapshotWithTS(sctx, ts) - snap, err := mgr.GetReadSnapshot() + snap, err := mgr.GetSnapshotWithStmtReadTS() require.NoError(t, err) require.True(t, isSnapshotEqual(t, compareSnap, snap)) @@ -347,10 +347,10 @@ func TestGetSnapshot(t *testing.T) { ts, err = 
mgr.GetStmtForUpdateTS() require.NoError(t, err) compareSnap2 := sessiontxn.GetSnapshotWithTS(sctx, ts) - snap, err = mgr.GetReadSnapshot() + snap, err = mgr.GetSnapshotWithStmtReadTS() require.NoError(t, err) require.True(t, isSnapshotEqual(t, compareSnap2, snap)) - snap, err = mgr.GetForUpdateSnapshot() + snap, err = mgr.GetSnapshotWithStmtForUpdateTS() require.NoError(t, err) require.True(t, isSnapshotEqual(t, compareSnap2, snap)) @@ -366,7 +366,7 @@ func TestGetSnapshot(t *testing.T) { ts, err := mgr.GetStmtReadTS() require.NoError(t, err) compareSnap := sessiontxn.GetSnapshotWithTS(sctx, ts) - snap, err := mgr.GetReadSnapshot() + snap, err := mgr.GetSnapshotWithStmtReadTS() require.NoError(t, err) require.True(t, isSnapshotEqual(t, compareSnap, snap)) @@ -376,10 +376,10 @@ func TestGetSnapshot(t *testing.T) { ts, err = mgr.GetStmtForUpdateTS() require.NoError(t, err) compareSnap2 := sessiontxn.GetSnapshotWithTS(sctx, ts) - snap, err = mgr.GetReadSnapshot() + snap, err = mgr.GetSnapshotWithStmtReadTS() require.NoError(t, err) require.True(t, isSnapshotEqual(t, compareSnap2, snap)) - snap, err = mgr.GetForUpdateSnapshot() + snap, err = mgr.GetSnapshotWithStmtForUpdateTS() require.NoError(t, err) require.True(t, isSnapshotEqual(t, compareSnap2, snap)) @@ -397,7 +397,7 @@ func TestGetSnapshot(t *testing.T) { ts, err := mgr.GetStmtReadTS() require.NoError(t, err) compareSnap := sessiontxn.GetSnapshotWithTS(sctx, ts) - snap, err := mgr.GetReadSnapshot() + snap, err := mgr.GetSnapshotWithStmtReadTS() require.NoError(t, err) require.True(t, isSnapshotEqual(t, compareSnap, snap)) @@ -407,10 +407,10 @@ func TestGetSnapshot(t *testing.T) { ts, err = mgr.GetStmtForUpdateTS() require.NoError(t, err) compareSnap2 := sessiontxn.GetSnapshotWithTS(sctx, ts) - snap, err = mgr.GetReadSnapshot() + snap, err = mgr.GetSnapshotWithStmtReadTS() require.NoError(t, err) require.True(t, isSnapshotEqual(t, compareSnap2, snap)) - snap, err = mgr.GetForUpdateSnapshot() + snap, err = mgr.GetSnapshotWithStmtForUpdateTS() require.NoError(t, err) require.True(t, isSnapshotEqual(t, compareSnap2, snap)) diff --git a/store/gcworker/gc_worker.go b/store/gcworker/gc_worker.go index 096ef5923d71f..0d0154d3a7327 100644 --- a/store/gcworker/gc_worker.go +++ b/store/gcworker/gc_worker.go @@ -74,8 +74,9 @@ type GCWorker struct { cancel context.CancelFunc done chan error testingKnobs struct { - scanLocks func(key []byte, regionID uint64) []*txnlock.Lock - resolveLocks func(locks []*txnlock.Lock, regionID tikv.RegionVerID) (ok bool, err error) + scanLocks func(key []byte, regionID uint64, maxVersion uint64) []*txnlock.Lock + batchResolveLocks func(locks []*txnlock.Lock, regionID tikv.RegionVerID, safepoint uint64) (ok bool, err error) + resolveLocks func(locks []*txnlock.Lock, lowResolutionTS uint64) (int64, error) } } @@ -147,6 +148,9 @@ const ( gcDefaultConcurrency = 2 gcMinConcurrency = 1 gcMaxConcurrency = 128 + + gcTryResolveLocksIntervalFromNow = time.Minute * 5 + // We don't want gc to sweep out the cached info belong to other processes, like coprocessor. gcScanLockLimit = txnlock.ResolvedCacheSize / 2 @@ -1014,12 +1018,26 @@ func (w *GCWorker) checkUsePhysicalScanLock() (bool, error) { } func (w *GCWorker) resolveLocks(ctx context.Context, safePoint uint64, concurrency int, usePhysical bool) (bool, error) { + // tryResolveLocksTS is defined as `now() - gcTryResolveLocksIntervalFromNow`. + // It is used to try to resolve locks whose ts is smaller than tryResolveLocksTS and which have already expired. 
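+ // If the computed ts is earlier than the GC safepoint, the safepoint is used instead, so locks at or before the safepoint are always resolved (see the check below).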
+ tryResolveLocksTS, err := w.getTryResolveLocksTS() + if err != nil { + return false, err + } + + if tryResolveLocksTS < safePoint { + tryResolveLocksTS = safePoint + } else { + // TODO: add a switch for tryResolveLocksTS. + // If the config log-backup.enable is false in PiTR, set safePoint to tryResolveLocksTS directly. + } + if !usePhysical { - return false, w.legacyResolveLocks(ctx, safePoint, concurrency) + return false, w.legacyResolveLocks(ctx, safePoint, tryResolveLocksTS, concurrency) } // First try resolve locks with physical scan - err := w.resolveLocksPhysical(ctx, safePoint) + err = w.resolveLocksPhysical(ctx, safePoint) if err == nil { return true, nil } @@ -1027,21 +1045,28 @@ func (w *GCWorker) resolveLocks(ctx context.Context, safePoint uint64, concurren logutil.Logger(ctx).Error("[gc worker] resolve locks with physical scan failed, trying fallback to legacy resolve lock", zap.String("uuid", w.uuid), zap.Uint64("safePoint", safePoint), + zap.Uint64("try-resolve-locks-ts", tryResolveLocksTS), zap.Error(err)) - return false, w.legacyResolveLocks(ctx, safePoint, concurrency) + return false, w.legacyResolveLocks(ctx, safePoint, tryResolveLocksTS, concurrency) } -func (w *GCWorker) legacyResolveLocks(ctx context.Context, safePoint uint64, concurrency int) error { +func (w *GCWorker) legacyResolveLocks( + ctx context.Context, + safePoint uint64, + tryResolveLocksTS uint64, + concurrency int, +) error { metrics.GCWorkerCounter.WithLabelValues("resolve_locks").Inc() logutil.Logger(ctx).Info("[gc worker] start resolve locks", zap.String("uuid", w.uuid), zap.Uint64("safePoint", safePoint), + zap.Uint64("try-resolve-locks-ts", tryResolveLocksTS), zap.Int("concurrency", concurrency)) startTime := time.Now() handler := func(ctx context.Context, r tikvstore.KeyRange) (rangetask.TaskStat, error) { - return w.resolveLocksForRange(ctx, safePoint, r.StartKey, r.EndKey) + return w.resolveLocksForRange(ctx, safePoint, tryResolveLocksTS, r.StartKey, r.EndKey) } runner := rangetask.NewRangeTaskRunner("resolve-locks-runner", w.tikvStore, concurrency, handler) @@ -1058,17 +1083,90 @@ func (w *GCWorker) legacyResolveLocks(ctx context.Context, safePoint uint64, con logutil.Logger(ctx).Info("[gc worker] finish resolve locks", zap.String("uuid", w.uuid), zap.Uint64("safePoint", safePoint), + zap.Uint64("try-resolve-locks-ts", tryResolveLocksTS), zap.Int("regions", runner.CompletedRegions())) metrics.GCHistogram.WithLabelValues("resolve_locks").Observe(time.Since(startTime).Seconds()) return nil } -func (w *GCWorker) resolveLocksForRange(ctx context.Context, safePoint uint64, startKey []byte, endKey []byte) (rangetask.TaskStat, error) { +// getTryResolveLocksTS gets the TryResolveLocksTS +// that is defined as `now() - gcTryResolveLocksIntervalFromNow`. +func (w *GCWorker) getTryResolveLocksTS() (uint64, error) { + now, err := w.tikvStore.CurrentTimestamp(kv.GlobalTxnScope) + if err != nil { + return 0, err + } + + gcTryResolveLockTS := oracle.ComposeTS(oracle.ExtractPhysical(now)-gcTryResolveLocksIntervalFromNow.Milliseconds(), oracle.ExtractLogical(now)) + return gcTryResolveLockTS, nil +} + +// batchResolveExpiredLocks tries to resolve expired locks with the batch method. +// It traverses the given locks and checks that: +// 1. If the ts of a lock is equal to or smaller than forceResolveLocksTS (which actually equals the safepoint), +// it will roll back the txn, no matter whether the lock is expired or not. +// 2. If the ts of a lock is larger than forceResolveLocksTS, it will check the status of the txn. 
+// Resolve the lock if the txn has expired, or do nothing otherwise. +func (w *GCWorker) batchResolveExpiredLocks( + bo *tikv.Backoffer, + locks []*txnlock.Lock, + loc tikv.RegionVerID, + forceResolveLocksTS uint64, + tryResolveLocksTS uint64, +) (bool, error) { + if len(locks) == 0 { + return true, nil + } + + forceResolveLocks := make([]*txnlock.Lock, 0, len(locks)) + tryResolveLocks := make([]*txnlock.Lock, 0, len(locks)) + for _, l := range locks { + if l.TxnID <= forceResolveLocksTS { + forceResolveLocks = append(forceResolveLocks, l) + } else { + tryResolveLocks = append(tryResolveLocks, l) + } + } + + logutil.BgLogger().Debug("batchResolveExpiredLocks", + zap.Uint64("force-resolve-locks-ts", forceResolveLocksTS), + zap.Uint64("try-resolve-locks-ts", tryResolveLocksTS), + zap.Int("force-resolve-locks-count", len(forceResolveLocks)), + zap.Int("try-resolve-locks-count", len(tryResolveLocks))) + + var ( + ok bool + err error + ) + if w.testingKnobs.batchResolveLocks != nil { + ok, err = w.testingKnobs.batchResolveLocks(forceResolveLocks, loc, forceResolveLocksTS) + } else { + ok, err = w.tikvStore.GetLockResolver().BatchResolveLocks(bo, forceResolveLocks, loc) + } + if err != nil || !ok { + return ok, err + } + + if w.testingKnobs.resolveLocks != nil { + _, err = w.testingKnobs.resolveLocks(tryResolveLocks, tryResolveLocksTS) + } else { + _, err = w.tikvStore.GetLockResolver().ResolveLocks(bo, 0, tryResolveLocks) + } + return err == nil, errors.Trace(err) +} + +func (w *GCWorker) resolveLocksForRange( + ctx context.Context, + forceResolveLocksTS uint64, + tryResolveLocksTS uint64, + startKey []byte, + endKey []byte, +) (rangetask.TaskStat, error) { // for scan lock request, we must return all locks even if they are generated // by the same transaction. because gc worker need to make sure all locks have been // cleaned. req := tikvrpc.NewRequest(tikvrpc.CmdScanLock, &kvrpcpb.ScanLockRequest{ - MaxVersion: safePoint, + MaxVersion: tryResolveLocksTS, Limit: gcScanLockLimit, }, kvrpcpb.Context{ RequestSource: tikvutil.RequestSourceFromCtx(ctx), @@ -1129,19 +1227,11 @@ retryScanAndResolve: locks = append(locks, txnlock.NewLock(li)) } if w.testingKnobs.scanLocks != nil { - locks = append(locks, w.testingKnobs.scanLocks(key, loc.Region.GetID())...) + locks = append(locks, w.testingKnobs.scanLocks(key, loc.Region.GetID(), tryResolveLocksTS)...) 
} locForResolve := loc for { - var ( - ok bool - err1 error - ) - if w.testingKnobs.resolveLocks != nil { - ok, err1 = w.testingKnobs.resolveLocks(locks, locForResolve.Region) - } else { - ok, err1 = w.tikvStore.GetLockResolver().BatchResolveLocks(bo, locks, locForResolve.Region) - } + ok, err1 := w.batchResolveExpiredLocks(bo, locks, locForResolve.Region, forceResolveLocksTS, tryResolveLocksTS) if err1 != nil { return stat, errors.Trace(err1) } diff --git a/store/gcworker/gc_worker_test.go b/store/gcworker/gc_worker_test.go index 09ec0437f6d79..362c304efafe5 100644 --- a/store/gcworker/gc_worker_test.go +++ b/store/gcworker/gc_worker_test.go @@ -269,6 +269,17 @@ func TestGetOracleTime(t *testing.T) { timeEqual(t, t2, t1.Add(time.Second*10), time.Millisecond*10) } +func TestGetLowResolveTS(t *testing.T) { + s, clean := createGCWorkerSuite(t) + defer clean() + + lowResolveTS, err := s.gcWorker.getTryResolveLocksTS() + require.NoError(t, err) + + lowResolveTime := oracle.GetTimeFromTS(lowResolveTS) + timeEqual(t, time.Now(), lowResolveTime.Add(gcTryResolveLocksIntervalFromNow), time.Millisecond*10) +} + func TestMinStartTS(t *testing.T) { s, clean := createGCWorkerSuite(t) defer clean() @@ -915,7 +926,8 @@ func TestResolveLockRangeInfine(t *testing.T) { require.NoError(t, failpoint.Disable("tikvclient/invalidCacheAndRetry")) require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/store/gcworker/setGcResolveMaxBackoff")) }() - _, err := s.gcWorker.resolveLocksForRange(gcContext(), 1, []byte{0}, []byte{1}) + + _, err := s.gcWorker.resolveLocksForRange(gcContext(), 1, 3, []byte{0}, []byte{1}) require.Error(t, err) } @@ -928,31 +940,92 @@ func TestResolveLockRangeMeetRegionCacheMiss(t *testing.T) { scanCntRef = &scanCnt resolveCnt int resolveCntRef = &resolveCnt + + scanLockCnt int + resolveBeforeSafepointLockCnt int + resolveAfterSafepointLockCnt int + safepointTS uint64 = 434245550444904450 + lowResolveTS uint64 = 434245550449098752 ) - s.gcWorker.testingKnobs.scanLocks = func(key []byte, regionID uint64) []*txnlock.Lock { + + allLocks := []*txnlock.Lock{ + { + Key: []byte{1}, + // TxnID < safepointTS + TxnID: 434245550444904449, + TTL: 5, + }, + { + Key: []byte{2}, + // safepointTS < TxnID < lowResolveTS , TxnID + TTL < lowResolveTS + TxnID: 434245550445166592, + TTL: 10, + }, + { + Key: []byte{3}, + // safepointTS < TxnID < lowResolveTS , TxnID + TTL > lowResolveTS + TxnID: 434245550445166593, + TTL: 20, + }, + { + Key: []byte{4}, + // TxnID > lowResolveTS + TxnID: 434245550449099752, + TTL: 20, + }, + } + + s.gcWorker.testingKnobs.scanLocks = func(key []byte, regionID uint64, maxVersion uint64) []*txnlock.Lock { *scanCntRef++ - return []*txnlock.Lock{ - { - Key: []byte{1}, - }, - { - Key: []byte{1}, - }, + + locks := make([]*txnlock.Lock, 0) + for _, l := range allLocks { + if l.TxnID <= maxVersion { + locks = append(locks, l) + scanLockCnt++ + } } + return locks } - s.gcWorker.testingKnobs.resolveLocks = func(locks []*txnlock.Lock, regionID tikv.RegionVerID) (ok bool, err error) { + s.gcWorker.testingKnobs.batchResolveLocks = func( + locks []*txnlock.Lock, + regionID tikv.RegionVerID, + safepoint uint64, + ) (ok bool, err error) { *resolveCntRef++ if *resolveCntRef == 1 { s.gcWorker.tikvStore.GetRegionCache().InvalidateCachedRegion(regionID) // mock the region cache miss error return false, nil } + + resolveBeforeSafepointLockCnt = len(locks) + for _, l := range locks { + require.True(t, l.TxnID <= safepoint) + } return true, nil } - _, err := 
s.gcWorker.resolveLocksForRange(gcContext(), 1, []byte{0}, []byte{10}) + + s.gcWorker.testingKnobs.resolveLocks = func( + locks []*txnlock.Lock, + lowResolutionTS uint64, + ) (int64, error) { + for _, l := range locks { + expiredTS := oracle.ComposeTS(oracle.ExtractPhysical(l.TxnID)+int64(l.TTL), oracle.ExtractLogical(l.TxnID)) + if expiredTS <= lowResolutionTS { + resolveAfterSafepointLockCnt++ + } + } + return 0, nil + } + + _, err := s.gcWorker.resolveLocksForRange(gcContext(), safepointTS, lowResolveTS, []byte{0}, []byte{10}) require.NoError(t, err) require.Equal(t, 2, resolveCnt) require.Equal(t, 1, scanCnt) + require.Equal(t, 3, scanLockCnt) + require.Equal(t, 1, resolveBeforeSafepointLockCnt) + require.Equal(t, 1, resolveAfterSafepointLockCnt) } func TestResolveLockRangeMeetRegionEnlargeCausedByRegionMerge(t *testing.T) { @@ -978,7 +1051,7 @@ func TestResolveLockRangeMeetRegionEnlargeCausedByRegionMerge(t *testing.T) { s.cluster.Split(s.initRegion.regionID, region2, []byte("m"), newPeers, newPeers[0]) // init a, b lock in region1 and o, p locks in region2 - s.gcWorker.testingKnobs.scanLocks = func(key []byte, regionID uint64) []*txnlock.Lock { + s.gcWorker.testingKnobs.scanLocks = func(key []byte, regionID uint64, maxVersion uint64) []*txnlock.Lock { if regionID == s.initRegion.regionID { return []*txnlock.Lock{{Key: []byte("a")}, {Key: []byte("b")}} } @@ -988,7 +1061,11 @@ func TestResolveLockRangeMeetRegionEnlargeCausedByRegionMerge(t *testing.T) { return []*txnlock.Lock{} } - s.gcWorker.testingKnobs.resolveLocks = func(locks []*txnlock.Lock, regionID tikv.RegionVerID) (ok bool, err error) { + s.gcWorker.testingKnobs.batchResolveLocks = func( + locks []*txnlock.Lock, + regionID tikv.RegionVerID, + safepoint uint64, + ) (ok bool, err error) { if regionID.GetID() == s.initRegion.regionID && *firstAccessRef { *firstAccessRef = false // merge region2 into region1 and return EpochNotMatch error. @@ -1001,7 +1078,7 @@ func TestResolveLockRangeMeetRegionEnlargeCausedByRegionMerge(t *testing.T) { []*metapb.Region{regionMeta}) require.NoError(t, err) // also let region1 contains all 4 locks - s.gcWorker.testingKnobs.scanLocks = func(key []byte, regionID uint64) []*txnlock.Lock { + s.gcWorker.testingKnobs.scanLocks = func(key []byte, regionID uint64, maxVersion uint64) []*txnlock.Lock { if regionID == s.initRegion.regionID { locks := []*txnlock.Lock{ {Key: []byte("a")}, @@ -1024,8 +1101,14 @@ func TestResolveLockRangeMeetRegionEnlargeCausedByRegionMerge(t *testing.T) { } return true, nil } + s.gcWorker.testingKnobs.resolveLocks = func( + locks []*txnlock.Lock, + lowResolutionTS uint64, + ) (int64, error) { + return 0, nil + } - _, err := s.gcWorker.resolveLocksForRange(gcContext(), 1, []byte(""), []byte("z")) + _, err := s.gcWorker.resolveLocksForRange(gcContext(), 1, 3, []byte(""), []byte("z")) require.NoError(t, err) require.Len(t, resolvedLock, 4) expects := [][]byte{[]byte("a"), []byte("b"), []byte("o"), []byte("p")} @@ -1783,6 +1866,77 @@ func TestGCWithPendingTxn(t *testing.T) { require.NoError(t, err) err = txn.Commit(ctx) + require.Error(t, err) +} + +func TestGCWithPendingTxn2(t *testing.T) { + s, clean := createGCWorkerSuite(t) + defer clean() + + ctx := gcContext() + gcSafePointCacheInterval = 0 + err := s.gcWorker.saveValueToSysTable(gcEnableKey, booleanFalse) + require.NoError(t, err) + + now, err := s.oracle.GetTimestamp(ctx, &oracle.Option{}) + require.NoError(t, err) + + // Prepare to run gc with txn's startTS as the safepoint ts. 
+ spkv := s.tikvStore.GetSafePointKV() + err = spkv.Put(fmt.Sprintf("%s/%s", infosync.ServerMinStartTSPath, "a"), strconv.FormatUint(now, 10)) + require.NoError(t, err) + s.mustSetTiDBServiceSafePoint(t, now, now) + veryLong := gcDefaultLifeTime * 100 + err = s.gcWorker.saveTime(gcLastRunTimeKey, oracle.GetTimeFromTS(s.mustAllocTs(t)).Add(-veryLong)) + require.NoError(t, err) + s.gcWorker.lastFinish = time.Now().Add(-veryLong) + err = s.gcWorker.saveValueToSysTable(gcEnableKey, booleanTrue) + require.NoError(t, err) + + // lock the key1 + k1 := []byte("tk1") + v1 := []byte("v1") + txn, err := s.store.Begin(tikv.WithStartTS(now)) + require.NoError(t, err) + txn.SetOption(kv.Pessimistic, true) + lockCtx := &kv.LockCtx{ForUpdateTS: txn.StartTS(), WaitStartTime: time.Now()} + + err = txn.Set(k1, v1) + require.NoError(t, err) + err = txn.LockKeys(ctx, lockCtx, k1) + require.NoError(t, err) + + // lock the key2 + k2 := []byte("tk2") + v2 := []byte("v2") + startTS := oracle.ComposeTS(oracle.ExtractPhysical(now)+10000, oracle.ExtractLogical(now)) + txn2, err := s.store.Begin(tikv.WithStartTS(startTS)) + require.NoError(t, err) + txn2.SetOption(kv.Pessimistic, true) + lockCtx = &kv.LockCtx{ForUpdateTS: txn2.StartTS(), WaitStartTime: time.Now()} + + err = txn2.Set(k2, v2) + require.NoError(t, err) + err = txn2.LockKeys(ctx, lockCtx, k2) + require.NoError(t, err) + + // Trigger the tick to let the gc job start. + s.oracle.AddOffset(time.Minute * 5) + err = s.gcWorker.leaderTick(ctx) + require.NoError(t, err) + // Wait for GC to finish + select { + case err = <-s.gcWorker.done: + s.gcWorker.gcIsRunning = false + break + case <-time.After(time.Second * 10): + err = errors.New("receive from s.gcWorker.done timeout") + } + require.NoError(t, err) + + err = txn.Commit(ctx) + require.Error(t, err) + err = txn2.Commit(ctx) require.NoError(t, err) } diff --git a/store/mockstore/unistore/cophandler/cop_handler.go b/store/mockstore/unistore/cophandler/cop_handler.go index 3351f01f71888..75fa686ff8fca 100644 --- a/store/mockstore/unistore/cophandler/cop_handler.go +++ b/store/mockstore/unistore/cophandler/cop_handler.go @@ -501,7 +501,7 @@ func genRespWithMPPExec(chunks []tipb.Chunk, lastRange *coprocessor.KeyRange, co } } resp.ExecDetails = &kvrpcpb.ExecDetails{ - TimeDetail: &kvrpcpb.TimeDetail{ProcessWallTimeMs: int64(dur / time.Millisecond)}, + TimeDetail: &kvrpcpb.TimeDetail{ProcessWallTimeMs: uint64(dur / time.Millisecond)}, } resp.ExecDetailsV2 = &kvrpcpb.ExecDetailsV2{ TimeDetail: resp.ExecDetails.TimeDetail, diff --git a/store/mockstore/unistore/lockstore/load_dump.go b/store/mockstore/unistore/lockstore/load_dump.go index f0192331ecd48..4f8e9acf46dff 100644 --- a/store/mockstore/unistore/lockstore/load_dump.go +++ b/store/mockstore/unistore/lockstore/load_dump.go @@ -27,6 +27,7 @@ import ( // LoadFromFile load a meta from a file. 
func (ls *MemStore) LoadFromFile(fileName string) (meta []byte, err error) { + //nolint: gosec f, err := os.Open(fileName) if err != nil { if os.IsNotExist(err) { diff --git a/testkit/testdata/testdata.go b/testkit/testdata/testdata.go index 851a9924153b9..8bc7f101f5d6f 100644 --- a/testkit/testdata/testdata.go +++ b/testkit/testdata/testdata.go @@ -83,6 +83,7 @@ func loadTestSuiteData(dir, suiteName string) (res TestData, err error) { } func loadTestSuiteCases(filePath string) (res []testCases, err error) { + //nolint: gosec jsonFile, err := os.Open(filePath) if err != nil { return res, err diff --git a/util/benchdaily/bench_daily.go b/util/benchdaily/bench_daily.go index 32c431159e18e..7722def61cced 100644 --- a/util/benchdaily/bench_daily.go +++ b/util/benchdaily/bench_daily.go @@ -85,6 +85,7 @@ func Run(tests ...func(b *testing.B)) { // readBenchResultFromFile is used by the daily bench test. // nolint: unused, deadcode func readBenchResultFromFile(file string) []BenchResult { + //nolint: gosec f, err := os.Open(file) if err != nil { log.Panic(err) diff --git a/util/mathutil/math.go b/util/mathutil/math.go index 5461a676caa27..85ae503935a8b 100644 --- a/util/mathutil/math.go +++ b/util/mathutil/math.go @@ -68,7 +68,7 @@ func IsFinite(f float64) bool { } // Max returns the largest one from its arguments. -func Max[v constraints.Ordered](x v, xs ...v) v { +func Max[T constraints.Ordered](x T, xs ...T) T { max := x for _, n := range xs { if n > max { @@ -79,7 +79,7 @@ func Max[v constraints.Ordered](x v, xs ...v) v { } // Min returns the smallest one from its arguments. -func Min[v constraints.Ordered](x v, xs ...v) v { +func Min[T constraints.Ordered](x T, xs ...T) T { min := x for _, n := range xs { if n < min { @@ -90,7 +90,7 @@ func Min[v constraints.Ordered](x v, xs ...v) v { } // Clamp restrict a value to a certain interval. 
-func Clamp[v constraints.Integer | constraints.Float](n, min, max v) v { +func Clamp[T constraints.Ordered](n, min, max T) T { if n >= max { return max } else if n <= min { diff --git a/util/mathutil/math_test.go b/util/mathutil/math_test.go index 34a4366bc00a7..99ffab416fdd2 100644 --- a/util/mathutil/math_test.go +++ b/util/mathutil/math_test.go @@ -78,4 +78,7 @@ func TestClamp(t *testing.T) { require.Equal(t, float32(1.0), Clamp(float32(0), 1.0, 3.0)) require.Equal(t, 1, Clamp(0, 1, 1)) require.Equal(t, 1, Clamp(100, 1, 1)) + require.Equal(t, "ab", Clamp("aa", "ab", "xy")) + require.Equal(t, "xy", Clamp("yy", "ab", "xy")) + require.Equal(t, "ab", Clamp("ab", "ab", "ab")) } diff --git a/util/plancodec/id.go b/util/plancodec/id.go index 2b2e5e7e972a2..8b17660bfa589 100644 --- a/util/plancodec/id.go +++ b/util/plancodec/id.go @@ -394,6 +394,8 @@ func PhysicalIDToTypeString(id int) string { return TypeBatchPointGet case typeClusterMemTableReader: return TypeClusterMemTableReader + case typeDataSourceID: + return TypeDataSource case typeLoadDataID: return TypeLoadData case typeTableSampleID: diff --git a/util/plancodec/id_test.go b/util/plancodec/id_test.go index 8a7addd23fdba..fe275d81b2aeb 100644 --- a/util/plancodec/id_test.go +++ b/util/plancodec/id_test.go @@ -87,3 +87,9 @@ func TestPlanIDChanged(t *testing.T) { require.Equal(t, testcase.Expected, testcase.Value) } } + +func TestReverse(t *testing.T) { + for i := 1; i <= 55; i++ { + require.Equal(t, TypeStringToPhysicalID(PhysicalIDToTypeString(i)), i) + } +} diff --git a/util/security.go b/util/security.go index b12c53914bcf1..0a958767a50de 100644 --- a/util/security.go +++ b/util/security.go @@ -84,6 +84,7 @@ func ToTLSConfigWithVerify(caPath, certPath, keyPath string, verifyCN []string) // Create a certificate pool from CA certPool := x509.NewCertPool() + //nolint: gosec ca, err := ioutil.ReadFile(caPath) if err != nil { return nil, errors.Annotate(err, "could not read ca certificate") diff --git a/util/table-filter/parser.go b/util/table-filter/parser.go index 73cf29c5ac277..122984f95f86d 100644 --- a/util/table-filter/parser.go +++ b/util/table-filter/parser.go @@ -298,6 +298,7 @@ parseLoop: } func (p *matcherParser) importFile(fileName string, parseMatcher func(string, bool) error) error { + //nolint: gosec file, err := os.Open(fileName) if err != nil { return p.annotatef(err, "cannot open filter file")